// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#include "qla_target.h"
/*
 * QLogic ISP2x00 Hardware Support Function Prototypes.
 */
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
    struct event_arg *ea);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
/* SRB Extensions ---------------------------------------------------------- */

void
qla2x00_sp_timeout(struct timer_list *t)
{
	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
	struct srb_iocb *iocb;
	scsi_qla_host_t *vha = sp->vha;

	WARN_ON(irqs_disabled());
	iocb = &sp->u.iocb_cmd;
	iocb->timeout(sp);

	/* ref: TMR */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

	if (vha && qla2x00_isp_reg_stat(vha->hw)) {
		ql_log(ql_log_info, vha, 0x9008,
		    "PCI/Register disconnect.\n");
		qla_pci_set_eeh_busy(vha);
	}
}
void qla2x00_sp_free(srb_t *sp)
{
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	del_timer(&iocb->timer);
	qla2x00_rel_sp(sp);
}
void qla2xxx_rel_done_warning(srb_t *sp, int res)
{
	WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
}

void qla2xxx_rel_free_warning(srb_t *sp)
{
	WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
}
/* Asynchronous Login/Logout Routines -------------------------------------- */

static inline int
qla2x00_get_async_timeout(struct scsi_qla_host *vha)
{
	unsigned long tmo;
	struct qla_hw_data *ha = vha->hw;

	/* Firmware should use switch negotiated r_a_tov for timeout. */
	tmo = ha->r_a_tov / 10 * 2;
	if (IS_QLAFX00(ha)) {
		tmo = FX00_DEF_RATOV * 2;
	} else if (!IS_FWI2_CAPABLE(ha)) {
		/*
		 * Except for earlier ISPs where the timeout is seeded from the
		 * initialization control block.
		 */
		tmo = ha->login_timeout;
	}
	return tmo;
}
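
/*
 * Abort (ABTS) timeout handling: the timeout handler below walks the queue
 * pair's outstanding_cmds[] array, detaches both the original command and
 * the abort SRB if they are still outstanding, returns their firmware
 * resources, and completes each with QLA_OS_TIMER_EXPIRED.
 */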
static void qla24xx_abort_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;
	struct qla_qpair *qpair = sp->qpair;
	u32 handle;
	unsigned long flags;
	int sp_found = 0, cmdsp_found = 0;

	if (sp->cmd_sp)
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
		    sp->cmd_sp->handle, sp->cmd_sp->type,
		    sp->handle, sp->type);
	else
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout 2 - hdl=%x, type=%x\n",
		    sp->handle, sp->type);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
		if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
		    sp->cmd_sp)) {
			qpair->req->outstanding_cmds[handle] = NULL;
			cmdsp_found = 1;
			qla_put_fw_resources(qpair, &sp->cmd_sp->iores);
		}

		/* removing the abort */
		if (qpair->req->outstanding_cmds[handle] == sp) {
			qpair->req->outstanding_cmds[handle] = NULL;
			sp_found = 1;
			qla_put_fw_resources(qpair, &sp->iores);
			break;
		}
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (cmdsp_found && sp->cmd_sp) {
		/*
		 * This done function should take care of
		 * original command ref: INIT
		 */
		sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
	}

	if (sp_found) {
		abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
		sp->done(sp, QLA_OS_TIMER_EXPIRED);
	}
}
static void qla24xx_abort_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *abt = &sp->u.iocb_cmd;
	srb_t *orig_sp = sp->cmd_sp;

	if (orig_sp)
		qla_wait_nvme_release_cmd_kref(orig_sp);

	if (sp->flags & SRB_WAKEUP_ON_COMP)
		complete(&abt->u.abt.comp);
	else
		/* ref: INIT */
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
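
/*
 * qla24xx_async_abort_cmd() builds an SRB_ABT_CMD request on the same queue
 * pair as the command being aborted. With wait == true the caller blocks on
 * the abort completion and the firmware completion status is translated to
 * QLA_SUCCESS / QLA_ERR_FROM_FW.
 */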
int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
	scsi_qla_host_t *vha = cmd_sp->vha;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	/* ref: INIT for ABTS command */
	sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
	    GFP_ATOMIC);
	if (!sp)
		return QLA_MEMORY_ALLOC_FAILED;

	qla_vha_mark_busy(vha);
	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	sp->qpair = cmd_sp->qpair;
	sp->cmd_sp = cmd_sp;
	if (wait)
		sp->flags = SRB_WAKEUP_ON_COMP;

	init_completion(&abt_iocb->u.abt.comp);
	/* FW can send 2 x ABTS's timeout/20s */
	qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done);
	sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout;

	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
	    cmd_sp->type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* ref: INIT */
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
		return rval;
	}

	if (wait) {
		wait_for_completion(&abt_iocb->u.abt.comp);
		rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
			QLA_SUCCESS : QLA_ERR_FROM_FW;
		/* ref: INIT */
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
	}

	return rval;
}
static void
qla2x00_async_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	int rc, h;
	unsigned long flags;

	if (fcport) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
		    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
		    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	} else {
		pr_info("Async-%s timeout - hdl=%x.\n",
		    sp->name, sp->handle);
	}

	switch (sp->type) {
	case SRB_LOGIN_CMD:
		rc = qla24xx_async_abort_cmd(sp, false);
		if (rc) {
			/* Retry as needed. */
			lio->u.logio.data[0] = MBS_COMMAND_ERROR;
			lio->u.logio.data[1] =
				lio->u.logio.flags & SRB_LOGIN_RETRIED ?
				QLA_LOGIO_LOGIN_RETRIED : 0;
			spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
			    h++) {
				if (sp->qpair->req->outstanding_cmds[h] ==
				    sp) {
					sp->qpair->req->outstanding_cmds[h] =
						NULL;
					break;
				}
			}
			spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
			sp->done(sp, QLA_FUNCTION_TIMEOUT);
		}
		break;
	case SRB_LOGOUT_CMD:
	case SRB_CT_PTHRU_CMD:
	case SRB_MB_IOCB:
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
	case SRB_CTRL_VP:
	default:
		rc = qla24xx_async_abort_cmd(sp, false);
		if (rc) {
			spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
			    h++) {
				if (sp->qpair->req->outstanding_cmds[h] ==
				    sp) {
					sp->qpair->req->outstanding_cmds[h] =
						NULL;
					break;
				}
			}
			spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
			sp->done(sp, QLA_FUNCTION_TIMEOUT);
		}
		break;
	}
}
static void qla2x00_async_login_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20dd,
	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;
		if (res)
			ea.data[0] = MBS_COMMAND_ERROR;
		qla24xx_handle_plogi_done_event(vha, &ea);
	}

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_login_sp_done);

	lio = &sp->u.iocb_cmd;
	if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
		lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
	} else {
		if (vha->hw->flags.edif_enabled &&
		    DBELL_ACTIVE(vha)) {
			lio->u.logio.flags |=
				(SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
		} else {
			lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
		}
	}

	if (NVME_TARGET(vha->hw, fcport))
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	rval = qla2x00_start_sp(sp);

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
	    fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b24, fcport->login_retry,
	    lio->u.logio.flags & SRB_LOGIN_FCSP ? "FCSP" : "");

	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;

	/*
	 * async login failed. Could be due to iocb/exchange resource
	 * being low. Set state DELETED for re-login process to start again.
	 */
	qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
	return rval;
}
static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
{
	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	sp->fcport->login_gen++;
	qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	fcport->flags |= FCF_ASYNC_SENT;
	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_LOGOUT_CMD;
	sp->name = "logout";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_logout_sp_done);

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->port_name, fcport->explicit_logout);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	return rval;
}
void
qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Don't re-login in target mode */
	if (!fcport->tgt_session)
		qla2x00_mark_device_lost(vha, fcport, 1);
	qlt_logo_completion_handler(fcport, data[0]);
}
static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
		    lio->u.logio.data);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_PRLO_CMD;
	sp->name = "prlo";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_prlo_sp_done);

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}
static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	struct fc_port *fcport = ea->fcport;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
	    ea->data[0]);

	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x2066,
		    "%s %8phC: adisc fail: post delete\n",
		    __func__, ea->fcport->port_name);

		spin_lock_irqsave(&vha->work_lock, flags);
		/* deleted = 0 & logout_on_delete = force fw cleanup */
		if (fcport->deleted == QLA_SESS_DELETED)
			fcport->deleted = 0;

		fcport->logout_on_delete = 1;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		qlt_schedule_sess_for_deletion(ea->fcport);
		return;
	}

	if (ea->fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != ea->fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, ea->fcport->port_name);
		return;
	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
}
static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
	return qla2x00_post_work(vha, e);
}
static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct event_arg ea;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_disc, vha, 0x2066,
	    "Async done-%s res %x %8phC\n",
	    sp->name, res, sp->fcport->port_name);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	memset(&ea, 0, sizeof(ea));
	ea.rc = res;
	ea.data[0] = lio->u.logio.data[0];
	ea.data[1] = lio->u.logio.data[1];
	ea.iop[0] = lio->u.logio.iop[0];
	ea.iop[1] = lio->u.logio.iop[1];
	ea.fcport = sp->fcport;
	ea.sp = sp;
	if (res)
		ea.data[0] = MBS_COMMAND_ERROR;

	qla24xx_handle_adisc_event(vha, &ea);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
*vha
, fc_port_t
*fcport
,
604 struct srb_iocb
*lio
;
605 int rval
= QLA_FUNCTION_FAILED
;
607 if (IS_SESSION_DELETED(fcport
)) {
608 ql_log(ql_log_warn
, vha
, 0xffff,
609 "%s: %8phC is being delete - not sending command.\n",
610 __func__
, fcport
->port_name
);
611 fcport
->flags
&= ~FCF_ASYNC_ACTIVE
;
615 if (!vha
->flags
.online
|| (fcport
->flags
& FCF_ASYNC_SENT
))
618 fcport
->flags
|= FCF_ASYNC_SENT
;
620 sp
= qla2x00_get_sp(vha
, fcport
, GFP_KERNEL
);
624 sp
->type
= SRB_ADISC_CMD
;
626 sp
->gen1
= fcport
->rscn_gen
;
627 sp
->gen2
= fcport
->login_gen
;
628 qla2x00_init_async_sp(sp
, qla2x00_get_async_timeout(vha
) + 2,
629 qla2x00_async_adisc_sp_done
);
631 if (data
[1] & QLA_LOGIO_LOGIN_RETRIED
) {
632 lio
= &sp
->u
.iocb_cmd
;
633 lio
->u
.logio
.flags
|= SRB_LOGIN_RETRIED
;
636 ql_dbg(ql_dbg_disc
, vha
, 0x206f,
637 "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
638 sp
->handle
, fcport
->loop_id
, fcport
->d_id
.b24
, fcport
->port_name
);
640 rval
= qla2x00_start_sp(sp
);
641 if (rval
!= QLA_SUCCESS
)
648 kref_put(&sp
->cmd_kref
, qla2x00_sp_release
);
650 fcport
->flags
&= ~(FCF_ASYNC_SENT
| FCF_ASYNC_ACTIVE
);
651 qla2x00_post_async_adisc_work(vha
, fcport
, data
);
static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;

	if (IS_FWI2_CAPABLE(ha))
		return loop_id > NPH_LAST_HANDLE;

	return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
		loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
}
/**
 * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
 * @vha: adapter state pointer.
 * @dev: port structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	rval = QLA_SUCCESS;

	spin_lock_irqsave(&ha->vport_slock, flags);

	dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
	if (dev->loop_id >= LOOPID_MAP_SIZE ||
	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
		dev->loop_id = FC_NO_LOOP_ID;
		rval = QLA_FUNCTION_FAILED;
	} else {
		set_bit(dev->loop_id, ha->loop_id_map);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (rval == QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
		    "Assigning new loopid=%x, portid=%x.\n",
		    dev->loop_id, dev->d_id.b24);
	else
		ql_log(ql_log_warn, dev->vha, 0x2087,
		    "No loop_id's available, portid=%x.\n",
		    dev->d_id.b24);

	return rval;
}
void qla2x00_clear_loop_id(fc_port_t *fcport)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
		return;

	clear_bit(fcport->loop_id, ha->loop_id_map);
	fcport->loop_id = FC_NO_LOOP_ID;
}
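
/*
 * GNL (get name list) completion handling: the firmware-reported login state
 * for each matching WWPN drives the next discovery step - ADISC, GPDB, PRLI
 * or a fresh login - and loop ID / port ID conflicts are resolved by
 * scheduling the conflicting session for deletion.
 */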
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport, *conflict_fcport;
	struct get_name_list_extended *e;
	u16 i, n, found = 0, loop_id;
	port_id_t id;
	u64 wwn;
	u16 data[2];
	u8 current_login_state, nvme_cls;

	fcport = ea->fcport;
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc,
	    fcport->login_gen, fcport->last_login_gen,
	    fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->rc) { /* rval */
		if (fcport->login_retry == 0) {
			ql_dbg(ql_dbg_disc, vha, 0x20de,
			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
			    fcport->port_name, fcport->login_retry);
		}
		return;
	}

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	} else if (fcport->last_login_gen != fcport->login_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e0,
		    "%s %8phC login gen changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	n = ea->data[0] / sizeof(struct get_name_list_extended);

	ql_dbg(ql_dbg_disc, vha, 0x20e1,
	    "%s %d %8phC n %d %02x%02x%02x lid %d \n",
	    __func__, __LINE__, fcport->port_name, n,
	    fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa, fcport->loop_id);

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
			continue;

		if (IS_SW_RESV_ADDR(id))
			continue;

		found = 1;

		loop_id = le16_to_cpu(e->nport_handle);
		loop_id = (loop_id & 0x7fff);
		nvme_cls = e->current_login_state >> 4;
		current_login_state = e->current_login_state & 0xf;

		if (PRLI_PHASE(nvme_cls)) {
			current_login_state = nvme_cls;
			fcport->fc4_type &= ~FS_FC4TYPE_FCP;
			fcport->fc4_type |= FS_FC4TYPE_NVME;
		} else if (PRLI_PHASE(current_login_state)) {
			fcport->fc4_type |= FS_FC4TYPE_FCP;
			fcport->fc4_type &= ~FS_FC4TYPE_NVME;
		}

		ql_dbg(ql_dbg_disc, vha, 0x20e2,
		    "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
		    __func__, fcport->port_name,
		    e->current_login_state, fcport->fw_login_state,
		    fcport->fc4_type, id.b24, fcport->d_id.b24,
		    loop_id, fcport->loop_id);

		switch (fcport->disc_state) {
		case DSC_DELETE_PEND:
		case DSC_DELETED:
			break;
		default:
			if ((id.b24 != fcport->d_id.b24 &&
			    fcport->n2n_flag == 0 &&
			    fcport->loop_id != FC_NO_LOOP_ID) ||
			    (fcport->loop_id != FC_NO_LOOP_ID &&
			     fcport->loop_id != loop_id)) {
				ql_dbg(ql_dbg_disc, vha, 0x20e3,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, fcport->port_name);
				if (fcport->n2n_flag)
					fcport->d_id.b24 = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
			break;
		}

		fcport->loop_id = loop_id;
		if (fcport->n2n_flag)
			fcport->d_id.b24 = id.b24;

		wwn = wwn_to_u64(fcport->port_name);
		qlt_find_sess_invalidate_other(vha, wwn,
			id, loop_id, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport shares the same loop_id &
			 * nport id. The conflicting fcport needs to finish
			 * cleanup before this fcport can proceed to login.
			 */
			conflict_fcport->conflict = fcport;
			fcport->login_pause = 1;
		}

		switch (vha->hw->current_topology) {
		default:
			switch (current_login_state) {
			case DSC_LS_PRLI_COMP:
				ql_dbg(ql_dbg_disc,
				    vha, 0x20e4, "%s %d %8phC post gpdb\n",
				    __func__, __LINE__, fcport->port_name);

				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;
				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PLOGI_COMP:
				if (vha->hw->flags.edif_enabled) {
					/* check to see if App support Secure */
					qla24xx_post_gpdb_work(vha, fcport, 0);
					break;
				}
				fallthrough;
			case DSC_LS_PORT_UNAVAIL:
			default:
				if (fcport->loop_id == FC_NO_LOOP_ID) {
					qla2x00_find_new_loop_id(vha, fcport);
					fcport->fw_login_state =
					    DSC_LS_PORT_UNAVAIL;
				}
				ql_dbg(ql_dbg_disc, vha, 0x20e5,
				    "%s %d %8phC\n", __func__, __LINE__,
				    fcport->port_name);
				qla24xx_fcport_handle_login(vha, fcport);
				break;
			}
			break;
		case ISP_CFG_N:
			fcport->fw_login_state = current_login_state;
			fcport->d_id = id;
			switch (current_login_state) {
			case DSC_LS_PRLI_PEND:
				/*
				 * In the middle of PRLI. Let it finish.
				 * Allow relogin code to recheck state again
				 * with GNL. Push disc_state back to DELETED
				 * so GNL can go out again
				 */
				qla2x00_set_fcport_disc_state(fcport,
				    DSC_DELETED);
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				break;
			case DSC_LS_PRLI_COMP:
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;

				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PLOGI_COMP:
				if (vha->hw->flags.edif_enabled &&
				    DBELL_ACTIVE(vha)) {
					/* check to see if App support secure or not */
					qla24xx_post_gpdb_work(vha, fcport, 0);
					break;
				}
				if (fcport_is_bigger(fcport)) {
					/* local adapter is smaller */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
					break;
				}
				fallthrough;
			default:
				if (fcport_is_smaller(fcport)) {
					/* local adapter is bigger */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
				}
				break;
			}
			break;
		} /* switch (ha->current_topology) */
	}

	if (!found) {
		switch (vha->hw->current_topology) {
		case ISP_CFG_F:
		case ISP_CFG_FL:
			for (i = 0; i < n; i++) {
				e = &vha->gnl.l[i];
				id.b.domain = e->port_id[0];
				id.b.area = e->port_id[1];
				id.b.al_pa = e->port_id[2];
				id.b.rsvd_1 = 0;
				loop_id = le16_to_cpu(e->nport_handle);

				if (fcport->d_id.b24 == id.b24) {
					conflict_fcport =
					    qla2x00_find_fcport_by_wwpn(vha,
						e->port_name, 0);
					if (conflict_fcport) {
						ql_dbg(ql_dbg_disc + ql_dbg_verbose,
						    vha, 0x20e5,
						    "%s %d %8phC post del sess\n",
						    __func__, __LINE__,
						    conflict_fcport->port_name);
						qlt_schedule_sess_for_deletion
							(conflict_fcport);
					}
				}
				/*
				 * FW already picked this loop id for
				 * another fcport
				 */
				if (fcport->loop_id == loop_id)
					fcport->loop_id = FC_NO_LOOP_ID;
			}
			qla24xx_fcport_handle_login(vha, fcport);
			break;
		case ISP_CFG_N:
			qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
			if (time_after_eq(jiffies, fcport->dm_login_expire)) {
				if (fcport->n2n_link_reset_cnt < 2) {
					fcport->n2n_link_reset_cnt++;
					/*
					 * remote port is not sending PLOGI.
					 * Reset link to kick start his state
					 * machine
					 */
					set_bit(N2N_LINK_RESET,
					    &vha->dpc_flags);
				} else {
					if (fcport->n2n_chip_reset < 1) {
						ql_log(ql_log_info, vha, 0x705d,
						    "Chip reset to bring laser down");
						set_bit(ISP_ABORT_NEEDED,
						    &vha->dpc_flags);
						fcport->n2n_chip_reset++;
					} else {
						ql_log(ql_log_info, vha, 0x705d,
						    "Remote port %8ph is not coming back\n",
						    fcport->port_name);
						fcport->scan_state = 0;
					}
				}
				qla2xxx_wake_dpc(vha);
			} else {
				/*
				 * remote port is supposed to do PLOGI. Give it
				 * more time. FW will catch it.
				 */
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			}
			break;
		default:
			qla24xx_fcport_handle_login(vha, fcport);
			break;
		}
	}
}
static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;
	struct fc_port *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;
	bool found = false;

	ql_dbg(ql_dbg_disc, vha, 0x20e7,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);
	if (res == QLA_FUNCTION_TIMEOUT)
		return;

	sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;

	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
	}

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out reserve bit */
		loop_id = (loop_id & 0x7fff);
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
		    __func__, &wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports, &h);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		list_del_init(&fcport->gnl_entry);
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ea.fcport = fcport;

		qla24xx_handle_gnl_done_event(vha, &ea);
	}

	/* create new fcport if fw has knowledge of new sessions */
	for (i = 0; i < n; i++) {
		port_id_t id;
		u64 wwnn;

		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		found = false;
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!memcmp((u8 *)&wwn, fcport->port_name,
			    WWN_SIZE)) {
				found = true;
				break;
			}
		}

		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
			ql_dbg(ql_dbg_disc, vha, 0x2065,
			    "%s %d %8phC %06x post new sess\n",
			    __func__, __LINE__, (u8 *)&wwn, id.b24);
			wwnn = wwn_to_u64(e->node_name);
			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
			    (u8 *)&wwnn, NULL, 0);
		}
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;
	if (!list_empty(&vha->gnl.fcports)) {
		/* retrigger gnl */
		list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
		    gnl_entry) {
			list_del_init(&fcport->gnl_entry);
			fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
			if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
				break;
		}
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
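
/*
 * GNL requests are batched: callers queue their fcport on vha->gnl.fcports
 * and only the first caller (gnl.sent == 0) issues the mailbox IOCB; the
 * completion handler services every queued fcport and re-arms any that were
 * added while the command was in flight.
 */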
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		goto done;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->flags |= FCF_ASYNC_SENT;
	qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return QLA_SUCCESS;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla24xx_async_gnl_sp_done);

	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	/*
	 * use qla24xx_async_gnl_sp_done to purge all pending gnl requests.
	 * kref_put is called behind the scenes.
	 */
	sp->u.iocb_cmd.u.mbx.in_mb[0] = MBS_COMMAND_ERROR;
	qla24xx_async_gnl_sp_done(sp, QLA_COMMAND_ERROR);
	fcport->flags &= ~(FCF_ASYNC_SENT);
done:
	fcport->flags &= ~(FCF_ASYNC_ACTIVE);
	return rval;
}
int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}
static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (res == QLA_FUNCTION_TIMEOUT)
		goto done;

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;

	qla24xx_handle_gpdb_event(vha, &ea);

done:
	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
	    sp->u.iocb_cmd.u.mbx.in_dma);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	if (vha->host->active_mode == MODE_TARGET)
		return QLA_FUNCTION_FAILED;

	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, e);
}
static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %x\n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;
		if (res == QLA_OS_TIMER_EXPIRED)
			ea.data[0] = QLA_OS_TIMER_EXPIRED;
		else if (res)
			ea.data[0] = MBS_COMMAND_ERROR;

		qla24xx_handle_prli_done_event(vha, &ea);
	}

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
	    qla_dual_mode_enabled(vha)) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_prli_sp_done);

	lio = &sp->u.iocb_cmd;
	lio->u.logio.flags = 0;

	if (NVME_TARGET(vha->hw, fcport))
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n",
	    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
	    fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
	    NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp");

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	e->u.fcport.opt = opt;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}
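
/*
 * GPDB (get port database): a mailbox IOCB that reads the firmware's port
 * database for this fcport into a DMA buffer allocated from s_dma_pool; the
 * buffer is released in qla24xx_async_gpdb_sp_done().
 */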
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (IS_SESSION_DELETED(fcport)) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC is being delete - not sending command.\n",
		    __func__, fcport->port_name);
		fcport->flags &= ~FCF_ASYNC_ACTIVE;
		return rval;
	}

	if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC online %d flags %x - not sending command.\n",
		    __func__, fcport->port_name, vha->flags.online, fcport->flags);
		goto done;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla24xx_async_gpdb_sp_done);

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	mbx = &sp->u.iocb_cmd;
	mbx->u.mbx.in = (void *)pd;
	mbx->u.mbx.in_dma = pd_dma;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}
static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		qla24xx_sched_upd_fcport(ea->fcport);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	} else if (ea->fcport->login_succ) {
		/*
		 * We have an existing session. A late RSCN delivery
		 * must have triggered the session to be re-validated.
		 * Session is still valid.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, ea->fcport->port_name);
		qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct port_database_24xx *pd)
{
	int rc = 0;

	if (pd->secure_login) {
		ql_dbg(ql_dbg_disc, vha, 0x104d,
		    "Secure Login established on %8phC\n",
		    fcport->port_name);
		fcport->flags |= FCF_FCSP_DEVICE;
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x104d,
		    "non-Secure Login %8phC",
		    fcport->port_name);
		fcport->flags &= ~FCF_FCSP_DEVICE;
	}
	if (vha->hw->flags.edif_enabled) {
		if (fcport->flags & FCF_FCSP_DEVICE) {
			qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND);
			/* Start edif prli timer & ring doorbell for app */
			fcport->edif.rx_sa_set = 0;
			fcport->edif.tx_sa_set = 0;
			fcport->edif.rx_sa_pending = 0;
			fcport->edif.tx_sa_pending = 0;

			qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
			    fcport->d_id.b24);

			if (DBELL_ACTIVE(vha)) {
				ql_dbg(ql_dbg_disc, vha, 0x20ef,
				    "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->edif.app_sess_online = 1;

				qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
				    fcport->d_id.b24, 0, fcport);
			}

			rc = 1;
		} else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2117,
			    "%s %d %8phC post prli\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_post_prli_work(vha, fcport);
			rc = 1;
		}
	}
	return rc;
}
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;
	struct port_database_24xx *pd;
	struct srb *sp = ea->sp;
	uint8_t	ls;

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %x fc4_type %x rc %x\n", __func__,
	    fcport->port_name, fcport->disc_state, pd->current_login_state,
	    fcport->fc4_type, ea->rc);

	if (fcport->disc_state == DSC_DELETE_PEND) {
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n",
		    __func__, __LINE__, fcport->port_name);
		return;
	}

	if (NVME_TARGET(vha->hw, fcport))
		ls = pd->current_login_state >> 4;
	else
		ls = pd->current_login_state & 0xf;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */

		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
		    __func__, __LINE__, fcport->port_name, ls);
		return;
	}

	switch (ls) {
	case PDS_PRLI_COMPLETE:
		__qla24xx_parse_gpdb(vha, fcport, pd);
		break;
	case PDS_PLOGI_COMPLETE:
		if (qla_chk_secure_login(vha, fcport, pd)) {
			ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
			    __func__, __LINE__, fcport->port_name, ls);
			return;
		}
		fallthrough;
	case PDS_PLOGI_PENDING:
	case PDS_PRLI_PENDING:
	case PDS_PRLI2_PENDING:
		/* Set discovery state back to GNL to Relogin attempt */
		if (qla_dual_mode_enabled(vha) ||
		    qla_ini_mode_enabled(vha)) {
			qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
		    __func__, __LINE__, fcport->port_name, ls);
		return;
	case PDS_LOGO_PENDING:
	case PDS_PORT_UNAVAILABLE:
	default:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */
static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u8 login = 0;
	int rc;

	ql_dbg(ql_dbg_disc, vha, 0x307b,
	    "%s %8phC DS %d LS %d lid %d retries=%d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->loop_id, fcport->login_retry);

	if (qla_tgt_mode_enabled(vha))
		return;

	if (qla_dual_mode_enabled(vha)) {
		if (N2N_TOPO(vha->hw)) {
			u64 mywwn, wwn;

			mywwn = wwn_to_u64(vha->port_name);
			wwn = wwn_to_u64(fcport->port_name);
			if (mywwn > wwn)
				login = 1;
			else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			    && time_after_eq(jiffies,
				    fcport->plogi_nack_done_deadline))
				login = 1;
		} else {
			login = 1;
		}
	} else {
		/* initiator mode */
		login = 1;
	}

	if (login && fcport->login_retry) {
		fcport->login_retry--;
		if (fcport->loop_id == FC_NO_LOOP_ID) {
			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
			rc = qla2x00_find_new_loop_id(vha, fcport);
			if (rc) {
				ql_dbg(ql_dbg_disc, vha, 0x20e6,
				    "%s %d %8phC post del sess - out of loopid\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->scan_state = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
		}
		ql_dbg(ql_dbg_disc, vha, 0x20bf,
		    "%s %d %8phC post login\n",
		    __func__, __LINE__, fcport->port_name);
		qla2x00_post_async_login_work(vha, fcport, NULL);
	}
}
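
/*
 * Main discovery state machine entry point: dispatches on the fcport's
 * disc_state (DELETED, GNL, LOGIN_FAILED, LOGIN_COMPLETE, LOGIN_PEND,
 * UPD_FCPORT) and on the current topology to pick the next async step.
 */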
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u16 data[2];
	u16 sec;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d fc4type %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->login_gen, fcport->loop_id, fcport->scan_state,
	    fcport->fc4_type);

	if (fcport->scan_state != QLA_FCPORT_FOUND ||
	    fcport->disc_state == DSC_DELETE_PEND)
		return 0;

	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
	    qla_dual_mode_enabled(vha) &&
	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	     (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
		return 0;

	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
	    !N2N_TOPO(vha->hw)) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* Target won't initiate port login if fabric is present */
	if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
		return 0;

	if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			if (fcport_is_smaller(fcport)) {
				/* this adapter is bigger */
				if (fcport->login_retry) {
					if (fcport->loop_id == FC_NO_LOOP_ID) {
						qla2x00_find_new_loop_id(vha,
						    fcport);
						fcport->fw_login_state =
						    DSC_LS_PORT_UNAVAIL;
					}
					fcport->login_retry--;
					qla_post_els_plogi_work(vha, fcport);
				} else {
					ql_log(ql_log_info, vha, 0x705d,
					    "Unable to reach remote port %8phC",
					    fcport->port_name);
				}
			} else {
				qla24xx_post_gnl_work(vha, fcport);
			}
			break;
		default:
			if (fcport->loop_id == FC_NO_LOOP_ID) {
				ql_dbg(ql_dbg_disc, vha, 0x20bd,
				    "%s %d %8phC post gnl\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnl_work(vha, fcport);
			} else {
				qla_chk_n2n_b4_login(vha, fcport);
			}
			break;
		}
		break;

	case DSC_GNL:
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			if ((fcport->current_login_state & 0xf) == 0x6) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post GPDB work\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->chip_reset =
					vha->hw->base_qpair->chip_reset;
				qla24xx_post_gpdb_work(vha, fcport, 0);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post %s PRLI\n",
				    __func__, __LINE__, fcport->port_name,
				    NVME_TARGET(vha->hw, fcport) ? "NVME" :
				    "FC");
				qla24xx_post_prli_work(vha, fcport);
			}
			break;
		default:
			if (fcport->login_pause) {
				ql_dbg(ql_dbg_disc, vha, 0x20d8,
				    "%s %d %8phC exit\n",
				    __func__, __LINE__,
				    fcport->port_name);
				fcport->last_rscn_gen = fcport->rscn_gen;
				fcport->last_login_gen = fcport->login_gen;
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				break;
			}
			qla_chk_n2n_b4_login(vha, fcport);
			break;
		}
		break;

	case DSC_LOGIN_FAILED:
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qlt_schedule_sess_for_deletion(fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		data[0] = data[1] = 0;
		qla2x00_post_async_adisc_work(vha, fcport, data);
		break;

	case DSC_LOGIN_PEND:
		if (vha->hw->flags.edif_enabled)
			break;

		if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
			ql_dbg(ql_dbg_disc, vha, 0x2118,
			    "%s %d %8phC post %s PRLI\n",
			    __func__, __LINE__, fcport->port_name,
			    NVME_TARGET(vha->hw, fcport) ? "NVME" : "FC");
			qla24xx_post_prli_work(vha, fcport);
		}
		break;

	case DSC_UPD_FCPORT:
		sec = jiffies_to_msecs(jiffies -
		    fcport->jiffies_at_registration)/1000;
		if (fcport->sec_since_registration < sec && sec &&
		    !(sec % 60)) {
			fcport->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
			    "%s %8phC - Slow Rport registration(%d Sec)\n",
			    __func__, fcport->port_name, sec);
		}

		if (fcport->next_disc_state != DSC_DELETE_PEND)
			fcport->next_disc_state = DSC_ADISC;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		break;

	default:
		break;
	}

	return 0;
}
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
    u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.new_sess.id = *id;
	e->u.new_sess.pla = pla;
	e->u.new_sess.fc4_type = fc4_type;
	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
	if (node_name)
		memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);

	return qla2x00_post_work(vha, e);
}
static void qla_rscn_gen_tick(scsi_qla_host_t *vha, u32 *ret_rscn_gen)
{
	*ret_rscn_gen = atomic_inc_return(&vha->rscn_gen);
	/* memory barrier */
	wmb();
}
void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport;
	unsigned long flags;
	u32 rscn_gen;

	switch (ea->id.b.rsvd_1) {
	case RSCN_PORT_ADDR:
		fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
		if (fcport) {
			if (ql2xfc2target &&
			    fcport->flags & FCF_FCP2_DEVICE &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				ql_dbg(ql_dbg_disc, vha, 0x2115,
				    "Delaying session delete for FCP2 portid=%06x %8phC ",
				    fcport->d_id.b24, fcport->port_name);
				return;
			}

			if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) {
				/*
				 * On ipsec start by remote port, Target port
				 * may use RSCN to trigger initiator to
				 * relogin. If driver is already in the
				 * process of a relogin, then ignore the RSCN
				 * and allow the current relogin to continue.
				 * This reduces thrashing of the connection.
				 */
				if (atomic_read(&fcport->state) == FCS_ONLINE) {
					/*
					 * If state = online, then set scan_needed=1 to do relogin.
					 * Otherwise we're already in the middle of a relogin
					 */
					fcport->scan_needed = 1;
					qla_rscn_gen_tick(vha, &fcport->rscn_gen);
				}
			} else {
				fcport->scan_needed = 1;
				qla_rscn_gen_tick(vha, &fcport->rscn_gen);
			}
		}
		break;
	case RSCN_AREA_ADDR:
		qla_rscn_gen_tick(vha, &rscn_gen);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->flags & FCF_FCP2_DEVICE &&
			    atomic_read(&fcport->state) == FCS_ONLINE)
				continue;

			if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
				fcport->scan_needed = 1;
				fcport->rscn_gen = rscn_gen;
			}
		}
		break;
	case RSCN_DOM_ADDR:
		qla_rscn_gen_tick(vha, &rscn_gen);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->flags & FCF_FCP2_DEVICE &&
			    atomic_read(&fcport->state) == FCS_ONLINE)
				continue;

			if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
				fcport->scan_needed = 1;
				fcport->rscn_gen = rscn_gen;
			}
		}
		break;
	case RSCN_FAB_ADDR:
	default:
		qla_rscn_gen_tick(vha, &rscn_gen);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->flags & FCF_FCP2_DEVICE &&
			    atomic_read(&fcport->state) == FCS_ONLINE)
				continue;

			fcport->scan_needed = 1;
			fcport->rscn_gen = rscn_gen;
		}
		break;
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	if (vha->scan.scan_flags == 0) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
		vha->scan.scan_flags |= SF_QUEUED;
		vha->scan.rscn_gen_start = atomic_read(&vha->rscn_gen);
		schedule_delayed_work(&vha->scan.scan_work, 5);
	}
	spin_unlock_irqrestore(&vha->work_lock, flags);
}
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
		    __func__, __LINE__, fcport->port_name);
		qla24xx_post_gnl_work(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}
void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) &&
	    vha->hw->flags.edif_enabled) {
		/* check to see if App support Secure */
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		return;
	}

	/* for pure Target Mode, PRLI will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x2118,
	    "%s %d %8phC post PRLI\n",
	    __func__, __LINE__, ea->fcport->port_name);
	qla24xx_post_prli_work(vha, ea->fcport);
}
/*
 * RSCN(s) came in for this fcport, but the RSCN(s) was not able
 * to be consumed by the fcport
 */
void qla_rscn_replay(fc_port_t *fcport)
{
	struct event_arg ea;

	switch (fcport->disc_state) {
	case DSC_DELETE_PEND:
		return;
	default:
		break;
	}

	if (fcport->scan_needed) {
		memset(&ea, 0, sizeof(ea));
		ea.id = fcport->d_id;
		ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
		qla2x00_handle_rscn(fcport->vha, &ea);
	}
}
static void
qla2x00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;
	int rc, h;
	unsigned long flags;

	if (sp->type == SRB_MARKER)
		rc = QLA_FUNCTION_FAILED;
	else
		rc = qla24xx_async_abort_cmd(sp, false);

	if (rc) {
		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
			if (sp->qpair->req->outstanding_cmds[h] == sp) {
				sp->qpair->req->outstanding_cmds[h] = NULL;
				qla_put_fw_resources(sp->qpair, &sp->iores);
				break;
			}
		}
		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
		tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
		tmf->u.tmf.data = QLA_FUNCTION_FAILED;
		complete(&tmf->u.tmf.comp);
	}
}
static void qla_marker_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	if (res != QLA_SUCCESS)
		ql_dbg(ql_dbg_taskm, sp->vha, 0x8004,
		    "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n",
		    sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags,
		    sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id);

	sp->u.iocb_cmd.u.tmf.data = res;
	complete(&tmf->u.tmf.comp);
}
#define START_SP_W_RETRIES(_sp, _rval, _chip_gen, _login_gen) \
{\
	int cnt = 5; \
	do { \
		if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) { \
			_rval = EINVAL; \
			break; \
		} \
		_rval = qla2x00_start_sp(_sp); \
		if (_rval == EAGAIN) \
			msleep(1); \
		else \
			break; \
		cnt--; \
	} while (cnt); \
}
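
/*
 * START_SP_W_RETRIES() retries qla2x00_start_sp() briefly on EAGAIN and
 * bails out early if a chip reset or a login-generation change is observed
 * while waiting.
 */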
/**
 * qla26xx_marker: send marker IOCB and wait for the completion of it.
 * @arg: pointer to argument list.
 *    It is assumed the caller will provide an fcport pointer and modifier.
 */
static int
qla26xx_marker(struct tmf_arg *arg)
{
	struct scsi_qla_host *vha = arg->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;
	fc_port_t *fcport = arg->fcport;
	u32 chip_gen, login_gen;

	if (TMF_NOT_READY(arg->fcport)) {
		ql_dbg(ql_dbg_taskm, vha, 0x8039,
		    "FC port not ready for marker loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
		    fcport->loop_id, fcport->d_id.b24,
		    arg->modifier, arg->lun, arg->qpair->id);
		return QLA_SUSPENDED;
	}

	chip_gen = vha->hw->chip_reset;
	login_gen = fcport->login_gen;

	/* ref: INIT */
	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MARKER;
	sp->name = "marker";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done);
	sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;

	tm_iocb = &sp->u.iocb_cmd;
	init_completion(&tm_iocb->u.tmf.comp);
	tm_iocb->u.tmf.modifier = arg->modifier;
	tm_iocb->u.tmf.lun = arg->lun;
	tm_iocb->u.tmf.loop_id = fcport->loop_id;
	tm_iocb->u.tmf.vp_index = vha->vp_idx;

	START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);

	ql_dbg(ql_dbg_taskm, vha, 0x8006,
	    "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24,
	    arg->modifier, arg->lun, sp->qpair->id, rval);

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8031,
		    "Marker IOCB send failure (%x).\n", rval);
		goto done_free_sp;
	}

	wait_for_completion(&tm_iocb->u.tmf.comp);
	rval = tm_iocb->u.tmf.data;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8019,
		    "Marker failed hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
		    sp->handle, fcport->loop_id, fcport->d_id.b24,
		    arg->modifier, arg->lun, sp->qpair->id, rval);
	}

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	return rval;
}
static void qla2x00_tmf_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	if (res)
		tmf->u.tmf.data = res;
	complete(&tmf->u.tmf.comp);
}
static int qla_tmf_wait(struct tmf_arg *arg)
{
	/* there are only 2 types of error handling that reaches here, lun or target reset */
	if (arg->flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET | TCF_CLEAR_TASK_SET))
		return qla2x00_eh_wait_for_pending_commands(arg->vha,
		    arg->fcport->d_id.b24, arg->lun, WAIT_LUN);
	else
		return qla2x00_eh_wait_for_pending_commands(arg->vha,
		    arg->fcport->d_id.b24, arg->lun, WAIT_TARGET);
}
static int
__qla2x00_async_tm_cmd(struct tmf_arg *arg)
{
	struct scsi_qla_host *vha = arg->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;
	fc_port_t *fcport = arg->fcport;
	u32 chip_gen, login_gen;
	u64 jif;

	if (TMF_NOT_READY(arg->fcport)) {
		ql_dbg(ql_dbg_taskm, vha, 0x8032,
		    "FC port not ready for TM command loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
		    fcport->loop_id, fcport->d_id.b24,
		    arg->modifier, arg->lun, arg->qpair->id);
		return QLA_SUSPENDED;
	}

	chip_gen = vha->hw->chip_reset;
	login_gen = fcport->login_gen;

	/* ref: INIT */
	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla_vha_mark_busy(vha);
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
	    qla2x00_tmf_sp_done);
	sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;

	tm_iocb = &sp->u.iocb_cmd;
	init_completion(&tm_iocb->u.tmf.comp);
	tm_iocb->u.tmf.flags = arg->flags;
	tm_iocb->u.tmf.lun = arg->lun;

	START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24,
	    arg->flags, arg->lun, sp->qpair->id, rval);

	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.data;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		jif = jiffies;
		if (qla_tmf_wait(arg)) {
			ql_log(ql_log_info, vha, 0x803e,
			    "Waited %u ms Nexus=%ld:%06x:%llu.\n",
			    jiffies_to_msecs(jiffies - jif), vha->host_no,
			    fcport->d_id.b24, arg->lun);
		}

		if (chip_gen == vha->hw->chip_reset &&
		    login_gen == fcport->login_gen) {
			rval = qla26xx_marker(arg);
		} else {
			ql_log(ql_log_info, vha, 0x803e,
			    "Skip Marker due to disruption. Nexus=%ld:%06x:%llu.\n",
			    vha->host_no, fcport->d_id.b24, arg->lun);
			rval = QLA_FUNCTION_FAILED;
		}
	}

	if (tm_iocb->u.tmf.data)
		rval = tm_iocb->u.tmf.data;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	return rval;
}
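
/* Release the active-TMF slot taken by qla_get_tmf(). */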
static void qla_put_tmf(struct tmf_arg *arg)
{
	struct scsi_qla_host *vha = arg->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ha->active_tmf--;
	list_del(&arg->tmf_elem);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
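
/*
 * Serialize TMFs: reject a duplicate TMF for the same nexus and throttle the
 * number of concurrently active TMFs to MAX_ACTIVE_TMF.
 */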
static int qla_get_tmf(struct tmf_arg *arg)
{
	struct scsi_qla_host *vha = arg->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	fc_port_t *fcport = arg->fcport;
	int rc = 0;
	struct tmf_arg *t;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	list_for_each_entry(t, &ha->tmf_active, tmf_elem) {
		if (t->fcport == arg->fcport && t->lun == arg->lun) {
			/* reject duplicate TMF */
			ql_log(ql_log_warn, vha, 0x802c,
			    "found duplicate TMF. Nexus=%ld:%06x:%llu.\n",
			    vha->host_no, fcport->d_id.b24, arg->lun);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return -EINVAL;
		}
	}

	list_add_tail(&arg->tmf_elem, &ha->tmf_pending);
	while (ha->active_tmf >= MAX_ACTIVE_TMF) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		msleep(1);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (TMF_NOT_READY(fcport)) {
			ql_log(ql_log_warn, vha, 0x802c,
			    "Unable to acquire TM resource due to disruption.\n");
			rc = EIO;
			break;
		}
		if (ha->active_tmf < MAX_ACTIVE_TMF &&
		    list_is_first(&arg->tmf_elem, &ha->tmf_pending))
			break;
	}

	list_del(&arg->tmf_elem);

	if (!rc) {
		ha->active_tmf++;
		list_add_tail(&arg->tmf_elem, &ha->tmf_active);
	}

	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return rc;
}
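
/*
 * Entry point used by the error handlers to send a task-management command
 * for the given fcport/lun on the base queue pair.
 */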
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
    uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct tmf_arg a;
	int rval = QLA_SUCCESS;

	if (TMF_NOT_READY(fcport))
		return QLA_SUSPENDED;

	a.vha = fcport->vha;
	a.fcport = fcport;
	a.lun = lun;
	a.flags = flags;
	INIT_LIST_HEAD(&a.tmf_elem);

	if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
		a.modifier = MK_SYNC_ID_LUN;
	} else {
		a.modifier = MK_SYNC_ID;
	}

	if (qla_get_tmf(&a))
		return QLA_FUNCTION_FAILED;

	a.qpair = vha->hw->base_qpair;
	rval = __qla2x00_async_tm_cmd(&a);

	qla_put_tmf(&a);
	return rval;
}
int
qla24xx_async_abort_command(srb_t *sp)
{
	unsigned long flags = 0;
	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct qla_qpair *qpair = sp->qpair;
	struct scsi_qla_host *vha = fcport->vha;
	struct req_que *req = qpair->req;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_ERR_NOT_FOUND;
	}
	if (sp->type == SRB_FXIOCB_DCMD)
		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
		    FXDISC_ABORT_IOCTL);

	return qla24xx_async_abort_cmd(sp, true);
}
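
/*
 * Handle PRLI completion: on success post a GPDB to finish the login,
 * otherwise decide between an N2N link reset and session deletion/relogin.
 */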
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	struct srb *sp = ea->sp;

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
	    ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		ea->fcport->nvme_prli_service_param = ea->iop[0];
		if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
			ea->fcport->nvme_first_burst_size =
			    (ea->iop[1] & 0xffff) * 512;
		else
			ea->fcport->nvme_first_burst_size = 0;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC priority %s, fc4type %x prev try %s\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
		    "FCP" : "NVMe", ea->fcport->fc4_type,
		    (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ?
		    "NVME" : "FCP");

		if (NVME_FCP_TARGET(ea->fcport)) {
			if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI)
				ea->fcport->do_prli_nvme = 0;
			else
				ea->fcport->do_prli_nvme = 1;
		} else {
			ea->fcport->do_prli_nvme = 0;
		}

		if (N2N_TOPO(vha->hw)) {
			if (ea->fcport->n2n_link_reset_cnt ==
			    vha->hw->login_retry_count &&
			    ea->fcport->flags & FCF_FCSP_DEVICE) {
				/* remote authentication app just started */
				ea->fcport->n2n_link_reset_cnt = 0;
			}

			if (ea->fcport->n2n_link_reset_cnt <
			    vha->hw->login_retry_count) {
				ea->fcport->n2n_link_reset_cnt++;
				vha->relogin_jif = jiffies + 2 * HZ;
				/*
				 * PRLI failed. Reset link to kick start
				 * the state machine.
				 */
				set_bit(N2N_LINK_RESET, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			} else {
				ql_log(ql_log_warn, vha, 0x2119,
				    "%s %d %8phC Unable to reconnect\n",
				    __func__, __LINE__,
				    ea->fcport->port_name);
			}
		} else {
			/*
			 * switch connect. login failed. Take connection down
			 * and allow relogin to retrigger
			 */
			ea->fcport->flags &= ~FCF_ASYNC_SENT;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
}
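
/*
 * Handle PLOGI completion mailbox status, including loop-id and
 * port-id conflict resolution before discovery can proceed.
 */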
static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */
	u16 lid;
	struct fc_port *conflict_fcport;
	unsigned long flags;
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
	    ea->sp->gen1, fcport->rscn_gen,
	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
		ql_dbg(ql_dbg_disc, vha, 0x20ea,
		    "%s %d %8phC Remote is trying to login\n",
		    __func__, __LINE__, fcport->port_name);
		return;
	}

	if ((fcport->disc_state == DSC_DELETE_PEND) ||
	    (fcport->disc_state == DSC_DELETED)) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC RSCN generation changed\n",
		    __func__, fcport->port_name);
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
	    ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (vha->hw->flags.edif_enabled) {
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		} else {
			if (NVME_TARGET(vha->hw, fcport)) {
				ql_dbg(ql_dbg_disc, vha, 0x2117,
				    "%s %d %8phC post prli\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_prli_work(vha, fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ea,
				    "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
				    __func__, __LINE__, fcport->port_name,
				    fcport->loop_id, fcport->d_id.b24);

				set_bit(fcport->loop_id, vha->hw->loop_id_map);
				spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
				fcport->chip_reset = vha->hw->base_qpair->chip_reset;
				fcport->logout_on_delete = 1;
				fcport->send_els_logo = 0;
				fcport->fw_login_state = DSC_LS_PRLI_COMP;
				spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

				qla24xx_post_gpdb_work(vha, fcport, 0);
			}
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		qlt_schedule_sess_for_deletion(ea->fcport);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID  */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
		cid.b.al_pa  = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id, cid.b24);

		set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
		ea->fcport->loop_id = FC_NO_LOOP_ID;
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		lid = ea->iop[1] & 0xffff;
		qlt_find_sess_invalidate_other(vha,
		    wwn_to_u64(ea->fcport->port_name),
		    ea->fcport->d_id, lid, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport share the same loop_id/nport id.
			 * Conflict fcport needs to finish cleanup before this
			 * fcport can proceed to login.
			 */
			conflict_fcport->conflict = ea->fcport;
			ea->fcport->login_pause = 1;

			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x.\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
}
/****************************************************************************/
/*                QLogic ISP2x00 Hardware Support Functions.                */
/****************************************************************************/
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		memset(config, 0, sizeof(config));
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}
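
/* Re-arm the FCE trace buffer after an ISP (re)initialization. */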
static void qla_enable_fce_trace(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (ha->fce) {
		ha->flags.fce_enabled = 1;
		memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
		rval = qla2x00_enable_fce_trace(vha,
		    ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs);

		if (rval) {
			ql_log(ql_log_warn, vha, 0x8033,
			    "Unable to reinitialize FCE (%d).\n", rval);
			ha->flags.fce_enabled = 0;
		}
	}
}
static void qla_enable_eft_trace(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (ha->eft) {
		memset(ha->eft, 0, EFT_SIZE);
		rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS);

		if (rval) {
			ql_log(ql_log_warn, vha, 0x8034,
			    "Unable to reinitialize EFT (%d).\n", rval);
		}
	}
}
/*
 * qla2x00_initialize_adapter
 *      Initialize board.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int	rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return rval;
	}

	ha->isp_ops->reset_chip(vha);

	/* Check for secure flash support */
	if (IS_QLA28XX(ha)) {
		if (rd_reg_word(&reg->mailbox12) & BIT_0)
			ha->flags.secure_adapter = 1;
		ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
		    (ha->flags.secure_adapter) ? "Yes" : "No");
	}

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * honor the reset. */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	/* Let priority default to FCP, can be overridden by nvram_config */
	ha->fc4_type_priority = FC4_PRIORITY_FCP;

	ha->isp_ops->nvram_config(vha);

	if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
	    ha->fc4_type_priority != FC4_PRIORITY_NVME)
		ha->fc4_type_priority = FC4_PRIORITY_FCP;

	ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
	    ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	/* If smartsan enabled then require fdmi and rdp enabled */
	if (ql2xsmartsan) {
		ql2xfdmienable = 1;
		ql2xrdpenable = 1;
	}

	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return rval;
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return rval;
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	/* No point in continuing if firmware initialization failed. */
	if (rval != QLA_SUCCESS)
		return rval;

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return rval;
}
/**
 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2100_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
/**
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x20);
		rd_reg_word(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x0);
		rd_reg_word(&reg->ctrl_status);

		/* Release RISC module. */
		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_dword(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}
/**
 * qla2x00_isp_firmware() - Choose firmware image.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_isp_firmware(scsi_qla_host_t *vha)
{
	int  rval;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;

	/* Assume loading risc code */
	rval = QLA_FUNCTION_FAILED;

	if (ha->flags.disable_risc_code_load) {
		ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");

		/* Verify checksum of loaded RISC code. */
		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
		if (rval == QLA_SUCCESS) {
			/* And, verify we are not in ROM code. */
			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			    &area, &domain, &topo, &sw_cap);
		}
	}

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x007a,
		    "**** Load RISC code ****.\n");

	return rval;
}
/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;
	int rval = QLA_FUNCTION_FAILED;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return rval;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((rd_reg_word(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			rd_reg_word(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x20);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		wrt_reg_word(&reg->fpm_diag_config, 0x100);
		rd_reg_word(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			wrt_reg_word(&reg->fpm_diag_config, 0x0);
			rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		wrt_reg_word(&reg->ctrl_status, 0x10);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		wrt_reg_word(&reg->ctrl_status, 0);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */
	}

	wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
	wrt_reg_word(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to have a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((rd_reg_word(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);

	wrt_reg_word(&reg->semaphore, 0);

	/* Release RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
	rd_reg_word(&reg->hccr);			/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		wrt_reg_word(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
/**
 * qla81xx_reset_mpi() - Reset the MPI FW via Write MPI Register MBC.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla81xx_reset_mpi(scsi_qla_host_t *vha)
{
	uint16_t mb[4] = {0x1010, 0, 1, 0};

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	return qla81xx_write_mpi_register(vha, mb);
}
static int
qla_chk_risc_recovery(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	__le16 __iomem *mbptr = &reg->mailbox0;
	int i;
	u16 mb[32];
	int rc = QLA_SUCCESS;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return rc;

	/* this check is only valid after RISC reset */
	mb[0] = rd_reg_word(mbptr);
	mbptr++;
	if (mb[0] == 0xf) {
		rc = QLA_FUNCTION_FAILED;

		for (i = 1; i < 32; i++) {
			mb[i] = rd_reg_word(mbptr);
			mbptr++;
		}

		ql_log(ql_log_warn, vha, 0x1015,
		    "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
		    mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]);
		ql_log(ql_log_warn, vha, 0x1015,
		    "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
		    mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14],
		    mb[15]);
		ql_log(ql_log_warn, vha, 0x1015,
		    "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
		    mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22],
		    mb[23]);
		ql_log(ql_log_warn, vha, 0x1015,
		    "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
		    mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30],
		    mb[31]);
	}
	return rc;
}
/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;
	int print = 1;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC. */
	wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_dword(&reg->ctrl_status),
	    (rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	wrt_reg_dword(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	rd_reg_word(&reg->mailbox0);
	for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_word(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	rd_reg_dword(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((rd_reg_dword(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_dword(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			if (++abts_cnt < 5) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have to
				 * set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
	rd_reg_dword(&reg->hccr);

	wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	rd_reg_dword(&reg->hccr);

	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
	mdelay(10);
	rd_reg_dword(&reg->hccr);

	wd = rd_reg_word(&reg->mailbox0);
	for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt) {
			mdelay(1);
			if (print && qla_chk_risc_recovery(vha))
				print = 0;

			wd = rd_reg_word(&reg->mailbox0);
		} else {
			rval = QLA_FUNCTION_TIMEOUT;

			ql_log(ql_log_warn, vha, 0x015e,
			    "RISC reset timeout\n");
		}
	}

	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_word(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}
static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	*data = rd_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
}

static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	wrt_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
}
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	goto attempt;

force:
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}
/**
 * qla24xx_reset_chip() - Reset ISP24xx chip.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (pci_channel_offline(ha->pdev) &&
	    ha->flags.pci_channel_io_perm_failure) {
		return rval;
	}

	ha->isp_ops->disable_intrs(ha);

	qla25xx_manipulate_risc_semaphore(vha);

	/* Perform RISC reset. */
	rval = qla24xx_reset_risc(vha);

	return rval;
}
/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags = 0;
	uint16_t data;
	uint32_t cnt;
	uint16_t mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
	    &reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = rd_reg_word(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return rval;
}
/**
 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	if (IS_P3P_TYPE(ha))
		return QLA_SUCCESS;

	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

	rval = qla2x00_mbx_reg_test(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0082,
		    "Failed mailbox send register test.\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}

	return rval;
}
static void
qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
{
	dma_addr_t tc_dma;
	void *tc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_FWI2_CAPABLE(ha))
		return;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return;

	if (ha->fce) {
		ql_dbg(ql_dbg_init, vha, 0x00bd,
		    "%s: FCE Mem is already allocated.\n",
		    __func__);
		return;
	}

	/* Allocate memory for Fibre Channel Event Buffer. */
	tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
	    GFP_KERNEL);
	if (!tc) {
		ql_log(ql_log_warn, vha, 0x00be,
		    "Unable to allocate (%d KB) for FCE.\n",
		    FCE_SIZE / 1024);
		return;
	}

	ql_dbg(ql_dbg_init, vha, 0x00c0,
	    "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);

	ha->fce_dma = tc_dma;
	ha->fce = tc;
	ha->fce_bufs = FCE_NUM_BUFFERS;
}
static void
qla2x00_alloc_eft_trace(scsi_qla_host_t *vha)
{
	dma_addr_t tc_dma;
	void *tc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_FWI2_CAPABLE(ha))
		return;

	if (ha->eft) {
		ql_dbg(ql_dbg_init, vha, 0x00bd,
		    "%s: EFT Mem is already allocated.\n",
		    __func__);
		return;
	}

	/* Allocate memory for Extended Trace Buffer. */
	tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
	    GFP_KERNEL);
	if (!tc) {
		ql_log(ql_log_warn, vha, 0x00c1,
		    "Unable to allocate (%d KB) for EFT.\n",
		    EFT_SIZE / 1024);
		return;
	}

	ql_dbg(ql_dbg_init, vha, 0x00c3,
	    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);

	ha->eft_dma = tc_dma;
	ha->eft = tc;
}
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
	    eft_size, fce_size, mq_size;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct qla2xxx_fw_dump *fw_dump;

	if (ha->fw_dump) {
		ql_dbg(ql_dbg_init, vha, 0x00bd,
		    "Firmware dump already allocated.\n");
		return;
	}

	ha->fw_dump_cap_flags = 0;
	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
	req_q_size = rsp_q_size = 0;

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		fixed_size = sizeof(struct qla2100_fw_dump);
	} else if (IS_QLA23XX(ha)) {
		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
		    sizeof(uint16_t);
	} else if (IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA83XX(ha))
			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
		else if (IS_QLA81XX(ha))
			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
		else if (IS_QLA25XX(ha))
			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);

		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable) {
			if (!IS_QLA83XX(ha))
				mq_size = sizeof(struct qla2xxx_mq_chain);
			/*
			 * Allocate maximum buffer size for all queues - Q0.
			 * Resizing must be done at end-of-dump processing.
			 */
			mq_size += (ha->max_req_queues - 1) *
			    (req->length * sizeof(request_t));
			mq_size += (ha->max_rsp_queues - 1) *
			    (rsp->length * sizeof(response_t));
		}
		if (ha->tgt.atio_ring)
			mq_size += ha->tgt.atio_q_length * sizeof(request_t);

		qla2x00_alloc_fce_trace(vha);
		if (ha->fce)
			fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
		qla2x00_alloc_eft_trace(vha);
		if (ha->eft)
			eft_size = EFT_SIZE;
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		struct fwdt *fwdt = ha->fwdt;
		uint j;

		for (j = 0; j < 2; j++, fwdt++) {
			if (!fwdt->template) {
				ql_dbg(ql_dbg_init, vha, 0x00ba,
				    "-> fwdt%u no template\n", j);
				continue;
			}
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculating fwdump size...\n", j);
			fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
			    vha, fwdt->template);
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculated fwdump size = %#lx bytes\n",
			    j, fwdt->dump_size);
			dump_size += fwdt->dump_size;
		}
		/* Add space for spare MPI fw dump. */
		dump_size += ha->fwdt[1].dump_size;
	} else {
		req_q_size = req->length * sizeof(request_t);
		rsp_q_size = rsp->length * sizeof(response_t);
		dump_size = offsetof(struct qla2xxx_fw_dump, isp);
		dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
		    + eft_size;
		ha->chain_offset = dump_size;
		dump_size += mq_size + fce_size;

		if (ha->exchoffld_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
			    ha->exchoffld_size;
		if (ha->exlogin_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
			    ha->exlogin_size;
	}

	if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {

		ql_dbg(ql_dbg_init, vha, 0x00c5,
		    "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
		    __func__, dump_size, ha->fw_dump_len,
		    ha->fw_dump_alloc_len);

		fw_dump = vmalloc(dump_size);
		if (!fw_dump) {
			ql_log(ql_log_warn, vha, 0x00c4,
			    "Unable to allocate (%d KB) for firmware dump.\n",
			    dump_size / 1024);
		} else {
			mutex_lock(&ha->optrom_mutex);
			if (ha->fw_dumped) {
				memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;
				ha->fw_dump_alloc_len = dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Re-Allocated (%d KB) and save firmware dump.\n",
				    dump_size / 1024);
			} else {
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;

				ha->fw_dump_len = ha->fw_dump_alloc_len =
				    dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Allocated (%d KB) for firmware dump.\n",
				    dump_size / 1024);

				if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
					ha->mpi_fw_dump = (char *)fw_dump +
					    ha->fwdt[1].dump_size;
					mutex_unlock(&ha->optrom_mutex);
					return;
				}

				ha->fw_dump->signature[0] = 'Q';
				ha->fw_dump->signature[1] = 'L';
				ha->fw_dump->signature[2] = 'G';
				ha->fw_dump->signature[3] = 'C';
				ha->fw_dump->version = htonl(1);

				ha->fw_dump->fixed_size = htonl(fixed_size);
				ha->fw_dump->mem_size = htonl(mem_size);
				ha->fw_dump->req_q_size = htonl(req_q_size);
				ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

				ha->fw_dump->eft_size = htonl(eft_size);
				ha->fw_dump->eft_addr_l =
				    htonl(LSD(ha->eft_dma));
				ha->fw_dump->eft_addr_h =
				    htonl(MSD(ha->eft_dma));

				ha->fw_dump->header_size =
				    htonl(offsetof
				    (struct qla2xxx_fw_dump, isp));
			}
			mutex_unlock(&ha->optrom_mutex);
		}
	}
}
static void
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK	0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;

	if (!IS_QLA81XX(vha->hw))
		return;

	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	dc &= MPS_MASK;
	if (dc == (dw & MPS_MASK))
		goto done_release;

	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return;
}
int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
{
	/* Don't try to reallocate the array */
	if (req->outstanding_cmds)
		return QLA_SUCCESS;

	if (!IS_FWI2_CAPABLE(ha))
		req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
	else {
		if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
			req->num_outstanding_cmds = ha->cur_fw_xcb_count;
		else
			req->num_outstanding_cmds = ha->cur_fw_iocb_count;
	}

	req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
	    sizeof(srb_t *), GFP_KERNEL);

	if (!req->outstanding_cmds) {
		/*
		 * Try to allocate a minimal size just so we can get through
		 * initialization.
		 */
		req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
		req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
		    sizeof(srb_t *), GFP_KERNEL);

		if (!req->outstanding_cmds) {
			ql_log(ql_log_fatal, NULL, 0x0126,
			    "Failed to allocate memory for "
			    "outstanding_cmds for req_que %p.\n", req);
			req->num_outstanding_cmds = 0;
			return QLA_FUNCTION_FAILED;
		}
	}

	return QLA_SUCCESS;
}
#define PRINT_FIELD(_field, _flag, _str) {		\
	if (a0->_field & _flag) {\
		if (p) {\
			strcat(ptr, "|");\
			ptr++;\
			leftover--;\
		} \
		len = snprintf(ptr, leftover, "%s", _str);	\
		p = 1;\
		leftover -= len;\
		ptr += len; \
	} \
}
static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
{
#define STR_LEN 64
	struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
	u8 str[STR_LEN], *ptr, p;
	int leftover, len;

	memset(str, 0, STR_LEN);
	snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
	ql_dbg(ql_dbg_init, vha, 0x015a,
	    "SFP MFG Name: %s\n", str);

	memset(str, 0, STR_LEN);
	snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
	ql_dbg(ql_dbg_init, vha, 0x015c,
	    "SFP Part Name: %s\n", str);

	/* media */
	memset(str, 0, STR_LEN);
	ptr = str;
	leftover = STR_LEN;
	p = len = 0;
	PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
	PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
	PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
	PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
	PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
	PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
	PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
	ql_dbg(ql_dbg_init, vha, 0x0160,
	    "SFP Media: %s\n", str);

	/* link length */
	memset(str, 0, STR_LEN);
	ptr = str;
	leftover = STR_LEN;
	p = len = 0;
	PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
	PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
	PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
	PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
	PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
	ql_dbg(ql_dbg_init, vha, 0x0196,
	    "SFP Link Length: %s\n", str);

	/* link technology */
	memset(str, 0, STR_LEN);
	ptr = str;
	leftover = STR_LEN;
	p = len = 0;
	PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
	PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
	PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
	PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
	PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
	ql_dbg(ql_dbg_init, vha, 0x016e,
	    "SFP FC Link Tech: %s\n", str);

	if (a0->length_km)
		ql_dbg(ql_dbg_init, vha, 0x016f,
		    "SFP Distant: %d km\n", a0->length_km);
	if (a0->length_100m)
		ql_dbg(ql_dbg_init, vha, 0x0170,
		    "SFP Distant: %d m\n", a0->length_100m*100);
	if (a0->length_50um_10m)
		ql_dbg(ql_dbg_init, vha, 0x0189,
		    "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
	if (a0->length_62um_10m)
		ql_dbg(ql_dbg_init, vha, 0x018a,
		    "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
	if (a0->length_om4_10m)
		ql_dbg(ql_dbg_init, vha, 0x0194,
		    "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
	if (a0->length_om3_10m)
		ql_dbg(ql_dbg_init, vha, 0x0195,
		    "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
}
/*
 * qla24xx_detect_sfp()
 *
 * @vha: adapter state pointer.
 *
 * @return
 *	0 -- Configure firmware to use short-range settings -- normal
 *	     buffer-to-buffer credits.
 *
 *	1 -- Configure firmware to use long-range settings -- extra
 *	     buffer-to-buffer credits should be allocated with
 *	     ha->lr_distance containing distance settings from NVRAM or SFP
 *	     (if present).
 */
int
qla24xx_detect_sfp(scsi_qla_host_t *vha)
{
	int rc, used_nvram;
	struct sff_8247_a0 *a;
	struct qla_hw_data *ha = vha->hw;
	struct nvram_81xx *nv = ha->nvram;
#define LR_DISTANCE_UNKNOWN	2
	static const char * const types[] = { "Short", "Long" };
	static const char * const lengths[] = { "(10km)", "(5km)", "" };
	u8 ll = 0;

	/* Seed with NVRAM settings. */
	used_nvram = 0;
	ha->flags.lr_detected = 0;
	if (IS_BPM_RANGE_CAPABLE(ha) &&
	    (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
		used_nvram = 1;
		ha->flags.lr_detected = 1;
		ha->lr_distance =
		    (nv->enhanced_features >> LR_DIST_NV_POS)
		     & LR_DIST_NV_MASK;
	}

	if (!IS_BPM_ENABLED(vha))
		goto out;
	/* Determine SR/LR capabilities of SFP/Transceiver. */
	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
	if (rc)
		goto out;

	used_nvram = 0;
	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
	qla2xxx_print_sfp_info(vha);

	ha->flags.lr_detected = 0;
	ll = a->fc_ll_cc7;
	if (ll & FC_LL_VL || ll & FC_LL_L) {
		/* Long range, track length. */
		ha->flags.lr_detected = 1;

		if (a->length_km > 5 || a->length_100m > 50)
			ha->lr_distance = LR_DISTANCE_10K;
		else
			ha->lr_distance = LR_DISTANCE_5K;
	}

out:
	ql_dbg(ql_dbg_async, vha, 0x507b,
	    "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
	    types[ha->flags.lr_detected],
	    ha->flags.lr_detected ? lengths[ha->lr_distance] :
	    lengths[LR_DISTANCE_UNKNOWN],
	    used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
	return ha->flags.lr_detected;
}
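
/*
 * Recompute the per-queue-pair IOCB/exchange limits from the original
 * firmware resource counts (QLA_IOCB_PCT_LIMIT percent of the total).
 */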
static void __qla_adjust_iocb_limit(struct qla_qpair *qpair)
{
	u8 num_qps;
	u16 limit;
	struct qla_hw_data *ha = qpair->vha->hw;

	num_qps = ha->num_qpairs + 1;
	limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;

	qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
	qpair->fwres.iocbs_limit = limit;
	qpair->fwres.iocbs_qp_limit = limit / num_qps;

	qpair->fwres.exch_total = ha->orig_fw_xcb_count;
	qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
	    QLA_IOCB_PCT_LIMIT) / 100;
}
void qla_init_iocb_limit(scsi_qla_host_t *vha)
{
	u8 i;
	struct qla_hw_data *ha = vha->hw;

	__qla_adjust_iocb_limit(ha->base_qpair);
	ha->base_qpair->fwres.iocbs_used = 0;
	ha->base_qpair->fwres.exch_used = 0;

	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i]) {
			__qla_adjust_iocb_limit(ha->queue_pair_map[i]);
			ha->queue_pair_map[i]->fwres.iocbs_used = 0;
			ha->queue_pair_map[i]->fwres.exch_used = 0;
		}
	}

	ha->fwres.iocb_total = ha->orig_fw_iocb_count;
	ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
	ha->fwres.exch_total = ha->orig_fw_xcb_count;
	ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;

	atomic_set(&ha->fwres.iocb_used, 0);
	atomic_set(&ha->fwres.exch_used, 0);
}
void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
{
	u8 i;
	struct qla_hw_data *ha = vha->hw;

	__qla_adjust_iocb_limit(ha->base_qpair);

	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i])
			__qla_adjust_iocb_limit(ha->queue_pair_map[i]);
	}
}
/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;
	int done_once = 0;

	if (IS_P3P_TYPE(ha)) {
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		rd_reg_word(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

execute_fw_with_lr:
	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				/* Enable BPM support? */
				if (!done_once++ && qla24xx_detect_sfp(vha)) {
					ql_dbg(ql_dbg_init, vha, 0x00ca,
					    "Re-starting firmware -- BPM.\n");
					/* Best-effort - re-init. */
					ha->isp_ops->reset_chip(vha);
					ha->isp_ops->chip_diag(vha);
					goto execute_fw_with_lr;
				}

				if (IS_ZIO_THRESHOLD_CAPABLE(ha))
					qla27xx_set_zio_threshold(vha,
					    ha->last_zio_threshold);

				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

enable_82xx_npiv:
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
				    (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);
				qla_init_iocb_limit(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);

				qla_enable_fce_trace(vha);
				qla_enable_eft_trace(vha);
			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}

		/* Enable PUREX PASSTHRU */
		if (ql2xrdpenable || ha->flags.scm_supported_f ||
		    ha->flags.edif_enabled)
			qla25xx_set_els_cmds_supported(vha);
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		rd_reg_word(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);

			if (IS_QLA83XX(ha)) {
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return rval;
}
/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qla2x00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index = 0;
	rsp->status_srb = NULL;
	pkt = rsp->ring_ptr;
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		pkt++;
	}
}
/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
		    __func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}
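/*
 * qla24xx_update_fw_options() - Tune additional firmware options for
 * FWI2-capable adapters: FLOGI retry in P2P mode, optional routing of
 * PUREX/ABTS/RIDA to the ATIO queue (ql2xmvasynctoatio) when target or
 * dual mode is enabled, per-exchange tracking, ELS exchange reservation,
 * secure N2N PLOGI handling for edif, and serdes parameters taken from
 * fw_seriallink_options24[].
 */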
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (IS_P3P_TYPE(ha))
		return;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
		    __func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~(BIT_4);

		/* Reserve 1/2 of emergency exchanges for ELS.*/
		if (qla2xuseresexchforels)
			ha->fw_options[2] |= BIT_8;
		else
			ha->fw_options[2] &= ~BIT_8;

		/*
		 * N2N: set Secure=1 for PLOGI ACC and
		 * fw shall not send PRLI after PLOGI Acc
		 */
		if (ha->flags.edif_enabled &&
		    DBELL_ACTIVE(vha)) {
			ha->fw_options[3] |= BIT_15;
			ha->flags.n2n_fw_acc_sec = 1;
		} else {
			ha->fw_options[3] &= ~BIT_15;
			ha->flags.n2n_fw_acc_sec = 0;
		}
	}

	if (ql2xrdpenable || ha->flags.scm_supported_f ||
	    ha->flags.edif_enabled)
		ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;

	/* Enable Async 8130/8131 events -- transceiver insertion/removal */
	if (IS_BPM_RANGE_CAPABLE(ha))
		ha->fw_options[3] |= BIT_10;

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}
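/*
 * qla2x00_config_rings() - Program request/response ring lengths and DMA
 * addresses into the legacy (non-FWI2) initialization control block and
 * zero the ring in/out pointer registers before firmware initialization.
 */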
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
	put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);

	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
	rd_reg_word(ISP_RSP_Q_OUT(ha, reg));	/* PCI Posting. */
}
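/*
 * qla24xx_config_rings() - Program the ISP24xx-and-later request, response
 * and ATIO ring parameters into the 24xx initialization control block,
 * select the MSI-X vector for the base queue, and zero the queue in/out
 * pointer registers before firmware initialization.
 */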
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &icb->request_q_address);
	put_unaligned_le64(rsp->dma, &icb->response_q_address);

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);

	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
	} else {
		wrt_reg_dword(&reg->isp24.req_q_in, 0);
		wrt_reg_dword(&reg->isp24.req_q_out, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
	}

	qlt_24xx_config_rings(vha);

	/* If the user has configured the speed, set it here */
	if (ha->set_data_rate) {
		ql_dbg(ql_dbg_init, vha, 0x00fd,
		    "Speed set by user : %s Gbps \n",
		    qla2x00_get_link_speed_str(ha, ha->set_data_rate));
		icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
	}

	rd_reg_word(&ioreg->hccr);
}
/**
 * qla2x00_init_rings() - Initializes firmware.
 * @vha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		req->out_ptr = (uint16_t *)(req->ring + req->length);
		*req->out_ptr = 0;
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware. */
		req->ring_ptr = req->ring;
		req->ring_index = 0;
		req->cnt = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
		rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
			qlafx00_init_response_q_entries(rsp);
		else
			qla2x00_init_response_q_entries(rsp);
	}

	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	ha->tgt.atio_ring_index = 0;
	/* Initialize ATIO queue entries */
	qlt_init_atio_q_entries(vha);

	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (IS_QLAFX00(ha)) {
		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
		goto next_check;
	}

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	ql_dbg(ql_dbg_init, vha, 0x00d1,
	    "Issue init firmware FW opt 1-3= %08x %08x %08x.\n",
	    le32_to_cpu(mid_init_cb->init_cb.firmware_options_1),
	    le32_to_cpu(mid_init_cb->init_cb.firmware_options_2),
	    le32_to_cpu(mid_init_cb->init_cb.firmware_options_3));

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->cur_fw_xcb_count);
		ha->flags.dport_enabled =
		    (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
		     BIT_7) != 0;
		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
		/* FA-WWPN Status */
		ha->flags.fawwpn_enabled =
		    (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
		     BIT_6) != 0;
		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
		/* Init_cb will be reused for other command(s). Save a backup copy of port_name */
		memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE);
	}

	/* ELS pass through payload is limit by frame size. */
	if (ha->flags.edif_enabled)
		mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00d2,
		    "Init Firmware **** FAILED ****.\n");
	} else {
		ql_dbg(ql_dbg_init, vha, 0x00d3,
		    "Init Firmware -- success.\n");
		vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
	}

	return (rval);
}
/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[6];
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	do {
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				    (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
			    ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}
/*
 * qla2x00_configure_hba
 *      Setup adapter context.
 *
 * Input:
 *      ha = adapter state pointer.
 *
 * Returns:
 *      0 = success
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	uint16_t topo;
	uint16_t sw_cap;
	uint8_t al_pa;
	uint8_t area;
	uint8_t domain;
	char connect_type[22];
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	port_id_t id;
	unsigned long flags;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;

	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->switch_cap = 0;
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->switch_cap = 0;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->switch_cap = 0;
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (vha->hw->flags.edif_enabled) {
		if (topo != 2)
			qla_update_host_map(vha, id);
	} else if (!(topo == 2 && ha->flags.n2n_bigger))
		qla_update_host_map(vha, id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return (rval);
}
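/*
 * qla2x00_set_model_info() - Record the adapter model number/description,
 * either from the NVRAM-supplied model string, from the qla2x00_model_name[]
 * lookup table keyed by the PCI subsystem device ID, or from the supplied
 * default string; FWI2-capable parts additionally pull the description from
 * the VPD "\x82" field.
 */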
static void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
		       const char *def)
{
	char *st, *en;
	uint16_t index;
	uint64_t zero[2] = { 0 };
	struct qla_hw_data *ha = vha->hw;
	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);

	if (len > sizeof(zero))
		len = sizeof(zero);
	if (memcmp(model, &zero, len) != 0) {
		memcpy(ha->model_number, model, len);
		st = en = ha->model_number;
		en += len - 1;
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strscpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc));
	} else {
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strscpy(ha->model_number,
			    qla2x00_model_name[index * 2],
			    sizeof(ha->model_number));
			strscpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc));
		} else {
			strscpy(ha->model_number, def,
			    sizeof(ha->model_number));
		}
	}
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}
/* On sparc systems, obtain port and node WWN from firmware
 * NVRAM.
 */
static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}
/*
 * NVRAM configuration for ISP 2xxx
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Output:
 *      initialization control block in response_ring
 *      host adapters parameters in host adapter block
 *
 * Returns:
 *      0 = success.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t chksum = 0;
	uint16_t cnt;
	uint8_t *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t *icb = ha->init_cb;
	nvram_t *nv = ha->nvram;
	uint8_t *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->nvram_base = 0;
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum. */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(2048);
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(1024);
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = cpu_to_le16(1024);
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;

		rval = 1;
	}

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not supplied.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows for modification to
	 * internal driver logging.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer : 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			ha->zio_mode = QLA_ZIO_MODE_6;

			ql_log(ql_log_info, vha, 0x0068,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0069,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
	int old_state;

	old_state = atomic_read(&fcport->state);
	atomic_set(&fcport->state, state);

	/* Don't print state transitions during initial allocation of fcport */
	if (old_state && old_state != state) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
		    "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
		    fcport->port_name, port_state_str[old_state],
		    port_state_str[state], fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);
	}
}
/**
 * qla2x00_alloc_fcport() - Allocate a generic fcport.
 * @vha: HA context
 * @flags: allocation flags
 *
 * Returns a pointer to the allocated fcport, or NULL, if none available.
 */
fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
{
	fc_port_t *fcport;

	fcport = kzalloc(sizeof(fc_port_t), flags);
	if (!fcport)
		return NULL;

	fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
	    sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
	    flags);
	if (!fcport->ct_desc.ct_sns) {
		ql_log(ql_log_warn, vha, 0xd049,
		    "Failed to allocate ct_sns request.\n");
		kfree(fcport);
		return NULL;
	}

	/* Setup fcport template structure. */
	fcport->vha = vha;
	fcport->port_type = FCT_UNKNOWN;
	fcport->loop_id = FC_NO_LOOP_ID;
	qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	fcport->supported_classes = FC_COS_UNSPECIFIED;
	fcport->fp_speed = PORT_SPEED_UNKNOWN;

	fcport->disc_state = DSC_DELETED;
	fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
	fcport->deleted = QLA_SESS_DELETED;
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->chip_reset = vha->hw->base_qpair->chip_reset;
	fcport->logout_on_delete = 1;
	fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
	fcport->tgt_short_link_down_cnt = 0;
	fcport->dev_loss_tmo = 0;

	if (!fcport->ct_desc.ct_sns) {
		ql_log(ql_log_warn, vha, 0xd049,
		    "Failed to allocate ct_sns request.\n");
		kfree(fcport);
		return NULL;
	}

	INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
	INIT_WORK(&fcport->free_work, qlt_free_session_done);
	INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
	INIT_LIST_HEAD(&fcport->gnl_entry);
	INIT_LIST_HEAD(&fcport->list);
	INIT_LIST_HEAD(&fcport->unsol_ctx_head);

	INIT_LIST_HEAD(&fcport->sess_cmd_list);
	spin_lock_init(&fcport->sess_cmd_lock);

	spin_lock_init(&fcport->edif.sa_list_lock);
	INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
	INIT_LIST_HEAD(&fcport->edif.rx_sa_list);

	spin_lock_init(&fcport->edif.indx_list_lock);
	INIT_LIST_HEAD(&fcport->edif.edif_indx_list);

	return fcport;
}
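/*
 * qla2x00_free_fcport() - Release the CT request buffer, unlink the fcport
 * from its lists, free its loop ID and edif resources, then free the
 * structure itself.
 */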
void
qla2x00_free_fcport(fc_port_t *fcport)
{
	if (fcport->ct_desc.ct_sns) {
		dma_free_coherent(&fcport->vha->hw->pdev->dev,
		    sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
		    fcport->ct_desc.ct_sns_dma);

		fcport->ct_desc.ct_sns = NULL;
	}

	qla_edif_flush_sa_ctl_lists(fcport);
	list_del(&fcport->list);
	qla2x00_clear_loop_id(fcport);

	qla_edif_list_del(fcport);

	kfree(fcport);
}
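/*
 * qla_get_login_template() - Fetch the firmware's PLOGI payload template
 * into init_cb and store it, byte-swapped to big endian, in
 * plogi_els_payld so later ELS/PLOGI IOCBs can reuse it.
 */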
static void qla_get_login_template(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval;
	u32 *bp, sz;
	__be32 *q;

	memset(ha->init_cb, 0, ha->init_cb_size);
	sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
	rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
	    ha->init_cb, sz);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00d1,
		    "PLOGI ELS param read fail.\n");
		return;
	}
	q = (__be32 *)&ha->plogi_els_payld.fl_csp;

	bp = (uint32_t *)ha->init_cb;
	cpu_to_be32_array(q, bp, sz / 4);
	ha->flags.plogi_template_valid = 1;
}
/*
 * qla2x00_configure_loop
 *      Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success.
 *      1 = error.
 *      2 = database was full and device was not configured.
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);
	qla_get_login_template(vha);

	/* Determine what we need to do */
	if ((ha->current_topology == ISP_CFG_FL ||
	    ha->current_topology == ISP_CFG_F) &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_NL ||
		   ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * use link up to wake up app to get ready for
			 * authentication.
			 */
			if (ha->flags.edif_enabled && DBELL_INACTIVE(vha))
				qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
				    ha->link_data_rate);

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
				qlt_24xx_process_atio_queue(vha, 0);
				spin_unlock_irqrestore(&ha->tgt.atio_lock,
				    flags);
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally. local port wwpn %8phN id %06x)\n",
		    __func__, vha->port_name, vha->d_id.b24);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}
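/*
 * qla2x00_configure_n2n_loop() - Kick off login for the peer N2N port.  If
 * no fcport with n2n_flag is found yet, bump the scan retry counter and
 * request another local-loop/loop-resync pass (up to MAX_SCAN_RETRIES).
 */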
static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
{
	unsigned long flags;
	fc_port_t *fcport;

	ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__);

	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->n2n_flag) {
			qla24xx_fcport_handle_login(vha, fcport);
			return QLA_SUCCESS;
		}
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_retry++;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	}
	return QLA_FUNCTION_FAILED;
}
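/*
 * qla_reinitialize_link() - Force the link down and issue a full login LIP
 * so devices that have not yet logged in get another chance.
 */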
static void
qla_reinitialize_link(scsi_qla_host_t *vha)
{
	int rval;

	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	rval = qla2x00_full_login_lip(vha);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n");
	} else {
		ql_dbg(ql_dbg_disc, vha, 0xd051,
		    "Link reinitialization failed (%d)\n", rval);
	}
}
/*
 * qla2x00_configure_local_loop
 *	Updates Fibre Channel Device Database with local loop devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 */
static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
	int rval, rval2;
	int found;
	fc_port_t *fcport, *new_fcport;
	uint16_t index;
	uint16_t entries;
	struct gid_list_info *gid;
	uint16_t loop_id;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Initiate N2N login. */
	if (N2N_TOPO(ha))
		return qla2x00_configure_n2n_loop(vha);

	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices. */
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	    &entries);
	if (rval != QLA_SUCCESS)
		goto err;

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    ha->gid_list, entries * sizeof(*ha->gid_list));

	if (entries == 0) {
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_retry++;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
			u8 loop_map_entries = 0;
			int rc;

			rc = qla2x00_get_fcal_position_map(vha, NULL,
			    &loop_map_entries);
			if (rc == QLA_SUCCESS && loop_map_entries > 1) {
				/*
				 * There are devices that are still not logged
				 * in. Reinitialize to give them a chance.
				 */
				qla_reinitialize_link(vha);
				return QLA_FUNCTION_FAILED;
			}
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}
	} else {
		vha->scan.scan_retry = 0;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = QLA_FCPORT_SCAN;
	}

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
		goto err;
	}
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/* Add devices to port list. */
	gid = ha->gid_list;
	for (index = 0; index < entries; index++) {
		domain = gid->domain;
		area = gid->area;
		al_pa = gid->al_pa;
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = gid->loop_id_2100;
		else
			loop_id = le16_to_cpu(gid->loop_id);
		gid = (void *)gid + ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)
			continue;

		/* Bypass if not same domain and area of adapter. */
		if (area && domain && ((area != vha->d_id.b.area) ||
		    (domain != vha->d_id.b.domain)) &&
		    (ha->current_topology == ISP_CFG_NL))
			continue;

		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)
			continue;

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->scan_state = QLA_FCPORT_FOUND;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				continue;
			}
		}

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list. */
		found = 0;
		fcport = NULL;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			    WWN_SIZE);
			fcport->scan_state = QLA_FCPORT_FOUND;
			if (fcport->login_retry == 0) {
				fcport->login_retry = vha->hw->login_retry_count;
				ql_dbg(ql_dbg_disc, vha, 0x2135,
				    "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
				    fcport->port_name, fcport->loop_id,
				    fcport->login_retry);
			}
			found++;
			break;
		}

		if (!found) {
			/* New device, add to fcports list. */
			list_add_tail(&new_fcport->list, &vha->vp_fcports);

			/* Allocate a new replacement fcport. */
			fcport = new_fcport;

			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

			if (new_fcport == NULL) {
				ql_log(ql_log_warn, vha, 0xd031,
				    "Failed to allocate memory for fcport.\n");
				rval = QLA_MEMORY_ALLOC_FAILED;
				goto err;
			}
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		}

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
				    ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion(fcport);
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}

	qla2x00_free_fcport(new_fcport);

	return rval;

err:
	ql_dbg(ql_dbg_disc, vha, 0x2098,
	    "Configure local loop error exit: rval=%x.\n", rval);
	return rval;
}
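/*
 * qla2x00_iidma_fcport() - Apply iIDMA (per-port data rate) settings for an
 * online fcport whose GPSC-reported speed is known and not above the HBA
 * link rate.
 */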
void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_IIDMA_CAPABLE(ha))
		return;

	if (atomic_read(&fcport->state) != FCS_ONLINE)
		return;

	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
	    fcport->fp_speed > ha->link_data_rate ||
	    !ha->flags.gpsc_supported)
		return;

	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
	    mb);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2004,
		    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
		    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2005,
		    "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
		    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
		    fcport->fp_speed, fcport->port_name);
	}
}
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
}
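/*
 * qla_post_iidma_work() - Queue a QLA_EVT_IIDMA work item so the iIDMA and
 * FCP priority updates run from the driver work queue instead of the
 * caller's context.
 */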
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}
/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	if (atomic_read(&fcport->state) == FCS_ONLINE)
		return;

	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");
		return;
	}

	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
	fcport->dev_loss_tmo = rport->dev_loss_tmo;

	rport->supported_classes = fcport->supported_classes;

	rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
	if (fcport->port_type & FCT_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (fcport->port_type & FCT_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (fcport->port_type & FCT_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	fc_remote_port_rolechg(rport, rport_ids.roles);

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n",
	    __func__, fcport->port_name, vha->host_no,
	    rport->scsi_target_id, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" :
	    ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
}
/*
 * qla2x00_update_fcport
 *	Updates device on list.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	0  - Success
 *	BIT_0 - error
 *
 * Context:
 *	Kernel context.
 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	unsigned long flags;

	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

	spin_lock_irqsave(&vha->work_lock, flags);
	fcport->deleted = 0;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (vha->hw->current_topology == ISP_CFG_NL)
		fcport->logout_on_delete = 0;
	else
		fcport->logout_on_delete = 1;
	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;

	if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) {
		fcport->tgt_short_link_down_cnt++;
		fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
	}

	switch (vha->hw->current_topology) {
	case ISP_CFG_N:
	case ISP_CFG_NL:
		fcport->keep_nport_handle = 1;
		break;
	default:
		break;
	}

	qla2x00_iidma_fcport(vha, fcport);

	qla2x00_dfs_create_rport(vha, fcport);

	qla24xx_update_fcport_fcp_prio(vha, fcport);

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		break;
	case MODE_TARGET:
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
		    !vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	case MODE_DUAL:
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
		    !vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	default:
		break;
	}

	if (NVME_TARGET(vha->hw, fcport))
		qla_nvme_register_remote(vha, fcport);

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
		if (fcport->id_changed) {
			fcport->id_changed = 0;
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gfpnid fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gfpnid_work(vha, fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gpsc_work(vha, fcport);
		}
	}

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
}
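/*
 * qla_register_fcport_fn() - Work-queue handler behind fcport->reg_work.
 * Registers/updates the fcport and, if RSCNs arrived while the update was
 * in flight (rscn_gen changed), either schedules the session for deletion
 * or posts an ADISC to re-validate it, based on next_disc_state.
 */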
void qla_register_fcport_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
	u32 rscn_gen = fcport->rscn_gen;
	u16 data[2];

	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	qla2x00_update_fcport(fcport->vha, fcport);

	ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
	    "%s rscn gen %d/%d next DS %d\n", __func__,
	    rscn_gen, fcport->rscn_gen, fcport->next_disc_state);

	if (rscn_gen != fcport->rscn_gen) {
		/* RSCN(s) came in while registration */
		switch (fcport->next_disc_state) {
		case DSC_DELETE_PEND:
			qlt_schedule_sess_for_deletion(fcport);
			break;
		case DSC_ADISC:
			data[0] = data[1] = 0;
			qla2x00_post_async_adisc_work(fcport->vha, fcport,
			    data);
			break;
		default:
			break;
		}
	}
}
/*
 * qla2x00_configure_fabric
 *      Setup SNS devices with loop ID's.
 *
 * Inputs:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success.
 *      BIT_0 = error
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint16_t loop_id;
	struct qla_hw_data *ha = vha->hw;
	int discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;

	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, vha, 0x20ff,
		    "Failed to get Fabric Port Name\n");

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",
			    rval);
	}

	do {
		qla2x00_mgmt_svr_login(vha);

		/* Ensure we are logged into the SNS. */
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}

		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}

		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen. */
		qlt_do_generation_tick(vha, &discovery_gen);

		if (USE_ASYNC_SCAN(ha)) {
			/* start of scan begins here */
			vha->scan.rscn_gen_end = atomic_read(&vha->rscn_gen);
			qla_fab_scan_start(vha);
		} else {
			list_for_each_entry(fcport, &vha->vp_fcports, list)
				fcport->scan_state = QLA_FCPORT_SCAN;

			rval = qla2x00_find_all_fabric_devs(vha);
		}
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return rval;
}
/*
 * qla2x00_find_all_fabric_devs
 *
 * Input:
 *	ha = adapter block pointer.
 *	dev = database device entry pointer.
 *
 * Returns:
 *	0 = success.
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
    int         rval;
    uint16_t    loop_id;
    fc_port_t   *fcport, *new_fcport;
    int         found;

    sw_info_t   *swl;
    int         swl_idx;
    int         first_dev, last_dev;
    port_id_t   wrap = {}, nxt_d_id;
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
    unsigned long flags;

    rval = QLA_SUCCESS;

    /* Try GID_PT to get device list, else GAN. */
    if (!ha->swl)
        ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
            GFP_KERNEL);
    swl = ha->swl;
    if (!swl) {
        /*EMPTY*/
        ql_dbg(ql_dbg_disc, vha, 0x209c,
            "GID_PT allocations failed, fallback on GA_NXT.\n");
    } else {
        memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
        if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
            swl = NULL;
            if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                return rval;
        } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
            swl = NULL;
            if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                return rval;
        } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
            swl = NULL;
            if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                return rval;
        } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
            swl = NULL;
            if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                return rval;
        }

        /* If other queries succeeded probe for FC-4 type */
        if (swl) {
            qla2x00_gff_id(vha, swl);
            if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                return rval;
        }
    }
    swl_idx = 0;

    /* Allocate temporary fcport for any new fcports discovered. */
    new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
    if (new_fcport == NULL) {
        ql_log(ql_log_warn, vha, 0x209d,
            "Failed to allocate memory for fcport.\n");
        return (QLA_MEMORY_ALLOC_FAILED);
    }
    new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
    /* Set start port ID scan at adapter ID. */
    first_dev = 1;
    last_dev = 0;

    /* Starting free loop ID. */
    loop_id = ha->min_external_loopid;
    for (; loop_id <= ha->max_loop_id; loop_id++) {
        if (qla2x00_is_reserved_id(vha, loop_id))
            continue;

        if (ha->current_topology == ISP_CFG_FL &&
            (atomic_read(&vha->loop_down_timer) ||
             LOOP_TRANSITION(vha))) {
            atomic_set(&vha->loop_down_timer, 0);
            set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
            set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
            break;
        }

        if (swl != NULL) {
            if (last_dev) {
                wrap.b24 = new_fcport->d_id.b24;
            } else {
                new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
                memcpy(new_fcport->node_name,
                    swl[swl_idx].node_name, WWN_SIZE);
                memcpy(new_fcport->port_name,
                    swl[swl_idx].port_name, WWN_SIZE);
                memcpy(new_fcport->fabric_port_name,
                    swl[swl_idx].fabric_port_name, WWN_SIZE);
                new_fcport->fp_speed = swl[swl_idx].fp_speed;
                new_fcport->fc4_type = swl[swl_idx].fc4_type;

                new_fcport->nvme_flag = 0;
                if (vha->flags.nvme_enabled &&
                    swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
                    ql_log(ql_log_info, vha, 0x2131,
                        "FOUND: NVME port %8phC as FC Type 28h\n",
                        new_fcport->port_name);
                }

                if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
                    last_dev = 1;
                }
                swl_idx++;
            }
        } else {
            /* Send GA_NXT to the switch */
            rval = qla2x00_ga_nxt(vha, new_fcport);
            if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x209e,
                    "SNS scan failed -- assuming "
                    "zero-entry result.\n");
                rval = QLA_SUCCESS;
                break;
            }
        }

        /* If wrap on switch device list, exit. */
        if (first_dev) {
            wrap.b24 = new_fcport->d_id.b24;
            first_dev = 0;
        } else if (new_fcport->d_id.b24 == wrap.b24) {
            ql_dbg(ql_dbg_disc, vha, 0x209f,
                "Device wrap (%02x%02x%02x).\n",
                new_fcport->d_id.b.domain,
                new_fcport->d_id.b.area,
                new_fcport->d_id.b.al_pa);
            break;
        }

        /* Bypass if same physical adapter. */
        if (new_fcport->d_id.b24 == base_vha->d_id.b24)
            continue;

        /* Bypass virtual ports of the same host. */
        if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
            continue;

        /* Bypass if same domain and area of adapter. */
        if (((new_fcport->d_id.b24 & 0xffff00) ==
            (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
            ISP_CFG_FL)
            continue;

        /* Bypass reserved domain fields. */
        if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
            continue;

        /* Bypass ports whose FCP-4 type is not FCP_SCSI */
        if (ql2xgffidenable &&
            (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
            new_fcport->fc4_type != 0))
            continue;

        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

        /* Locate matching device in database. */
        found = 0;
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
            if (memcmp(new_fcport->port_name, fcport->port_name,
                WWN_SIZE))
                continue;

            fcport->scan_state = QLA_FCPORT_FOUND;

            found++;

            /* Update port state. */
            memcpy(fcport->fabric_port_name,
                new_fcport->fabric_port_name, WWN_SIZE);
            fcport->fp_speed = new_fcport->fp_speed;

            /*
             * If address the same and state FCS_ONLINE
             * (or in target mode), nothing changed.
             */
            if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
                (atomic_read(&fcport->state) == FCS_ONLINE ||
                 (vha->host->active_mode == MODE_TARGET))) {
                break;
            }

            if (fcport->login_retry == 0)
                fcport->login_retry =
                    vha->hw->login_retry_count;
            /*
             * If device was not a fabric device before.
             */
            if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
                fcport->d_id.b24 = new_fcport->d_id.b24;
                qla2x00_clear_loop_id(fcport);
                fcport->flags |= (FCF_FABRIC_DEVICE |
                    FCF_LOGIN_NEEDED);
                break;
            }

            /*
             * Port ID changed or device was marked to be updated;
             * Log it out if still logged in and mark it for
             * relogin later.
             */
            if (qla_tgt_mode_enabled(base_vha)) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
                    "port changed FC ID, %8phC"
                    " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
                    fcport->port_name,
                    fcport->d_id.b.domain,
                    fcport->d_id.b.area,
                    fcport->d_id.b.al_pa,
                    fcport->loop_id,
                    new_fcport->d_id.b.domain,
                    new_fcport->d_id.b.area,
                    new_fcport->d_id.b.al_pa);
                fcport->d_id.b24 = new_fcport->d_id.b24;
                break;
            }

            fcport->d_id.b24 = new_fcport->d_id.b24;
            fcport->flags |= FCF_LOGIN_NEEDED;
            break;
        }

        if (found && NVME_TARGET(vha->hw, fcport)) {
            if (fcport->disc_state == DSC_DELETE_PEND) {
                qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
                vha->fcport_count--;
                fcport->login_succ = 0;
            }
        }

        if (found) {
            spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
            continue;
        }
        /* If device was not in our fcports list, then add it. */
        new_fcport->scan_state = QLA_FCPORT_FOUND;
        list_add_tail(&new_fcport->list, &vha->vp_fcports);

        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

        /* Allocate a new replacement fcport. */
        nxt_d_id.b24 = new_fcport->d_id.b24;
        new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (new_fcport == NULL) {
            ql_log(ql_log_warn, vha, 0xd032,
                "Memory allocation failed for fcport.\n");
            return (QLA_MEMORY_ALLOC_FAILED);
        }
        new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
        new_fcport->d_id.b24 = nxt_d_id.b24;
    }

    qla2x00_free_fcport(new_fcport);

    /*
     * Logout all previous fabric dev marked lost, except FCP2 devices.
     */
    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
            break;

        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
            continue;

        if (fcport->scan_state == QLA_FCPORT_SCAN) {
            if ((qla_dual_mode_enabled(vha) ||
                qla_ini_mode_enabled(vha)) &&
                atomic_read(&fcport->state) == FCS_ONLINE) {
                qla2x00_mark_device_lost(vha, fcport,
                    ql2xplogiabsentdevice);
                if (fcport->loop_id != FC_NO_LOOP_ID &&
                    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
                    fcport->port_type != FCT_INITIATOR &&
                    fcport->port_type != FCT_BROADCAST) {
                    ql_dbg(ql_dbg_disc, vha, 0x20f0,
                        "%s %d %8phC post del sess\n",
                        __func__, __LINE__,
                        fcport->port_name);
                    qlt_schedule_sess_for_deletion(fcport);
                    continue;
                }
            }
        }

        if (fcport->scan_state == QLA_FCPORT_FOUND &&
            (fcport->flags & FCF_LOGIN_NEEDED) != 0)
            qla24xx_fcport_handle_login(vha, fcport);
    }
    return (rval);
}
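/*
 * Illustrative sketch (not in the driver): qla2x00_find_all_fabric_devs()
 * above uses a mark-and-sweep pattern -- every fcport is first marked
 * QLA_FCPORT_SCAN, ports the switch still reports are flipped to
 * QLA_FCPORT_FOUND, and anything left marked SCAN is treated as lost.
 * A hypothetical helper showing the sweep decision in isolation:
 */
static inline bool qla_fcport_went_missing_example(const fc_port_t *fcport)
{
    /* Still carrying the pre-scan mark and known to be a fabric device:
     * the switch no longer reports it. */
    return (fcport->flags & FCF_FABRIC_DEVICE) &&
           fcport->scan_state == QLA_FCPORT_SCAN;
}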
/* FW does not set aside Loop id for MGMT Server/FFFFFAh */
static int
qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
{
    int loop_id = FC_NO_LOOP_ID;
    int lid = NPH_MGMT_SERVER - vha->vp_idx;
    unsigned long flags;
    struct qla_hw_data *ha = vha->hw;

    if (vha->vp_idx == 0) {
        set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
        return NPH_MGMT_SERVER;
    }

    /* pick id from high and work down to low */
    spin_lock_irqsave(&ha->vport_slock, flags);
    for (; lid > 0; lid--) {
        if (!test_bit(lid, vha->hw->loop_id_map)) {
            set_bit(lid, vha->hw->loop_id_map);
            loop_id = lid;
            break;
        }
    }
    spin_unlock_irqrestore(&ha->vport_slock, flags);

    return loop_id;
}
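/*
 * Illustrative sketch (example only): the reservation above walks the
 * loop_id_map bitmap from NPH_MGMT_SERVER - vp_idx downwards and claims the
 * first clear bit.  The same search written against a plain bitmap, using
 * the standard bitops already used in this file:
 */
static inline int qla_claim_highest_free_id_example(unsigned long *map, int start)
{
    int lid;

    for (lid = start; lid > 0; lid--) {
        if (!test_bit(lid, map)) {
            set_bit(lid, map);
            return lid;             /* claimed */
        }
    }
    return FC_NO_LOOP_ID;           /* nothing free */
}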
/*
 * qla2x00_fabric_login
 *	Issue fabric login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	device = pointer to FC device type structure.
 *
 * Returns:
 *      0 - Login successfully
 *      1 - Login failed
 *      2 - Initiator device
 *      3 - Fatal error
 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
    int rval;
    int retry;
    uint16_t tmp_loopid;
    uint16_t mb[MAILBOX_REGISTER_COUNT];
    struct qla_hw_data *ha = vha->hw;

    retry = 0;
    tmp_loopid = 0;

    for (;;) {
        ql_dbg(ql_dbg_disc, vha, 0x2000,
            "Trying Fabric Login w/loop id 0x%04x for port "
            "%02x%02x%02x.\n",
            fcport->loop_id, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa);

        /* Login fcport on switch. */
        rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
            fcport->d_id.b.domain, fcport->d_id.b.area,
            fcport->d_id.b.al_pa, mb, BIT_0);
        if (rval != QLA_SUCCESS) {
            return rval;
        }
        if (mb[0] == MBS_PORT_ID_USED) {
            /*
             * Device has another loop ID.  The firmware team
             * recommends the driver perform an implicit login with
             * the specified ID again. The ID we just used is saved
             * here so we return with an ID that can be tried by
             * the next login.
             */
            retry++;
            tmp_loopid = fcport->loop_id;
            fcport->loop_id = mb[1];

            ql_dbg(ql_dbg_disc, vha, 0x2001,
                "Fabric Login: port in use - next loop "
                "id=0x%04x, port id= %02x%02x%02x.\n",
                fcport->loop_id, fcport->d_id.b.domain,
                fcport->d_id.b.area, fcport->d_id.b.al_pa);

        } else if (mb[0] == MBS_COMMAND_COMPLETE) {
            /*
             * Login succeeded.
             */
            if (retry) {
                /* A retry occurred before. */
                *next_loopid = tmp_loopid;
            } else {
                /*
                 * No retry occurred before. Just increment the
                 * ID value for next login.
                 */
                *next_loopid = (fcport->loop_id + 1);
            }

            if (mb[1] & BIT_0) {
                fcport->port_type = FCT_INITIATOR;
            } else {
                fcport->port_type = FCT_TARGET;
                if (mb[1] & BIT_1) {
                    fcport->flags |= FCF_FCP2_DEVICE;
                }
            }

            if (mb[10] & BIT_0)
                fcport->supported_classes |= FC_COS_CLASS2;
            if (mb[10] & BIT_1)
                fcport->supported_classes |= FC_COS_CLASS3;

            if (IS_FWI2_CAPABLE(ha)) {
                if (mb[10] & BIT_7)
                    fcport->flags |=
                        FCF_CONF_COMP_SUPPORTED;
            }

            rval = QLA_SUCCESS;
            break;
        } else if (mb[0] == MBS_LOOP_ID_USED) {
            /*
             * Loop ID already used, try next loop ID.
             */
            fcport->loop_id++;
            rval = qla2x00_find_new_loop_id(vha, fcport);
            if (rval != QLA_SUCCESS) {
                /* Ran out of loop IDs to use */
                break;
            }
        } else if (mb[0] == MBS_COMMAND_ERROR) {
            /*
             * Firmware possibly timed out during login. If NO
             * retries are left to do then the device is declared
             * dead.
             */
            *next_loopid = fcport->loop_id;
            ha->isp_ops->fabric_logout(vha, fcport->loop_id,
                fcport->d_id.b.domain, fcport->d_id.b.area,
                fcport->d_id.b.al_pa);
            qla2x00_mark_device_lost(vha, fcport, 1);

            rval = 1;
            break;
        } else {
            /*
             * unrecoverable / not handled error
             */
            ql_dbg(ql_dbg_disc, vha, 0x2002,
                "Failed=%x port_id=%02x%02x%02x loop_id=%x "
                "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
                fcport->d_id.b.area, fcport->d_id.b.al_pa,
                fcport->loop_id, jiffies);

            *next_loopid = fcport->loop_id;
            ha->isp_ops->fabric_logout(vha, fcport->loop_id,
                fcport->d_id.b.domain, fcport->d_id.b.area,
                fcport->d_id.b.al_pa);
            qla2x00_clear_loop_id(fcport);
            fcport->login_retry = 0;

            rval = 3;
            break;
        }
    }

    return (rval);
}
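/*
 * Illustrative sketch (example only): qla2x00_fabric_login() above keys its
 * retry behaviour off mb[0].  A hypothetical classifier summarizing that
 * mapping (the status codes are the ones tested in the function):
 */
static inline int qla_fabric_login_next_step_example(uint16_t mb0)
{
    switch (mb0) {
    case MBS_COMMAND_COMPLETE:
        return 0;       /* done */
    case MBS_PORT_ID_USED:  /* retry with the firmware-suggested loop ID */
    case MBS_LOOP_ID_USED:
        return 1;       /* retry with a different loop ID */
    case MBS_COMMAND_ERROR:
        return 2;       /* logout and mark the device lost */
    default:
        return 3;       /* unrecoverable */
    }
}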
/*
 * qla2x00_local_device_login
 *	Issue local device login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = loop id of device to login to.
 *
 * Returns (Where's the #define!!!!):
 *      0 - Login successfully
 *      1 - Login failed
 *      3 - Fatal error
 */
int
qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
{
    int rval;
    uint16_t mb[MAILBOX_REGISTER_COUNT];

    memset(mb, 0, sizeof(mb));
    rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
    if (rval == QLA_SUCCESS) {
        /* Interrogate mailbox registers for any errors */
        if (mb[0] == MBS_COMMAND_ERROR)
            rval = 1;
        else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
            /* device not in PCB table */
            rval = 3;
    }

    return (rval);
}
/*
 * qla2x00_loop_resync
 *      Resync with fibre channel devices.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
static int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
    int rval = QLA_SUCCESS;
    uint32_t wait_time;

    clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
    if (vha->flags.online) {
        if (!(rval = qla2x00_fw_ready(vha))) {
            /* Wait at most MAX_TARGET RSCNs for a stable link. */
            wait_time = 256;
            do {
                if (!IS_QLAFX00(vha->hw)) {
                    /*
                     * Issue a marker after FW becomes
                     * ready.
                     */
                    qla2x00_marker(vha, vha->hw->base_qpair,
                        0, 0, MK_SYNC_ALL);
                    vha->marker_needed = 0;
                }

                /* Remap devices on Loop. */
                clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

                if (IS_QLAFX00(vha->hw))
                    qlafx00_configure_devices(vha);
                else
                    qla2x00_configure_loop(vha);

                wait_time--;
            } while (!atomic_read(&vha->loop_down_timer) &&
                !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
                && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
                &vha->dpc_flags)));
        }
    }

    if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
        return (QLA_FUNCTION_FAILED);

    if (rval)
        ql_dbg(ql_dbg_disc, vha, 0x206c,
            "%s *** FAILED ***.\n", __func__);

    return (rval);
}
/*
 * qla2x00_perform_loop_resync
 * Description: This function will set the appropriate flags and call
 *              qla2x00_loop_resync. If successful loop will be resynced
 * Arguments : scsi_qla_host_t pointer
 * return : Success or Failure
 */
int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
{
    int32_t rval = 0;

    if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
        /* Configure the flags so that resync happens properly */
        atomic_set(&ha->loop_down_timer, 0);
        if (!(ha->device_flags & DFLG_NO_CABLE)) {
            atomic_set(&ha->loop_state, LOOP_UP);
            set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
            set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
            set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);

            rval = qla2x00_loop_resync(ha);
        } else
            atomic_set(&ha->loop_state, LOOP_DEAD);

        clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
    }

    return rval;
}
/* Assumes idc_lock always held on entry */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    uint32_t drv_presence, drv_presence_mask;
    uint32_t dev_part_info1, dev_part_info2, class_type;
    uint32_t class_type_mask = 0x3;
    uint16_t fcoe_other_function = 0xffff, i;

    if (IS_QLA8044(ha)) {
        drv_presence = qla8044_rd_direct(vha,
            QLA8044_CRB_DRV_ACTIVE_INDEX);
        dev_part_info1 = qla8044_rd_direct(vha,
            QLA8044_CRB_DEV_PART_INFO_INDEX);
        dev_part_info2 = qla8044_rd_direct(vha,
            QLA8044_CRB_DEV_PART_INFO2);
    } else {
        qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
        qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
        qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
    }
    for (i = 0; i < 8; i++) {
        class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
        if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
            (i != ha->portnum)) {
            fcoe_other_function = i;
            break;
        }
    }
    if (fcoe_other_function == 0xffff) {
        for (i = 0; i < 8; i++) {
            class_type = ((dev_part_info2 >> (i * 4)) &
                class_type_mask);
            if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
                ((i + 8) != ha->portnum)) {
                fcoe_other_function = i + 8;
                break;
            }
        }
    }
    /*
     * Prepare drv-presence mask based on fcoe functions present.
     * However consider only valid physical fcoe function numbers (0-15).
     */
    drv_presence_mask = ~((1 << (ha->portnum)) |
            ((fcoe_other_function == 0xffff) ?
             0 : (1 << (fcoe_other_function))));

    /* We are the reset owner iff:
     *    - No other protocol drivers present.
     *    - This is the lowest among fcoe functions. */
    if (!(drv_presence & drv_presence_mask) &&
        (ha->portnum < fcoe_other_function)) {
        ql_dbg(ql_dbg_p3p, vha, 0xb07f,
            "This host is Reset owner.\n");
        ha->flags.nic_core_reset_owner = 1;
    }
}
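/*
 * Illustrative sketch (example only): the reset-owner test above masks out
 * this port and the other FCoE function before checking drv_presence.  The
 * mask construction in isolation, with hypothetical parameter names:
 */
static inline uint32_t qla83xx_presence_mask_example(uint16_t my_port,
                                                     uint16_t other_fcoe_port)
{
    /* 0xffff means no other FCoE function was found in DEV_PARTINFO. */
    return ~((1U << my_port) |
             (other_fcoe_port == 0xffff ? 0 : (1U << other_fcoe_port)));
}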
static int
__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
{
    int rval = QLA_SUCCESS;
    struct qla_hw_data *ha = vha->hw;
    uint32_t drv_ack;

    rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
    if (rval == QLA_SUCCESS) {
        drv_ack |= (1 << ha->portnum);
        rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
    }

    return rval;
}

static int
__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
{
    int rval = QLA_SUCCESS;
    struct qla_hw_data *ha = vha->hw;
    uint32_t drv_ack;

    rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
    if (rval == QLA_SUCCESS) {
        drv_ack &= ~(1 << ha->portnum);
        rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
    }

    return rval;
}
/* Assumes idc-lock always held on entry */
void
qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
{
    struct qla_hw_data *ha = vha->hw;
    uint32_t idc_audit_reg = 0, duration_secs = 0;

    switch (audit_type) {
    case IDC_AUDIT_TIMESTAMP:
        ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
        idc_audit_reg = (ha->portnum) |
            (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
        qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
        break;

    case IDC_AUDIT_COMPLETION:
        duration_secs = ((jiffies_to_msecs(jiffies) -
            jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
        idc_audit_reg = (ha->portnum) |
            (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
        qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
        break;

    default:
        ql_log(ql_log_warn, vha, 0xb078,
            "Invalid audit type specified.\n");
        break;
    }
}
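/*
 * Illustrative sketch (example only): both audit cases above pack the same
 * layout into QLA83XX_IDC_AUDIT -- port number in the low bits, audit type
 * at bit 7, and a timestamp or duration (in seconds) from bit 8 up.  A
 * hypothetical helper performing just that packing:
 */
static inline uint32_t qla83xx_pack_idc_audit_example(uint16_t portnum,
                                                      int audit_type,
                                                      uint32_t seconds)
{
    return portnum | (audit_type << 7) | (seconds << 8);
}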
/* Assumes idc_lock always held on entry */
static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    uint32_t idc_control, dev_state;

    __qla83xx_get_idc_control(vha, &idc_control);
    if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
        ql_log(ql_log_info, vha, 0xb080,
            "NIC Core reset has been disabled. idc-control=0x%x\n",
            idc_control);
        return QLA_FUNCTION_FAILED;
    }

    /* Set NEED-RESET iff in READY state and we are the reset-owner */
    qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
    if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
        qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
            QLA8XXX_DEV_NEED_RESET);
        ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
        qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
    } else {
        ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n",
            qdev_state(dev_state));

        /* SV: XXX: Is timeout required here? */
        /* Wait for IDC state change READY -> NEED_RESET */
        while (dev_state == QLA8XXX_DEV_READY) {
            qla83xx_idc_unlock(vha, 0);
            msleep(200);
            qla83xx_idc_lock(vha, 0);
            qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
        }
    }

    /* Send IDC ack by writing to drv-ack register */
    __qla83xx_set_drv_ack(vha);

    return QLA_SUCCESS;
}
int
__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
{
    return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}

int
__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
{
    return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}

static int
qla83xx_check_driver_presence(scsi_qla_host_t *vha)
{
    uint32_t drv_presence = 0;
    struct qla_hw_data *ha = vha->hw;

    qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
    if (drv_presence & (1 << ha->portnum))
        return QLA_SUCCESS;
    else
        return QLA_TEST_FAILED;
}
int
qla83xx_nic_core_reset(scsi_qla_host_t *vha)
{
    int rval = QLA_SUCCESS;
    struct qla_hw_data *ha = vha->hw;

    ql_dbg(ql_dbg_p3p, vha, 0xb058, "Entered %s().\n", __func__);

    if (vha->device_flags & DFLG_DEV_FAILED) {
        ql_log(ql_log_warn, vha, 0xb059,
            "Device in unrecoverable FAILED state.\n");
        return QLA_FUNCTION_FAILED;
    }

    qla83xx_idc_lock(vha, 0);

    if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0xb05a,
            "Function=0x%x has been removed from IDC participation.\n",
            ha->portnum);
        rval = QLA_FUNCTION_FAILED;
        goto exit;
    }

    qla83xx_reset_ownership(vha);

    rval = qla83xx_initiating_reset(vha);

    /*
     * Perform reset if we are the reset-owner,
     * else wait till IDC state changes to READY/FAILED.
     */
    if (rval == QLA_SUCCESS) {
        rval = qla83xx_idc_state_handler(vha);

        if (rval == QLA_SUCCESS)
            ha->flags.nic_core_hung = 0;
        __qla83xx_clear_drv_ack(vha);
    }

exit:
    qla83xx_idc_unlock(vha, 0);

    ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);

    return rval;
}
int
qla2xxx_mctp_dump(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    int rval = QLA_FUNCTION_FAILED;

    if (!IS_MCTP_CAPABLE(ha)) {
        /* This message can be removed from the final version */
        ql_log(ql_log_info, vha, 0x506d,
            "This board is not MCTP capable\n");
        return rval;
    }

    if (!ha->mctp_dump) {
        ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
            MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);

        if (!ha->mctp_dump) {
            ql_log(ql_log_warn, vha, 0x506e,
                "Failed to allocate memory for mctp dump\n");
            return rval;
        }
    }

#define MCTP_DUMP_STR_ADDR 0x00000000
    rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
        MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x506f,
            "Failed to capture mctp dump\n");
    } else {
        ql_log(ql_log_info, vha, 0x5070,
            "Mctp dump capture for host (%ld/%p).\n",
            vha->host_no, ha->mctp_dump);
        ha->mctp_dumped = 1;
    }

    if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
        ha->flags.nic_core_reset_hdlr_active = 1;
        rval = qla83xx_restart_nic_firmware(vha);
        if (rval)
            /* NIC Core reset failed. */
            ql_log(ql_log_warn, vha, 0x5071,
                "Failed to restart nic firmware\n");
        else
            ql_dbg(ql_dbg_p3p, vha, 0xb084,
                "Restarted NIC firmware successfully.\n");
        ha->flags.nic_core_reset_hdlr_active = 0;
    }

    return rval;
}
/*
 * qla2x00_quiesce_io
 * Description: This function will block the new I/Os.
 *              It is not aborting any I/Os as context
 *              is not destroyed during quiescence.
 * Arguments: scsi_qla_host_t
 * return : void
 */
void
qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *vp, *tvp;
    unsigned long flags;

    ql_dbg(ql_dbg_dpc, vha, 0x401d,
        "Quiescing I/O - ha=%p.\n", ha);

    atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
    if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
        atomic_set(&vha->loop_state, LOOP_DOWN);
        qla2x00_mark_all_devices_lost(vha);

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
            atomic_inc(&vp->vref_count);
            spin_unlock_irqrestore(&ha->vport_slock, flags);

            qla2x00_mark_all_devices_lost(vp);

            spin_lock_irqsave(&ha->vport_slock, flags);
            atomic_dec(&vp->vref_count);
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
    } else {
        if (!atomic_read(&vha->loop_down_timer))
            atomic_set(&vha->loop_down_timer,
                LOOP_DOWN_TIME);
    }
    /* Wait for pending cmds to complete */
    WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
             != QLA_SUCCESS);
}
void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *vp, *tvp;
    unsigned long flags;
    fc_port_t *fcport;
    u16 i;

    /* For ISP82XX, driver waits for completion of the commands.
     * online flag should be set.
     */
    if (!(IS_P3P_TYPE(ha)))
        vha->flags.online = 0;
    ha->flags.chip_reset_done = 0;
    clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
    vha->qla_stats.total_isp_aborts++;

    ql_log(ql_log_info, vha, 0x00af,
        "Performing ISP error recovery - ha=%p.\n", ha);

    ha->flags.purge_mbox = 1;
    /* For ISP82XX, reset_chip is just disabling interrupts.
     * Driver waits for the completion of the commands.
     * the interrupts need to be enabled.
     */
    if (!(IS_P3P_TYPE(ha)))
        ha->isp_ops->reset_chip(vha);

    ha->link_data_rate = PORT_SPEED_UNKNOWN;
    SAVE_TOPO(ha);
    ha->flags.rida_fmt2 = 0;
    ha->flags.n2n_ae = 0;
    ha->flags.lip_ae = 0;
    ha->current_topology = 0;
    QLA_FW_STOPPED(ha);
    ha->flags.fw_init_done = 0;
    ha->chip_reset++;
    ha->base_qpair->chip_reset = ha->chip_reset;
    ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
    ha->base_qpair->prev_completion_cnt = 0;
    for (i = 0; i < ha->max_qpairs; i++) {
        if (ha->queue_pair_map[i]) {
            ha->queue_pair_map[i]->chip_reset =
                ha->base_qpair->chip_reset;
            ha->queue_pair_map[i]->cmd_cnt =
                ha->queue_pair_map[i]->cmd_completion_cnt = 0;
            ha->base_qpair->prev_completion_cnt = 0;
        }
    }

    /* purge MBox commands */
    spin_lock_irqsave(&ha->hardware_lock, flags);
    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) {
        clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
        complete(&ha->mbx_intr_comp);
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    i = 0;
    while (atomic_read(&ha->num_pend_mbx_stage2) ||
        atomic_read(&ha->num_pend_mbx_stage1)) {
        msleep(20);
        i++;
        if (i > 50)
            break;
    }
    ha->flags.purge_mbox = 0;

    atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
    if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
        atomic_set(&vha->loop_state, LOOP_DOWN);
        qla2x00_mark_all_devices_lost(vha);

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
            atomic_inc(&vp->vref_count);
            spin_unlock_irqrestore(&ha->vport_slock, flags);

            qla2x00_mark_all_devices_lost(vp);

            spin_lock_irqsave(&ha->vport_slock, flags);
            atomic_dec(&vp->vref_count);
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
    } else {
        if (!atomic_read(&vha->loop_down_timer))
            atomic_set(&vha->loop_down_timer,
                LOOP_DOWN_TIME);
    }

    /* Clear all async request states across all VPs. */
    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
        fcport->scan_state = 0;
    }
    spin_lock_irqsave(&ha->vport_slock, flags);
    list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
        atomic_inc(&vp->vref_count);
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        list_for_each_entry(fcport, &vp->vp_fcports, list)
            fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

        spin_lock_irqsave(&ha->vport_slock, flags);
        atomic_dec(&vp->vref_count);
    }
    spin_unlock_irqrestore(&ha->vport_slock, flags);

    /* Make sure for ISP 82XX IO DMA is complete */
    if (IS_P3P_TYPE(ha)) {
        qla82xx_chip_reset_cleanup(vha);
        ql_log(ql_log_info, vha, 0x00b4,
            "Done chip reset cleanup.\n");

        /* Done waiting for pending commands. Reset online flag */
        vha->flags.online = 0;
    }

    /* Requeue all commands in outstanding command list. */
    qla2x00_abort_all_cmds(vha, DID_RESET << 16);
    /* memory barrier */
    wmb();
}
/*
 *  qla2x00_abort_isp
 *      Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *      ha           = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
    uint8_t        status = 0;
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *vp, *tvp;
    struct req_que *req = ha->req_q_map[0];
    unsigned long flags;
    fc_port_t *fcport;

    if (vha->flags.online) {
        qla2x00_abort_isp_cleanup(vha);

        vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
        vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;

        if (vha->hw->flags.port_isolated)
            return status;

        if (qla2x00_isp_reg_stat(ha)) {
            ql_log(ql_log_info, vha, 0x803f,
                "ISP Abort - ISP reg disconnect, exiting.\n");
            return status;
        }

        if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
            ha->flags.chip_reset_done = 1;
            vha->flags.online = 1;
            status = 0;
            clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
            return status;
        }

        if (IS_QLA8031(ha)) {
            ql_dbg(ql_dbg_p3p, vha, 0xb05c,
                "Clearing fcoe driver presence.\n");
            if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
                ql_dbg(ql_dbg_p3p, vha, 0xb073,
                    "Error while clearing DRV-Presence.\n");
        }

        if (unlikely(pci_channel_offline(ha->pdev) &&
            ha->flags.pci_channel_io_perm_failure)) {
            clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
            status = 0;
            return status;
        }

        switch (vha->qlini_mode) {
        case QLA2XXX_INI_MODE_DISABLED:
            if (!qla_tgt_mode_enabled(vha))
                return 0;
            break;
        case QLA2XXX_INI_MODE_DUAL:
            if (!qla_dual_mode_enabled(vha) &&
                !qla_ini_mode_enabled(vha))
                return 0;
            break;
        case QLA2XXX_INI_MODE_ENABLED:
        default:
            break;
        }

        ha->isp_ops->get_flash_version(vha, req->ring);

        if (qla2x00_isp_reg_stat(ha)) {
            ql_log(ql_log_info, vha, 0x803f,
                "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n");
            return status;
        }
        ha->isp_ops->nvram_config(vha);

        if (qla2x00_isp_reg_stat(ha)) {
            ql_log(ql_log_info, vha, 0x803f,
                "ISP Abort - ISP reg disconnect post nvram config, exiting.\n");
            return status;
        }

        /* User may have updated [fcp|nvme] prefer in flash */
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
            if (NVME_PRIORITY(ha, fcport))
                fcport->do_prli_nvme = 1;
            else
                fcport->do_prli_nvme = 0;
        }

        if (!qla2x00_restart_isp(vha)) {
            clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

            if (!atomic_read(&vha->loop_down_timer)) {
                /*
                 * Issue marker command only when we are going
                 * to start the I/O .
                 */
                vha->marker_needed = 1;
            }

            vha->flags.online = 1;

            ha->isp_ops->enable_intrs(ha);

            ha->isp_abort_cnt = 0;
            clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

            if (IS_QLA81XX(ha) || IS_QLA8031(ha))
                qla2x00_get_fw_version(vha);

        } else {	/* failed the ISP abort */
            vha->flags.online = 1;
            if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
                if (ha->isp_abort_cnt == 0) {
                    ql_log(ql_log_fatal, vha, 0x8035,
                        "ISP error recover failed - "
                        "board disabled.\n");
                    /*
                     * The next call disables the board
                     * completely.
                     */
                    qla2x00_abort_isp_cleanup(vha);
                    vha->flags.online = 0;
                    clear_bit(ISP_ABORT_RETRY,
                        &vha->dpc_flags);
                    status = 0;
                } else { /* schedule another ISP abort */
                    ha->isp_abort_cnt--;
                    ql_dbg(ql_dbg_taskm, vha, 0x8020,
                        "ISP abort - retry remaining %d.\n",
                        ha->isp_abort_cnt);
                    status = 1;
                }
            } else {
                ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
                ql_dbg(ql_dbg_taskm, vha, 0x8021,
                    "ISP error recovery - retrying (%d) "
                    "more times.\n", ha->isp_abort_cnt);
                set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
                status = 1;
            }
        }
    }

    if (vha->hw->flags.port_isolated) {
        qla2x00_abort_isp_cleanup(vha);
        return status;
    }

    if (!status) {
        ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
        qla2x00_configure_hba(vha);
        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
            if (vp->vp_idx) {
                atomic_inc(&vp->vref_count);
                spin_unlock_irqrestore(&ha->vport_slock, flags);

                /* User may have updated [fcp|nvme] prefer in flash */
                list_for_each_entry(fcport, &vp->vp_fcports, list) {
                    if (NVME_PRIORITY(ha, fcport))
                        fcport->do_prli_nvme = 1;
                    else
                        fcport->do_prli_nvme = 0;
                }

                qla2x00_vp_abort_isp(vp);

                spin_lock_irqsave(&ha->vport_slock, flags);
                atomic_dec(&vp->vref_count);
            }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        if (IS_QLA8031(ha)) {
            ql_dbg(ql_dbg_p3p, vha, 0xb05d,
                "Setting back fcoe driver presence.\n");
            if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
                ql_dbg(ql_dbg_p3p, vha, 0xb074,
                    "Error while setting DRV-Presence.\n");
        }
    } else {
        ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
            __func__);
    }

    return (status);
}
/*
 *  qla2x00_restart_isp
 *      restarts the ISP after a reset
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
static int
qla2x00_restart_isp(scsi_qla_host_t *vha)
{
    int status;
    struct qla_hw_data *ha = vha->hw;

    /* If firmware needs to be loaded */
    if (qla2x00_isp_firmware(vha)) {
        vha->flags.online = 0;
        status = ha->isp_ops->chip_diag(vha);
        if (status)
            return status;
        status = qla2x00_setup_chip(vha);
        if (status)
            return status;
    }

    status = qla2x00_init_rings(vha);
    if (status)
        return status;

    clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
    ha->flags.chip_reset_done = 1;

    /* Initialize the queues in use */
    qla25xx_init_queues(ha);

    status = qla2x00_fw_ready(vha);
    if (status) {
        /* if no cable then assume it's good */
        return vha->device_flags & DFLG_NO_CABLE ? 0 : status;
    }

    /* Issue a marker after FW becomes ready. */
    qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
    set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

    return 0;
}
static int
qla25xx_init_queues(struct qla_hw_data *ha)
{
    struct rsp_que *rsp = NULL;
    struct req_que *req = NULL;
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
    int ret = -1;
    int i;

    for (i = 1; i < ha->max_rsp_queues; i++) {
        rsp = ha->rsp_q_map[i];
        if (rsp && test_bit(i, ha->rsp_qid_map)) {
            rsp->options &= ~BIT_0;
            ret = qla25xx_init_rsp_que(base_vha, rsp);
            if (ret != QLA_SUCCESS)
                ql_dbg(ql_dbg_init, base_vha, 0x00ff,
                    "%s Rsp que: %d init failed.\n",
                    __func__, rsp->id);
            else
                ql_dbg(ql_dbg_init, base_vha, 0x0100,
                    "%s Rsp que: %d inited.\n",
                    __func__, rsp->id);
        }
    }
    for (i = 1; i < ha->max_req_queues; i++) {
        req = ha->req_q_map[i];
        if (req && test_bit(i, ha->req_qid_map)) {
            /* Clear outstanding commands array. */
            req->options &= ~BIT_0;
            ret = qla25xx_init_req_que(base_vha, req);
            if (ret != QLA_SUCCESS)
                ql_dbg(ql_dbg_init, base_vha, 0x0101,
                    "%s Req que: %d init failed.\n",
                    __func__, req->id);
            else
                ql_dbg(ql_dbg_init, base_vha, 0x0102,
                    "%s Req que: %d inited.\n",
                    __func__, req->id);
        }
    }
    return ret;
}
/*
 * qla2x00_reset_adapter
 *      Reset adapter.
 *
 * Input:
 *      ha = adapter block pointer.
 */
int
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
    unsigned long flags = 0;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

    vha->flags.online = 0;
    ha->isp_ops->disable_intrs(ha);

    spin_lock_irqsave(&ha->hardware_lock, flags);
    wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
    rd_reg_word(&reg->hccr);		/* PCI Posting. */
    wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
    rd_reg_word(&reg->hccr);		/* PCI Posting. */
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_SUCCESS;
}
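/*
 * Illustrative sketch (example only): the sequence above issues RESET then
 * RELEASE through the HCCR and reads the register back after each write so
 * the write is flushed past any PCI posting.  The same pattern on an
 * arbitrary 16-bit register, using the accessors already used in this file:
 */
static inline void qla_hccr_pulse_example(uint16_t __iomem *hccr,
                                          uint16_t set_cmd, uint16_t clear_cmd)
{
    wrt_reg_word(hccr, set_cmd);
    rd_reg_word(hccr);		/* flush posted write */
    wrt_reg_word(hccr, clear_cmd);
    rd_reg_word(hccr);		/* flush posted write */
}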
int
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
    unsigned long flags = 0;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

    if (IS_P3P_TYPE(ha))
        return QLA_SUCCESS;

    vha->flags.online = 0;
    ha->isp_ops->disable_intrs(ha);

    spin_lock_irqsave(&ha->hardware_lock, flags);
    wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
    rd_reg_dword(&reg->hccr);
    wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
    rd_reg_dword(&reg->hccr);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    if (IS_NOPOLLING_TYPE(ha))
        ha->isp_ops->enable_intrs(ha);

    return QLA_SUCCESS;
}
/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
	struct nvram_24xx *nv)
{
#ifdef CONFIG_SPARC
    struct qla_hw_data *ha = vha->hw;
    struct pci_dev *pdev = ha->pdev;
    struct device_node *dp = pci_device_to_OF_node(pdev);
    const u8 *val;
    int len;

    val = of_get_property(dp, "port-wwn", &len);
    if (val && len >= WWN_SIZE)
        memcpy(nv->port_name, val, WWN_SIZE);

    val = of_get_property(dp, "node-wwn", &len);
    if (val && len >= WWN_SIZE)
        memcpy(nv->node_name, val, WWN_SIZE);
#endif
}
7864 qla24xx_nvram_config(scsi_qla_host_t
*vha
)
7867 struct init_cb_24xx
*icb
;
7868 struct nvram_24xx
*nv
;
7870 uint8_t *dptr1
, *dptr2
;
7873 struct qla_hw_data
*ha
= vha
->hw
;
7876 icb
= (struct init_cb_24xx
*)ha
->init_cb
;
7879 /* Determine NVRAM starting address. */
7880 if (ha
->port_no
== 0) {
7881 ha
->nvram_base
= FA_NVRAM_FUNC0_ADDR
;
7882 ha
->vpd_base
= FA_NVRAM_VPD0_ADDR
;
7884 ha
->nvram_base
= FA_NVRAM_FUNC1_ADDR
;
7885 ha
->vpd_base
= FA_NVRAM_VPD1_ADDR
;
7888 ha
->nvram_size
= sizeof(*nv
);
7889 ha
->vpd_size
= FA_NVRAM_VPD_SIZE
;
7891 /* Get VPD data into cache */
7892 ha
->vpd
= ha
->nvram
+ VPD_OFFSET
;
7893 ha
->isp_ops
->read_nvram(vha
, ha
->vpd
,
7894 ha
->nvram_base
- FA_NVRAM_FUNC0_ADDR
, FA_NVRAM_VPD_SIZE
* 4);
7896 /* Get NVRAM data into cache and calculate checksum. */
7897 dptr
= (__force __le32
*)nv
;
7898 ha
->isp_ops
->read_nvram(vha
, dptr
, ha
->nvram_base
, ha
->nvram_size
);
7899 for (cnt
= 0, chksum
= 0; cnt
< ha
->nvram_size
>> 2; cnt
++, dptr
++)
7900 chksum
+= le32_to_cpu(*dptr
);
7902 ql_dbg(ql_dbg_init
+ ql_dbg_buffer
, vha
, 0x006a,
7903 "Contents of NVRAM\n");
7904 ql_dump_buffer(ql_dbg_init
+ ql_dbg_buffer
, vha
, 0x010d,
7905 nv
, ha
->nvram_size
);
7907 /* Bad NVRAM data, set defaults parameters. */
7908 if (chksum
|| memcmp("ISP ", nv
->id
, sizeof(nv
->id
)) ||
7909 le16_to_cpu(nv
->nvram_version
) < ICB_VERSION
) {
7910 /* Reset NVRAM data. */
7911 ql_log(ql_log_warn
, vha
, 0x006b,
7912 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
7913 chksum
, nv
->id
, nv
->nvram_version
);
7914 ql_dump_buffer(ql_dbg_init
, vha
, 0x006b, nv
, sizeof(*nv
));
7915 ql_log(ql_log_warn
, vha
, 0x006c,
7916 "Falling back to functioning (yet invalid -- WWPN) "
7920 * Set default initialization control block.
7922 memset(nv
, 0, ha
->nvram_size
);
7923 nv
->nvram_version
= cpu_to_le16(ICB_VERSION
);
7924 nv
->version
= cpu_to_le16(ICB_VERSION
);
7925 nv
->frame_payload_size
= cpu_to_le16(2048);
7926 nv
->execution_throttle
= cpu_to_le16(0xFFFF);
7927 nv
->exchange_count
= cpu_to_le16(0);
7928 nv
->hard_address
= cpu_to_le16(124);
7929 nv
->port_name
[0] = 0x21;
7930 nv
->port_name
[1] = 0x00 + ha
->port_no
+ 1;
7931 nv
->port_name
[2] = 0x00;
7932 nv
->port_name
[3] = 0xe0;
7933 nv
->port_name
[4] = 0x8b;
7934 nv
->port_name
[5] = 0x1c;
7935 nv
->port_name
[6] = 0x55;
7936 nv
->port_name
[7] = 0x86;
7937 nv
->node_name
[0] = 0x20;
7938 nv
->node_name
[1] = 0x00;
7939 nv
->node_name
[2] = 0x00;
7940 nv
->node_name
[3] = 0xe0;
7941 nv
->node_name
[4] = 0x8b;
7942 nv
->node_name
[5] = 0x1c;
7943 nv
->node_name
[6] = 0x55;
7944 nv
->node_name
[7] = 0x86;
7945 qla24xx_nvram_wwn_from_ofw(vha
, nv
);
7946 nv
->login_retry_count
= cpu_to_le16(8);
7947 nv
->interrupt_delay_timer
= cpu_to_le16(0);
7948 nv
->login_timeout
= cpu_to_le16(0);
7949 nv
->firmware_options_1
=
7950 cpu_to_le32(BIT_14
|BIT_13
|BIT_2
|BIT_1
);
7951 nv
->firmware_options_2
= cpu_to_le32(2 << 4);
7952 nv
->firmware_options_2
|= cpu_to_le32(BIT_12
);
7953 nv
->firmware_options_3
= cpu_to_le32(2 << 13);
7954 nv
->host_p
= cpu_to_le32(BIT_11
|BIT_10
);
7955 nv
->efi_parameters
= cpu_to_le32(0);
7956 nv
->reset_delay
= 5;
7957 nv
->max_luns_per_target
= cpu_to_le16(128);
7958 nv
->port_down_retry_count
= cpu_to_le16(30);
7959 nv
->link_down_timeout
= cpu_to_le16(30);
7964 if (qla_tgt_mode_enabled(vha
)) {
7965 /* Don't enable full login after initial LIP */
7966 nv
->firmware_options_1
&= cpu_to_le32(~BIT_13
);
7967 /* Don't enable LIP full login for initiator */
7968 nv
->host_p
&= cpu_to_le32(~BIT_10
);
7971 qlt_24xx_config_nvram_stage1(vha
, nv
);
7973 /* Reset Initialization control block */
7974 memset(icb
, 0, ha
->init_cb_size
);
7976 /* Copy 1st segment. */
7977 dptr1
= (uint8_t *)icb
;
7978 dptr2
= (uint8_t *)&nv
->version
;
7979 cnt
= (uint8_t *)&icb
->response_q_inpointer
- (uint8_t *)&icb
->version
;
7981 *dptr1
++ = *dptr2
++;
7983 icb
->login_retry_count
= nv
->login_retry_count
;
7984 icb
->link_down_on_nos
= nv
->link_down_on_nos
;
7986 /* Copy 2nd segment. */
7987 dptr1
= (uint8_t *)&icb
->interrupt_delay_timer
;
7988 dptr2
= (uint8_t *)&nv
->interrupt_delay_timer
;
7989 cnt
= (uint8_t *)&icb
->reserved_3
-
7990 (uint8_t *)&icb
->interrupt_delay_timer
;
7992 *dptr1
++ = *dptr2
++;
7993 ha
->frame_payload_size
= le16_to_cpu(icb
->frame_payload_size
);
7995 * Setup driver NVRAM options.
7997 qla2x00_set_model_info(vha
, nv
->model_name
, sizeof(nv
->model_name
),
8000 qlt_24xx_config_nvram_stage2(vha
, icb
);
8002 if (nv
->host_p
& cpu_to_le32(BIT_15
)) {
8003 /* Use alternate WWN? */
8004 memcpy(icb
->node_name
, nv
->alternate_node_name
, WWN_SIZE
);
8005 memcpy(icb
->port_name
, nv
->alternate_port_name
, WWN_SIZE
);
8008 /* Prepare nodename */
8009 if ((icb
->firmware_options_1
& cpu_to_le32(BIT_14
)) == 0) {
8011 * Firmware will apply the following mask if the nodename was
8014 memcpy(icb
->node_name
, icb
->port_name
, WWN_SIZE
);
8015 icb
->node_name
[0] &= 0xF0;
8018 /* Set host adapter parameters. */
8019 ha
->flags
.disable_risc_code_load
= 0;
8020 ha
->flags
.enable_lip_reset
= 0;
8021 ha
->flags
.enable_lip_full_login
=
8022 le32_to_cpu(nv
->host_p
) & BIT_10
? 1 : 0;
8023 ha
->flags
.enable_target_reset
=
8024 le32_to_cpu(nv
->host_p
) & BIT_11
? 1 : 0;
8025 ha
->flags
.enable_led_scheme
= 0;
8026 ha
->flags
.disable_serdes
= le32_to_cpu(nv
->host_p
) & BIT_5
? 1 : 0;
8028 ha
->operating_mode
= (le32_to_cpu(icb
->firmware_options_2
) &
8029 (BIT_6
| BIT_5
| BIT_4
)) >> 4;
8031 memcpy(ha
->fw_seriallink_options24
, nv
->seriallink_options
,
8032 sizeof(ha
->fw_seriallink_options24
));
8034 /* save HBA serial number */
8035 ha
->serial0
= icb
->port_name
[5];
8036 ha
->serial1
= icb
->port_name
[6];
8037 ha
->serial2
= icb
->port_name
[7];
8038 memcpy(vha
->node_name
, icb
->node_name
, WWN_SIZE
);
8039 memcpy(vha
->port_name
, icb
->port_name
, WWN_SIZE
);
8041 icb
->execution_throttle
= cpu_to_le16(0xFFFF);
8043 ha
->retry_count
= le16_to_cpu(nv
->login_retry_count
);
8045 /* Set minimum login_timeout to 4 seconds. */
8046 if (le16_to_cpu(nv
->login_timeout
) < ql2xlogintimeout
)
8047 nv
->login_timeout
= cpu_to_le16(ql2xlogintimeout
);
8048 if (le16_to_cpu(nv
->login_timeout
) < 4)
8049 nv
->login_timeout
= cpu_to_le16(4);
8050 ha
->login_timeout
= le16_to_cpu(nv
->login_timeout
);
8052 /* Set minimum RATOV to 100 tenths of a second. */
8055 ha
->loop_reset_delay
= nv
->reset_delay
;
8057 /* Link Down Timeout = 0:
8059 * When Port Down timer expires we will start returning
8060 * I/O's to OS with "DID_NO_CONNECT".
8062 * Link Down Timeout != 0:
8064 * The driver waits for the link to come up after link down
8065 * before returning I/Os to OS with "DID_NO_CONNECT".
8067 if (le16_to_cpu(nv
->link_down_timeout
) == 0) {
8068 ha
->loop_down_abort_time
=
8069 (LOOP_DOWN_TIME
- LOOP_DOWN_TIMEOUT
);
8071 ha
->link_down_timeout
= le16_to_cpu(nv
->link_down_timeout
);
8072 ha
->loop_down_abort_time
=
8073 (LOOP_DOWN_TIME
- ha
->link_down_timeout
);
8076 /* Need enough time to try and get the port back. */
8077 ha
->port_down_retry_count
= le16_to_cpu(nv
->port_down_retry_count
);
8078 if (qlport_down_retry
)
8079 ha
->port_down_retry_count
= qlport_down_retry
;
8081 /* Set login_retry_count */
8082 ha
->login_retry_count
= le16_to_cpu(nv
->login_retry_count
);
8083 if (ha
->port_down_retry_count
==
8084 le16_to_cpu(nv
->port_down_retry_count
) &&
8085 ha
->port_down_retry_count
> 3)
8086 ha
->login_retry_count
= ha
->port_down_retry_count
;
8087 else if (ha
->port_down_retry_count
> (int)ha
->login_retry_count
)
8088 ha
->login_retry_count
= ha
->port_down_retry_count
;
8089 if (ql2xloginretrycount
)
8090 ha
->login_retry_count
= ql2xloginretrycount
;
8092 /* N2N: driver will initiate Login instead of FW */
8093 icb
->firmware_options_3
|= cpu_to_le32(BIT_8
);
8096 if (!vha
->flags
.init_done
) {
8097 ha
->zio_mode
= le32_to_cpu(icb
->firmware_options_2
) &
8098 (BIT_3
| BIT_2
| BIT_1
| BIT_0
);
8099 ha
->zio_timer
= le16_to_cpu(icb
->interrupt_delay_timer
) ?
8100 le16_to_cpu(icb
->interrupt_delay_timer
) : 2;
8102 icb
->firmware_options_2
&= cpu_to_le32(
8103 ~(BIT_3
| BIT_2
| BIT_1
| BIT_0
));
8104 if (ha
->zio_mode
!= QLA_ZIO_DISABLED
) {
8105 ha
->zio_mode
= QLA_ZIO_MODE_6
;
8107 ql_log(ql_log_info
, vha
, 0x006f,
8108 "ZIO mode %d enabled; timer delay (%d us).\n",
8109 ha
->zio_mode
, ha
->zio_timer
* 100);
8111 icb
->firmware_options_2
|= cpu_to_le32(
8112 (uint32_t)ha
->zio_mode
);
8113 icb
->interrupt_delay_timer
= cpu_to_le16(ha
->zio_timer
);
8117 ql_log(ql_log_warn
, vha
, 0x0070,
8118 "NVRAM configuration failed.\n");
static void
qla27xx_print_image(struct scsi_qla_host *vha, char *name,
    struct qla27xx_image_status *image_status)
{
    ql_dbg(ql_dbg_init, vha, 0x018b,
        "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
        name, "status",
        image_status->image_status_mask,
        le16_to_cpu(image_status->generation),
        image_status->ver_major,
        image_status->ver_minor,
        image_status->bitmap,
        le32_to_cpu(image_status->checksum),
        le32_to_cpu(image_status->signature));
}

static bool
qla28xx_check_aux_image_status_signature(
    struct qla27xx_image_status *image_status)
{
    ulong signature = le32_to_cpu(image_status->signature);

    return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
}

static bool
qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
{
    ulong signature = le32_to_cpu(image_status->signature);

    return
        signature != QLA27XX_IMG_STATUS_SIGN &&
        signature != QLA28XX_IMG_STATUS_SIGN;
}

static ulong
qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
{
    __le32 *p = (__force __le32 *)image_status;
    uint n = sizeof(*image_status) / sizeof(*p);
    ulong sum = 0;

    for ( ; n--; p++)
        sum += le32_to_cpup(p);

    return sum;
}

static inline uint
qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
{
    return aux->bitmap & bitmask ?
        QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
}
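/*
 * Illustrative sketch (example only): qla28xx_component_bitmask() above maps
 * a set bit in the aux image bitmap to the secondary region and a clear bit
 * to the primary one.  A hypothetical caller selecting the region for one
 * component (the bit name is the one tested by qla28xx_component_status()
 * below) might read:
 */
static inline uint qla28xx_pick_vpd_nvram_region_example(
    struct qla27xx_image_status *aux)
{
    return qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);
}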
static void
qla28xx_component_status(
    struct active_regions *active_regions, struct qla27xx_image_status *aux)
{
    active_regions->aux.board_config =
        qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);

    active_regions->aux.vpd_nvram =
        qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);

    active_regions->aux.npiv_config_0_1 =
        qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);

    active_regions->aux.npiv_config_2_3 =
        qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);

    active_regions->aux.nvme_params =
        qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NVME_PARAMS);
}

static int
qla27xx_compare_image_generation(
    struct qla27xx_image_status *pri_image_status,
    struct qla27xx_image_status *sec_image_status)
{
    /* calculate generation delta as uint16 (this accounts for wrap) */
    int16_t delta =
        le16_to_cpu(pri_image_status->generation) -
        le16_to_cpu(sec_image_status->generation);

    ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);

    return delta;
}
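/*
 * Illustrative sketch (example only): the generation comparison above relies
 * on the subtraction being truncated to 16 bits and then read as a signed
 * value, so a counter that has wrapped (e.g. 0x0001 vs 0xfffe) still compares
 * as "newer".  In isolation:
 */
static inline int qla_generation_delta_example(uint16_t pri_gen, uint16_t sec_gen)
{
    int16_t delta = pri_gen - sec_gen;	/* wraps modulo 2^16 */

    return delta;	/* >= 0: primary is the same age or newer */
}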
8214 qla28xx_get_aux_images(
8215 struct scsi_qla_host
*vha
, struct active_regions
*active_regions
)
8217 struct qla_hw_data
*ha
= vha
->hw
;
8218 struct qla27xx_image_status pri_aux_image_status
, sec_aux_image_status
;
8219 bool valid_pri_image
= false, valid_sec_image
= false;
8220 bool active_pri_image
= false, active_sec_image
= false;
8223 if (!ha
->flt_region_aux_img_status_pri
) {
8224 ql_dbg(ql_dbg_init
, vha
, 0x018a, "Primary aux image not addressed\n");
8225 goto check_sec_image
;
8228 rc
= qla24xx_read_flash_data(vha
, (uint32_t *)&pri_aux_image_status
,
8229 ha
->flt_region_aux_img_status_pri
,
8230 sizeof(pri_aux_image_status
) >> 2);
8232 ql_log(ql_log_info
, vha
, 0x01a1,
8233 "Unable to read Primary aux image(%x).\n", rc
);
8234 goto check_sec_image
;
8236 qla27xx_print_image(vha
, "Primary aux image", &pri_aux_image_status
);
8238 if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status
)) {
8239 ql_dbg(ql_dbg_init
, vha
, 0x018b,
8240 "Primary aux image signature (%#x) not valid\n",
8241 le32_to_cpu(pri_aux_image_status
.signature
));
8242 goto check_sec_image
;
8245 if (qla27xx_image_status_checksum(&pri_aux_image_status
)) {
8246 ql_dbg(ql_dbg_init
, vha
, 0x018c,
8247 "Primary aux image checksum failed\n");
8248 goto check_sec_image
;
8251 valid_pri_image
= true;
8253 if (pri_aux_image_status
.image_status_mask
& 1) {
8254 ql_dbg(ql_dbg_init
, vha
, 0x018d,
8255 "Primary aux image is active\n");
8256 active_pri_image
= true;
8260 if (!ha
->flt_region_aux_img_status_sec
) {
8261 ql_dbg(ql_dbg_init
, vha
, 0x018a,
8262 "Secondary aux image not addressed\n");
8263 goto check_valid_image
;
8266 rc
= qla24xx_read_flash_data(vha
, (uint32_t *)&sec_aux_image_status
,
8267 ha
->flt_region_aux_img_status_sec
,
8268 sizeof(sec_aux_image_status
) >> 2);
8270 ql_log(ql_log_info
, vha
, 0x01a2,
8271 "Unable to read Secondary aux image(%x).\n", rc
);
8272 goto check_valid_image
;
8275 qla27xx_print_image(vha
, "Secondary aux image", &sec_aux_image_status
);
8277 if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status
)) {
8278 ql_dbg(ql_dbg_init
, vha
, 0x018b,
8279 "Secondary aux image signature (%#x) not valid\n",
8280 le32_to_cpu(sec_aux_image_status
.signature
));
8281 goto check_valid_image
;
8284 if (qla27xx_image_status_checksum(&sec_aux_image_status
)) {
8285 ql_dbg(ql_dbg_init
, vha
, 0x018c,
8286 "Secondary aux image checksum failed\n");
8287 goto check_valid_image
;
8290 valid_sec_image
= true;
8292 if (sec_aux_image_status
.image_status_mask
& 1) {
8293 ql_dbg(ql_dbg_init
, vha
, 0x018d,
8294 "Secondary aux image is active\n");
8295 active_sec_image
= true;
8299 if (valid_pri_image
&& active_pri_image
&&
8300 valid_sec_image
&& active_sec_image
) {
8301 if (qla27xx_compare_image_generation(&pri_aux_image_status
,
8302 &sec_aux_image_status
) >= 0) {
8303 qla28xx_component_status(active_regions
,
8304 &pri_aux_image_status
);
8306 qla28xx_component_status(active_regions
,
8307 &sec_aux_image_status
);
8309 } else if (valid_pri_image
&& active_pri_image
) {
8310 qla28xx_component_status(active_regions
, &pri_aux_image_status
);
8311 } else if (valid_sec_image
&& active_sec_image
) {
8312 qla28xx_component_status(active_regions
, &sec_aux_image_status
);
8315 ql_dbg(ql_dbg_init
, vha
, 0x018f,
8316 "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u, NVME=%u\n",
8317 active_regions
->aux
.board_config
,
8318 active_regions
->aux
.vpd_nvram
,
8319 active_regions
->aux
.npiv_config_0_1
,
8320 active_regions
->aux
.npiv_config_2_3
,
8321 active_regions
->aux
.nvme_params
);
8325 qla27xx_get_active_image(struct scsi_qla_host
*vha
,
8326 struct active_regions
*active_regions
)
8328 struct qla_hw_data
*ha
= vha
->hw
;
8329 struct qla27xx_image_status pri_image_status
, sec_image_status
;
8330 bool valid_pri_image
= false, valid_sec_image
= false;
8331 bool active_pri_image
= false, active_sec_image
= false;
8334 if (!ha
->flt_region_img_status_pri
) {
8335 ql_dbg(ql_dbg_init
, vha
, 0x018a, "Primary image not addressed\n");
8336 goto check_sec_image
;
8339 if (qla24xx_read_flash_data(vha
, (uint32_t *)&pri_image_status
,
8340 ha
->flt_region_img_status_pri
, sizeof(pri_image_status
) >> 2) !=
8343 goto check_sec_image
;
8345 qla27xx_print_image(vha
, "Primary image", &pri_image_status
);
8347 if (qla27xx_check_image_status_signature(&pri_image_status
)) {
8348 ql_dbg(ql_dbg_init
, vha
, 0x018b,
8349 "Primary image signature (%#x) not valid\n",
8350 le32_to_cpu(pri_image_status
.signature
));
8351 goto check_sec_image
;
8354 if (qla27xx_image_status_checksum(&pri_image_status
)) {
8355 ql_dbg(ql_dbg_init
, vha
, 0x018c,
8356 "Primary image checksum failed\n");
8357 goto check_sec_image
;
8360 valid_pri_image
= true;
8362 if (pri_image_status
.image_status_mask
& 1) {
8363 ql_dbg(ql_dbg_init
, vha
, 0x018d,
8364 "Primary image is active\n");
8365 active_pri_image
= true;
8369 if (!ha
->flt_region_img_status_sec
) {
8370 ql_dbg(ql_dbg_init
, vha
, 0x018a, "Secondary image not addressed\n");
8371 goto check_valid_image
;
8374 rc
= qla24xx_read_flash_data(vha
, (uint32_t *)(&sec_image_status
),
8375 ha
->flt_region_img_status_sec
, sizeof(sec_image_status
) >> 2);
8377 ql_log(ql_log_info
, vha
, 0x01a3,
8378 "Unable to read Secondary image status(%x).\n", rc
);
8379 goto check_valid_image
;
8382 qla27xx_print_image(vha
, "Secondary image", &sec_image_status
);
8384 if (qla27xx_check_image_status_signature(&sec_image_status
)) {
8385 ql_dbg(ql_dbg_init
, vha
, 0x018b,
8386 "Secondary image signature (%#x) not valid\n",
8387 le32_to_cpu(sec_image_status
.signature
));
8388 goto check_valid_image
;
8391 if (qla27xx_image_status_checksum(&sec_image_status
)) {
8392 ql_dbg(ql_dbg_init
, vha
, 0x018c,
8393 "Secondary image checksum failed\n");
8394 goto check_valid_image
;
8397 valid_sec_image
= true;
8399 if (sec_image_status
.image_status_mask
& 1) {
8400 ql_dbg(ql_dbg_init
, vha
, 0x018d,
8401 "Secondary image is active\n");
8402 active_sec_image
= true;
8406 if (valid_pri_image
&& active_pri_image
)
8407 active_regions
->global
= QLA27XX_PRIMARY_IMAGE
;
8409 if (valid_sec_image
&& active_sec_image
) {
8410 if (!active_regions
->global
||
8411 qla27xx_compare_image_generation(
8412 &pri_image_status
, &sec_image_status
) < 0) {
8413 active_regions
->global
= QLA27XX_SECONDARY_IMAGE
;
8417 ql_dbg(ql_dbg_init
, vha
, 0x018f, "active image %s (%u)\n",
8418 active_regions
->global
== QLA27XX_DEFAULT_IMAGE
?
8419 "default (boot/fw)" :
8420 active_regions
->global
== QLA27XX_PRIMARY_IMAGE
?
8422 active_regions
->global
== QLA27XX_SECONDARY_IMAGE
?
8423 "secondary" : "invalid",
8424 active_regions
->global
);
bool qla24xx_risc_firmware_invalid(uint32_t *dword)
{
    return
        !(dword[4] | dword[5] | dword[6] | dword[7]) ||
        !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);
}
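/*
 * Illustrative sketch (example only): qla24xx_risc_firmware_invalid() above
 * rejects an image whose words 4-7 are all zero or all ones -- the two
 * patterns produced by blank or erased flash.  A hypothetical single-word
 * check shows the idea:
 */
static inline bool qla_fw_word_is_blank_example(uint32_t w)
{
    return w == 0 || w == 0xffffffff;
}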
8435 qla24xx_load_risc_flash(scsi_qla_host_t
*vha
, uint32_t *srisc_addr
,
8439 uint templates
, segments
, fragment
;
8444 uint32_t risc_addr
, risc_size
, risc_attr
= 0;
8445 struct qla_hw_data
*ha
= vha
->hw
;
8446 struct req_que
*req
= ha
->req_q_map
[0];
8447 struct fwdt
*fwdt
= ha
->fwdt
;
8449 ql_dbg(ql_dbg_init
, vha
, 0x008b,
8450 "FW: Loading firmware from flash (%x).\n", faddr
);
8452 dcode
= (uint32_t *)req
->ring
;
8453 rval
= qla24xx_read_flash_data(vha
, dcode
, faddr
, 8);
8454 if (rval
|| qla24xx_risc_firmware_invalid(dcode
)) {
8455 ql_log(ql_log_fatal
, vha
, 0x008c,
8456 "Unable to verify the integrity of flash firmware image (rval %x).\n", rval
);
8457 ql_log(ql_log_fatal
, vha
, 0x008d,
8458 "Firmware data: %08x %08x %08x %08x.\n",
8459 dcode
[0], dcode
[1], dcode
[2], dcode
[3]);
8461 return QLA_FUNCTION_FAILED
;
8464 dcode
= (uint32_t *)req
->ring
;
8466 segments
= FA_RISC_CODE_SEGMENTS
;
8467 for (j
= 0; j
< segments
; j
++) {
8468 ql_dbg(ql_dbg_init
, vha
, 0x008d,
8469 "-> Loading segment %u...\n", j
);
8470 rval
= qla24xx_read_flash_data(vha
, dcode
, faddr
, 10);
8472 ql_log(ql_log_fatal
, vha
, 0x016a,
8473 "-> Unable to read segment addr + size .\n");
8474 return QLA_FUNCTION_FAILED
;
8476 risc_addr
= be32_to_cpu((__force __be32
)dcode
[2]);
8477 risc_size
= be32_to_cpu((__force __be32
)dcode
[3]);
8479 *srisc_addr
= risc_addr
;
8480 risc_attr
= be32_to_cpu((__force __be32
)dcode
[9]);
8483 dlen
= ha
->fw_transfer_size
>> 2;
8484 for (fragment
= 0; risc_size
; fragment
++) {
8485 if (dlen
> risc_size
)
8488 ql_dbg(ql_dbg_init
, vha
, 0x008e,
8489 "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
8490 fragment
, risc_addr
, faddr
, dlen
);
8491 rval
= qla24xx_read_flash_data(vha
, dcode
, faddr
, dlen
);
8493 ql_log(ql_log_fatal
, vha
, 0x016b,
8494 "-> Unable to read fragment(faddr %#x dlen %#lx).\n",
8496 return QLA_FUNCTION_FAILED
;
8498 for (i
= 0; i
< dlen
; i
++)
8499 dcode
[i
] = swab32(dcode
[i
]);
8501 rval
= qla2x00_load_ram(vha
, req
->dma
, risc_addr
, dlen
);
8503 ql_log(ql_log_fatal
, vha
, 0x008f,
8504 "-> Failed load firmware fragment %u.\n",
8506 return QLA_FUNCTION_FAILED
;
8515 if (!IS_QLA27XX(ha
) && !IS_QLA28XX(ha
))
8518 templates
= (risc_attr
& BIT_9
) ? 2 : 1;
8519 ql_dbg(ql_dbg_init
, vha
, 0x0160, "-> templates = %u\n", templates
);
8520 for (j
= 0; j
< templates
; j
++, fwdt
++) {
8521 vfree(fwdt
->template);
8522 fwdt
->template = NULL
;
8525 dcode
= (uint32_t *)req
->ring
;
8527 rval
= qla24xx_read_flash_data(vha
, dcode
, faddr
, 7);
8529 ql_log(ql_log_fatal
, vha
, 0x016c,
8530 "-> Unable to read template size.\n");
8534 risc_size
= be32_to_cpu((__force __be32
)dcode
[2]);
8535 ql_dbg(ql_dbg_init
, vha
, 0x0161,
8536 "-> fwdt%u template array at %#x (%#x dwords)\n",
8537 j
, faddr
, risc_size
);
8538 if (!risc_size
|| !~risc_size
) {
8539 ql_dbg(ql_dbg_init
, vha
, 0x0162,
8540 "-> fwdt%u failed to read array\n", j
);
8544 /* skip header and ignore checksum */
8548 ql_dbg(ql_dbg_init
, vha
, 0x0163,
8549 "-> fwdt%u template allocate template %#x words...\n",
8551 fwdt
->template = vmalloc_array(risc_size
, sizeof(*dcode
));
8552 if (!fwdt
->template) {
8553 ql_log(ql_log_warn
, vha
, 0x0164,
8554 "-> fwdt%u failed allocate template.\n", j
);
8558 dcode
= fwdt
->template;
8559 rval
= qla24xx_read_flash_data(vha
, dcode
, faddr
, risc_size
);
8561 if (rval
|| !qla27xx_fwdt_template_valid(dcode
)) {
8562 ql_log(ql_log_warn
, vha
, 0x0165,
8563 "-> fwdt%u failed template validate (rval %x)\n",
8568 dlen
= qla27xx_fwdt_template_size(dcode
);
8569 ql_dbg(ql_dbg_init
, vha
, 0x0166,
8570 "-> fwdt%u template size %#lx bytes (%#lx words)\n",
8571 j
, dlen
, dlen
/ sizeof(*dcode
));
8572 if (dlen
> risc_size
* sizeof(*dcode
)) {
8573 ql_log(ql_log_warn
, vha
, 0x0167,
8574 "-> fwdt%u template exceeds array (%-lu bytes)\n",
8575 j
, dlen
- risc_size
* sizeof(*dcode
));
8579 fwdt
->length
= dlen
;
8580 ql_dbg(ql_dbg_init
, vha
, 0x0168,
8581 "-> fwdt%u loaded template ok\n", j
);
8583 faddr
+= risc_size
+ 1;
8589 vfree(fwdt
->template);
8590 fwdt
->template = NULL
;
8596 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int	rval;
	int	i, fragment;
	uint16_t *wcode;
	__be16	 *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_info, vha, 0x0083,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_info, vha, 0x0084,
		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (__force __be16 *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		ql_log(ql_log_fatal, vha, 0x0085,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
	    wcode[2] == 0 && wcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0086,
		    "Unable to verify integrity of firmware image.\n");
		ql_log(ql_log_fatal, vha, 0x0087,
		    "Firmware data: %04x %04x %04x %04x.\n",
		    wcode[0], wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	seg = blob->segs;
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0088,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;
			ql_dbg(ql_dbg_init, vha, 0x0089,
			    "Loading risc segment@ risc addr %x number of "
			    "words 0x%x.\n", risc_addr, wlen);

			for (i = 0; i < wlen; i++)
				wcode[i] = swab16((__force u32)fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008a,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}
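/*
 * qla24xx_load_risc_blob() - load ISP24xx+ firmware obtained via
 * request_firmware(): validate the image header, DMA each code segment to
 * the RISC in fw_transfer_size-sized fragments, then (on 27xx/28xx) copy
 * out the firmware dump templates that trail the code segments.
 */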
static int
qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int	rval;
	uint templates, segments, fragment;
	ulong i;
	uint j;
	ulong dlen;
	uint32_t *dcode;
	uint32_t risc_addr, risc_size, risc_attr = 0;
	__be32 *fwcode;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct fwdt *fwdt = ha->fwdt;

	ql_dbg(ql_dbg_init, vha, 0x0090,
	    "-> FW: Loading via request-firmware.\n");

	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_warn, vha, 0x0092,
		    "-> Firmware file not found.\n");

		return QLA_FUNCTION_FAILED;
	}

	fwcode = (__force __be32 *)blob->fw->data;
	dcode = (__force uint32_t *)fwcode;
	if (qla24xx_risc_firmware_invalid(dcode)) {
		ql_log(ql_log_fatal, vha, 0x0093,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		ql_log(ql_log_fatal, vha, 0x0095,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);
		return QLA_FUNCTION_FAILED;
	}

	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;
	segments = FA_RISC_CODE_SEGMENTS;
	for (j = 0; j < segments; j++) {
		ql_dbg(ql_dbg_init, vha, 0x0096,
		    "-> Loading segment %u...\n", j);
		risc_addr = be32_to_cpu(fwcode[2]);
		risc_size = be32_to_cpu(fwcode[3]);

		if (!*srisc_addr) {
			*srisc_addr = risc_addr;
			risc_attr = be32_to_cpu(fwcode[9]);
		}

		dlen = ha->fw_transfer_size >> 2;
		for (fragment = 0; risc_size; fragment++) {
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x0097,
			    "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
			    fragment, risc_addr,
			    (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
			    dlen);

			for (i = 0; i < dlen; i++)
				dcode[i] = swab32((__force u32)fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x0098,
				    "-> Failed load firmware fragment %u.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			fwcode += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
		}
	}

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_SUCCESS;

	templates = (risc_attr & BIT_9) ? 2 : 1;
	ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
	for (j = 0; j < templates; j++, fwdt++) {
		vfree(fwdt->template);
		fwdt->template = NULL;
		fwdt->length = 0;

		risc_size = be32_to_cpu(fwcode[2]);
		ql_dbg(ql_dbg_init, vha, 0x0171,
		    "-> fwdt%u template array at %#x (%#x dwords)\n",
		    j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
		    risc_size);
		if (!risc_size || !~risc_size) {
			ql_dbg(ql_dbg_init, vha, 0x0172,
			    "-> fwdt%u failed to read array\n", j);
			goto failed;
		}

		/* skip header and ignore checksum */
		fwcode += 7;
		risc_size -= 8;

		ql_dbg(ql_dbg_init, vha, 0x0173,
		    "-> fwdt%u template allocate template %#x words...\n",
		    j, risc_size);
		fwdt->template = vmalloc_array(risc_size, sizeof(*dcode));
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x0174,
			    "-> fwdt%u failed allocate template.\n", j);
			goto failed;
		}

		dcode = fwdt->template;
		for (i = 0; i < risc_size; i++)
			dcode[i] = (__force u32)fwcode[i];

		if (!qla27xx_fwdt_template_valid(dcode)) {
			ql_log(ql_log_warn, vha, 0x0175,
			    "-> fwdt%u failed template validate\n", j);
			goto failed;
		}

		dlen = qla27xx_fwdt_template_size(dcode);
		ql_dbg(ql_dbg_init, vha, 0x0176,
		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
		    j, dlen, dlen / sizeof(*dcode));
		if (dlen > risc_size * sizeof(*dcode)) {
			ql_log(ql_log_warn, vha, 0x0177,
			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
			    j, dlen - risc_size * sizeof(*dcode));
			goto failed;
		}

		fwdt->length = dlen;
		ql_dbg(ql_dbg_init, vha, 0x0178,
		    "-> fwdt%u loaded template ok\n", j);

		fwcode += risc_size + 1;
	}

	return QLA_SUCCESS;

failed:
	vfree(fwdt->template);
	fwdt->template = NULL;
	fwdt->length = 0;

	return QLA_SUCCESS;
}
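/*
 * Entry points used by the init path.  The ql2xfwloadbin module parameter
 * is assumed here to select the load source (flash vs. request_firmware);
 * note that the default ordering differs between qla24xx_load_risc() and
 * qla81xx_load_risc() below.
 */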
int
qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;

	if (ql2xfwloadbin == 1)
		return qla81xx_load_risc(vha, srisc_addr);

	/*
	 * FW Load priority:
	 * 1) Firmware via request-firmware interface (.bin file).
	 * 2) Firmware residing in flash.
	 */
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	if (rval == QLA_SUCCESS)
		return rval;

	return qla24xx_load_risc_flash(vha, srisc_addr,
	    vha->hw->flt_region_fw);
}
int
qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct active_regions active_regions = { };

	if (ql2xfwloadbin == 2)
		goto try_blob_fw;

	/* FW Load priority:
	 * 1) Firmware residing in flash.
	 * 2) Firmware via request-firmware interface (.bin file).
	 * 3) Golden-Firmware residing in flash -- (limited operation).
	 */

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto try_primary_fw;

	qla27xx_get_active_image(vha, &active_regions);

	if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
		goto try_primary_fw;

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading secondary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
	if (!rval)
		return rval;

try_primary_fw:
	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading primary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
	if (!rval)
		return rval;

try_blob_fw:
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	if (!rval || !ha->flt_region_gold_fw)
		return rval;

	ql_log(ql_log_info, vha, 0x0099,
	    "Attempting to fallback to golden firmware.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
	if (rval)
		return rval;

	ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
	ha->flags.running_gold_fw = 1;
	return rval;
}
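/*
 * Best-effort firmware stop: issue the stop-firmware mailbox command and,
 * on failure, retry a handful of times after resetting and re-initializing
 * the chip.  Timeouts and invalid-command responses are treated as final.
 */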
void
qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
	int ret, retries;
	struct qla_hw_data *ha = vha->hw;

	if (ha->flags.pci_channel_io_perm_failure)
		return;
	if (!IS_FWI2_CAPABLE(ha))
		return;
	if (!ha->fw_major_version)
		return;
	if (!ha->flags.fw_started)
		return;

	ret = qla2x00_stop_firmware(vha);
	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
	    ret != QLA_INVALID_COMMAND && retries; retries--) {
		ha->isp_ops->reset_chip(vha);
		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
			continue;
		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
			continue;
		ql_log(ql_log_info, vha, 0x8015,
		    "Attempting retry of stop-firmware command.\n");
		ret = qla2x00_stop_firmware(vha);
	}

	ha->flags.fw_init_done = 0;
}
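/*
 * Bring up an NPIV virtual host: wait for the physical port's firmware to
 * become ready, log in to the fabric name server (SNS) and kick off a
 * loop resync on the base host.
 */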
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int rval2;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	rval = qla2x00_fw_ready(base_vha);

	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
	    BIT_1);
	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_init, vha, 0x0120,
			    "Failed SNS login: loop_id=%x, rval2=%d\n",
			    NPH_SNS, rval2);
		else
			ql_dbg(ql_dbg_init, vha, 0x0103,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
		return (QLA_FUNCTION_FAILED);
	}

	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}
/* 84XX Support **************************************************************/

static LIST_HEAD(qla_cs84xx_list);
static DEFINE_MUTEX(qla_cs84xx_mutex);

static struct qla_chip_state_84xx *
qla84xx_get_chip(struct scsi_qla_host *vha)
{
	struct qla_chip_state_84xx *cs84xx;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&qla_cs84xx_mutex);

	/* Find any shared 84xx chip. */
	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
		if (cs84xx->bus == ha->pdev->bus) {
			kref_get(&cs84xx->kref);
			goto done;
		}
	}

	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
	if (!cs84xx)
		goto done;

	kref_init(&cs84xx->kref);
	spin_lock_init(&cs84xx->access_lock);
	mutex_init(&cs84xx->fw_update_mutex);
	cs84xx->bus = ha->pdev->bus;

	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
done:
	mutex_unlock(&qla_cs84xx_mutex);
	return cs84xx;
}

static void
__qla84xx_chip_release(struct kref *kref)
{
	struct qla_chip_state_84xx *cs84xx =
	    container_of(kref, struct qla_chip_state_84xx, kref);

	mutex_lock(&qla_cs84xx_mutex);
	list_del(&cs84xx->list);
	mutex_unlock(&qla_cs84xx_mutex);
	kfree(cs84xx);
}

void
qla84xx_put_chip(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->cs84xx)
		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
}

static int
qla84xx_init_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t status[2];
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->cs84xx->fw_update_mutex);

	rval = qla84xx_verify_chip(vha, status);

	mutex_unlock(&ha->cs84xx->fw_update_mutex);

	return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
	    QLA_SUCCESS;
}
/* 81XX Support **************************************************************/

int
qla81xx_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	struct init_cb_81xx *icb;
	struct nvram_81xx *nv;
	__le32 *dptr;
	uint8_t *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	uint32_t faddr;
	struct qla_hw_data *ha = vha->hw;
	struct active_regions active_regions = { };

	rval = QLA_SUCCESS;
	icb = (struct init_cb_81xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;
	if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
		ha->vpd_size = FA_VPD_SIZE_82XX;

	if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
		qla28xx_get_aux_images(vha, &active_regions);

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;

	faddr = ha->flt_region_vpd;
	if (IS_QLA28XX(ha)) {
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_vpd_sec;
		ql_dbg(ql_dbg_init, vha, 0x0110,
		    "Loading %s nvram image.\n",
		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
		    "primary" : "secondary");
	}
	ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);

	/* Get NVRAM data into cache and calculate checksum. */
	faddr = ha->flt_region_nvram;
	if (IS_QLA28XX(ha)) {
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_nvram_sec;
		ql_dbg(ql_dbg_init, vha, 0x0110,
		    "Loading %s nvram image.\n",
		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
		    "primary" : "secondary");
	}
	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);

	dptr = (__force __le32 *)nv;
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
	    "Contents of NVRAM:\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
		/* Reset NVRAM data. */
		ql_log(ql_log_info, vha, 0x0073,
		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, le16_to_cpu(nv->nvram_version));
		ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
		ql_log(ql_log_info, vha, 0x0074,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = cpu_to_le16(2048);
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(180);
		nv->enode_mac[0] = 0x00;
		nv->enode_mac[1] = 0xC0;
		nv->enode_mac[2] = 0xDD;
		nv->enode_mac[3] = 0x04;
		nv->enode_mac[4] = 0x05;
		nv->enode_mac[5] = 0x06 + ha->port_no + 1;

		rval = 1;
	}

	if (IS_T10_PI_CAPABLE(ha))
		nv->frame_payload_size &= cpu_to_le16(~7);

	qlt_81xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;

	/* Copy 2nd segment. */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_5 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;

	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
		icb->enode_mac[0] = 0x00;
		icb->enode_mac[1] = 0xC0;
		icb->enode_mac[2] = 0xDD;
		icb->enode_mac[3] = 0x04;
		icb->enode_mac[4] = 0x05;
		icb->enode_mac[5] = 0x06 + ha->port_no + 1;
	}

	/* Use extended-initialization control block. */
	memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);

	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLE8XXX");

	qlt_81xx_config_nvram_stage2(vha, icb);

	/* Use alternate WWN? */
	if (nv->host_p & cpu_to_le32(BIT_15)) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
		if ((nv->enhanced_features & BIT_7) == 0)
			ha->flags.scm_supported_a = 1;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* if not running MSI-X we need handshaking on interrupts */
	if (!vha->hw->flags.msix_enabled &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
		icb->firmware_options_2 |= cpu_to_le32(BIT_22);

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x0075,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode,
		    ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	/* enable RIDA Format2 */
	icb->firmware_options_3 |= cpu_to_le32(BIT_0);

	/* N2N: driver will initiate Login instead of FW */
	icb->firmware_options_3 |= cpu_to_le32(BIT_8);

	/* Determine NVMe/FCP priority for target ports */
	ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0076,
		    "NVRAM configuration failed.\n");
	}

	return (rval);
}
static int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp, *tvp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		if (ha->fce) {
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				ha->flags.fce_enabled = 0;
			}
		}

		if (ha->eft) {
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}
/*
 * qla24xx_get_fcp_prio
 *	Gets the fcp cmd priority value for the logged in port.
 *	Looks for a match of the port descriptors within
 *	each of the fcp prio config entries. If a match is found,
 *	the tag (priority) value is returned.
 *
 * Input:
 *	vha = scsi host structure pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	non-zero (if found)
 *	0 (if not found)
 *
 * Context:
 *	Kernel context
 */
static int
qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int i, entries;
	uint8_t pid_match, wwn_match;
	int priority;
	uint32_t pid1, pid2;
	uint64_t wwn1, wwn2;
	struct qla_fcp_prio_entry *pri_entry;
	struct qla_hw_data *ha = vha->hw;

	if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
		return 0;

	priority = 0;
	entries = ha->fcp_prio_cfg->num_entries;
	pri_entry = &ha->fcp_prio_cfg->entry[0];

	for (i = 0; i < entries; i++) {
		pid_match = wwn_match = 0;

		if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
			pri_entry++;
			continue;
		}

		/* check source pid for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
			pid1 = pri_entry->src_pid & INVALID_PORT_ID;
			pid2 = vha->d_id.b24 & INVALID_PORT_ID;
			if (pid1 == INVALID_PORT_ID)
				pid_match++;
			else if (pid1 == pid2)
				pid_match++;
		}

		/* check destination pid for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
			pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
			pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
			if (pid1 == INVALID_PORT_ID)
				pid_match++;
			else if (pid1 == pid2)
				pid_match++;
		}

		/* check source WWN for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
			wwn1 = wwn_to_u64(vha->port_name);
			wwn2 = wwn_to_u64(pri_entry->src_wwpn);
			if (wwn2 == (uint64_t)-1)
				wwn_match++;
			else if (wwn1 == wwn2)
				wwn_match++;
		}

		/* check destination WWN for a match */
		if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
			wwn1 = wwn_to_u64(fcport->port_name);
			wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
			if (wwn2 == (uint64_t)-1)
				wwn_match++;
			else if (wwn1 == wwn2)
				wwn_match++;
		}

		if (pid_match == 2 || wwn_match == 2) {
			/* Found a matching entry */
			if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
				priority = pri_entry->tag;
			break;
		}

		pri_entry++;
	}

	return priority;
}
/*
 * qla24xx_update_fcport_fcp_prio
 *	Activates fcp priority for the logged in fc port
 *
 * Input:
 *	vha = scsi host structure pointer.
 *	fcp = port structure pointer.
 *
 * Return:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int ret;
	int priority;
	uint16_t mb[5];

	if (fcport->port_type != FCT_TARGET ||
	    fcport->loop_id == FC_NO_LOOP_ID)
		return QLA_FUNCTION_FAILED;

	priority = qla24xx_get_fcp_prio(vha, fcport);
	if (priority < 0)
		return QLA_FUNCTION_FAILED;

	if (IS_P3P_TYPE(vha->hw)) {
		fcport->fcp_prio = priority & 0xf;
		return QLA_SUCCESS;
	}

	ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
	if (ret == QLA_SUCCESS) {
		if (fcport->fcp_prio != priority)
			ql_dbg(ql_dbg_user, vha, 0x709e,
			    "Updated FCP_CMND priority - value=%d loop_id=%d "
			    "port_id=%02x%02x%02x.\n", priority,
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
		fcport->fcp_prio = priority & 0xf;
	} else
		ql_dbg(ql_dbg_user, vha, 0x704f,
		    "Unable to update FCP_CMND priority - ret=0x%x for "
		    "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa);
	return ret;
}
/*
 * qla24xx_update_all_fcp_prio
 *	Activates fcp priority for all the logged in ports
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Return:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
{
	int ret;
	fc_port_t *fcport;

	ret = QLA_FUNCTION_FAILED;
	/* We need to set priority for all logged in ports */
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		ret = qla24xx_update_fcport_fcp_prio(vha, fcport);

	return ret;
}
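/*
 * Queue-pair management for multi-queue capable ISPs.  A qpair bundles one
 * request and one response queue with its own MSI-X vector, lock and SRB
 * mempool; creation below unwinds through the fail_* labels on error.
 */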
struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
	int vp_idx, bool startqp)
{
	int rsp_id = 0;
	int req_id = 0;
	int i;
	struct qla_hw_data *ha = vha->hw;
	uint16_t qpair_id = 0;
	struct qla_qpair *qpair = NULL;
	struct qla_msix_entry *msix;

	if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
		ql_log(ql_log_warn, vha, 0x00181,
		    "FW/Driver is not multi-queue capable.\n");
		return NULL;
	}

	if (ql2xmqsupport || ql2xnvmeenable) {
		qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
		if (qpair == NULL) {
			ql_log(ql_log_warn, vha, 0x0182,
			    "Failed to allocate memory for queue pair.\n");
			return NULL;
		}

		qpair->hw = vha->hw;
		qpair->qp_lock_ptr = &qpair->qp_lock;
		spin_lock_init(&qpair->qp_lock);
		qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;

		/* Assign available que pair id */
		mutex_lock(&ha->mq_lock);
		qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
		if (ha->num_qpairs >= ha->max_qpairs) {
			mutex_unlock(&ha->mq_lock);
			ql_log(ql_log_warn, vha, 0x0183,
			    "No resources to create additional q pair.\n");
			goto fail_qid_map;
		}
		ha->num_qpairs++;
		set_bit(qpair_id, ha->qpair_qid_map);
		ha->queue_pair_map[qpair_id] = qpair;
		qpair->id = qpair_id;
		qpair->vp_idx = vp_idx;
		qpair->fw_started = ha->flags.fw_started;
		INIT_LIST_HEAD(&qpair->hints_list);
		INIT_LIST_HEAD(&qpair->dsd_list);
		qpair->chip_reset = ha->base_qpair->chip_reset;
		qpair->enable_class_2 = ha->base_qpair->enable_class_2;
		qpair->enable_explicit_conf =
		    ha->base_qpair->enable_explicit_conf;

		for (i = 0; i < ha->msix_count; i++) {
			msix = &ha->msix_entries[i];
			if (msix->in_use)
				continue;
			qpair->msix = msix;
			ql_dbg(ql_dbg_multiq, vha, 0xc00f,
			    "Vector %x selected for qpair\n", msix->vector);
			break;
		}
		if (!qpair->msix) {
			ql_log(ql_log_warn, vha, 0x0184,
			    "Out of MSI-X vectors!.\n");
			goto fail_msix;
		}

		qpair->msix->in_use = 1;
		list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
		qpair->pdev = ha->pdev;
		if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
			qpair->reqq_start_iocbs = qla_83xx_start_iocbs;

		mutex_unlock(&ha->mq_lock);

		/* Create response queue first */
		rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
		if (!rsp_id) {
			ql_log(ql_log_warn, vha, 0x0185,
			    "Failed to create response queue.\n");
			goto fail_rsp;
		}

		qpair->rsp = ha->rsp_q_map[rsp_id];

		/* Create request queue */
		req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
		    startqp);
		if (!req_id) {
			ql_log(ql_log_warn, vha, 0x0186,
			    "Failed to create request queue.\n");
			goto fail_req;
		}

		qpair->req = ha->req_q_map[req_id];
		qpair->rsp->req = qpair->req;
		qpair->rsp->qpair = qpair;

		if (!qpair->cpu_mapped)
			qla_cpu_update(qpair, raw_smp_processor_id());

		if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
			if (ha->fw_attributes & BIT_4)
				qpair->difdix_supported = 1;
		}

		qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
		if (!qpair->srb_mempool) {
			ql_log(ql_log_warn, vha, 0xd036,
			    "Failed to create srb mempool for qpair %d\n",
			    qpair->id);
			goto fail_mempool;
		}

		if (qla_create_buf_pool(vha, qpair)) {
			ql_log(ql_log_warn, vha, 0xd036,
			    "Failed to initialize buf pool for qpair %d\n",
			    qpair->id);
			goto fail_bufpool;
		}

		/* Mark as online */
		qpair->online = 1;

		if (!vha->flags.qpairs_available)
			vha->flags.qpairs_available = 1;

		ql_dbg(ql_dbg_multiq, vha, 0xc00d,
		    "Request/Response queue pair created, id %d\n",
		    qpair->id);
		ql_dbg(ql_dbg_init, vha, 0x0187,
		    "Request/Response queue pair created, id %d\n",
		    qpair->id);
	}
	return qpair;

fail_bufpool:
	mempool_destroy(qpair->srb_mempool);
fail_mempool:
	qla25xx_delete_req_que(vha, qpair->req);
fail_req:
	qla25xx_delete_rsp_que(vha, qpair->rsp);
fail_rsp:
	mutex_lock(&ha->mq_lock);
	qpair->msix->in_use = 0;
	list_del(&qpair->qp_list_elem);
	if (list_empty(&vha->qp_list))
		vha->flags.qpairs_available = 0;
fail_msix:
	ha->queue_pair_map[qpair_id] = NULL;
	clear_bit(qpair_id, ha->qpair_qid_map);
	ha->num_qpairs--;
fail_qid_map:
	mutex_unlock(&ha->mq_lock);
	kfree(qpair);
	return NULL;
}
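/*
 * Teardown mirrors creation: delete the request and response queues,
 * release any T10-PI dsd buffers still queued on the qpair, then drop the
 * qpair from the qid map under mq_lock.
 */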
int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
{
	int ret = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = qpair->hw;

	qpair->delete_in_progress = 1;

	qla_free_buf_pool(qpair);

	ret = qla25xx_delete_req_que(vha, qpair->req);
	if (ret != QLA_SUCCESS)
		goto fail;

	ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
	if (ret != QLA_SUCCESS)
		goto fail;

	if (!list_empty(&qpair->dsd_list)) {
		struct dsd_dma *dsd_ptr, *tdsd_ptr;

		/* clean up allocated prev pool */
		list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
		    &qpair->dsd_list, list) {
			dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
			    dsd_ptr->dsd_list_dma);
			list_del(&dsd_ptr->list);
			kfree(dsd_ptr);
		}
	}

	mutex_lock(&ha->mq_lock);
	ha->queue_pair_map[qpair->id] = NULL;
	clear_bit(qpair->id, ha->qpair_qid_map);
	ha->num_qpairs--;
	list_del(&qpair->qp_list_elem);
	if (list_empty(&vha->qp_list)) {
		vha->flags.qpairs_available = 0;
		vha->flags.qpairs_req_created = 0;
		vha->flags.qpairs_rsp_created = 0;
	}
	mempool_destroy(qpair->srb_mempool);
	kfree(qpair);
	mutex_unlock(&ha->mq_lock);

	return QLA_SUCCESS;
fail:
	return ret;
}
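/*
 * Helpers for the vendor-specific statistics calls below: the response
 * carries one entry per requested initiator counter plus one entry per
 * discovered FCP target.  For example, qla2x00_count_set_bits(0x16)
 * returns 3, since 0x16 has three bits set.
 */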
u64
qla2x00_count_set_bits(uint32_t num)
{
	/* Brian Kernighan's Algorithm */
	u64 count = 0;

	while (num) {
		num &= (num - 1);
		count++;
	}

	return count;
}

u64
qla2x00_get_num_tgts(scsi_qla_host_t *vha)
{
	fc_port_t *f, *tf;
	u64 count = 0;

	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->port_type != FCT_TARGET)
			continue;
		count++;
	}

	return count;
}
int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags)
{
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport = NULL;
	unsigned long int_flags;

	if (flags & QLA2XX_HW_ERROR)
		vha->hw_err_cnt = 0;
	if (flags & QLA2XX_SHT_LNK_DWN)
		vha->short_link_down_cnt = 0;
	if (flags & QLA2XX_INT_ERR)
		vha->interface_err_cnt = 0;
	if (flags & QLA2XX_CMD_TIMEOUT)
		vha->cmd_timeout_cnt = 0;
	if (flags & QLA2XX_RESET_CMD_ERR)
		vha->reset_cmd_err_cnt = 0;
	if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			fcport->tgt_short_link_down_cnt = 0;
			fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
		}
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
	}
	vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;

	return 0;
}
int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags)
{
	return qla2xxx_reset_stats(host, flags);
}

int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags)
{
	return qla2xxx_reset_stats(host, flags);
}
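/*
 * Fill the initiator statistics entries first (one per flag bit), then
 * append per-target short-link-down entries, continuing the same index.
 */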
int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags,
			  void *data, u64 size)
{
	scsi_qla_host_t *vha = shost_priv(host);
	struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data;
	struct ql_vnd_stats *rsp_data = &resp->stats;
	u64 ini_entry_count = 0;
	u64 i = 0;
	u64 entry_count = 0;
	u64 num_tgt = 0;
	u32 tmp_stat_type = 0;
	fc_port_t *fcport = NULL;
	unsigned long int_flags;

	/* Copy stat type to work on it */
	tmp_stat_type = flags;

	if (tmp_stat_type & BIT_17) {
		num_tgt = qla2x00_get_num_tgts(vha);
		/* unset BIT_17 */
		tmp_stat_type &= ~(1 << 17);
	}
	ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);

	entry_count = ini_entry_count + num_tgt;

	rsp_data->entry_count = entry_count;

	i = 0;
	if (flags & QLA2XX_HW_ERROR) {
		rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->hw_err_cnt;
		i++;
	}

	if (flags & QLA2XX_SHT_LNK_DWN) {
		rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->short_link_down_cnt;
		i++;
	}

	if (flags & QLA2XX_INT_ERR) {
		rsp_data->entry[i].stat_type = QLA2XX_INT_ERR;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->interface_err_cnt;
		i++;
	}

	if (flags & QLA2XX_CMD_TIMEOUT) {
		rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->cmd_timeout_cnt;
		i++;
	}

	if (flags & QLA2XX_RESET_CMD_ERR) {
		rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR;
		rsp_data->entry[i].tgt_num = 0x0;
		rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt;
		i++;
	}

	/* i will continue from previous loop, as target
	 * entries are after initiator
	 */
	if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;
			if (!fcport->rport)
				continue;
			rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN;
			rsp_data->entry[i].tgt_num = fcport->rport->number;
			rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt;
			i++;
		}
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
	}
	resp->status = EXT_STATUS_OK;

	return 0;
}
int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags,
			  struct fc_rport *rport, void *data, u64 size)
{
	struct ql_vnd_tgt_stats_resp *tgt_data = data;
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

	tgt_data->status = 0;
	tgt_data->stats.entry_count = 1;
	tgt_data->stats.entry[0].stat_type = flags;
	tgt_data->stats.entry[0].tgt_num = rport->number;
	tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt;

	return 0;
}
int qla2xxx_disable_port(struct Scsi_Host *host)
{
	scsi_qla_host_t *vha = shost_priv(host);

	vha->hw->flags.port_isolated = 1;

	if (qla2x00_isp_reg_stat(vha->hw)) {
		ql_log(ql_log_info, vha, 0x9006,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}
	if (qla2x00_chip_is_down(vha))
		return 0;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);
		qla2x00_wait_for_sess_deletion(vha);
	}

	return 0;
}
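/*
 * Re-enabling the port clears the isolation flag and schedules a full ISP
 * abort from the DPC thread to bring the link back up.
 */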
int qla2xxx_enable_port(struct Scsi_Host *host)
{
	scsi_qla_host_t *vha = shost_priv(host);

	if (qla2x00_isp_reg_stat(vha->hw)) {
		ql_log(ql_log_info, vha, 0x9001,
		    "PCI/Register disconnect, exiting.\n");
		qla_pci_set_eeh_busy(vha);
		return FAILED;
	}

	vha->hw->flags.port_isolated = 0;
	/* Set the flag to 1, so that isp_abort can proceed */
	vha->flags.online = 1;
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);