1 // SPDX-License-Identifier: GPL-2.0-only
3 * Marvell Fibre Channel HBA Driver
4 * Copyright (c) 2021 Marvell
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <scsi/scsi_tcq.h>
14 static struct edif_sa_index_entry
*qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle
,
15 struct list_head
*sa_list
);
16 static uint16_t qla_edif_sadb_get_sa_index(fc_port_t
*fcport
,
17 struct qla_sa_update_frame
*sa_frame
);
18 static int qla_edif_sadb_delete_sa_index(fc_port_t
*fcport
, uint16_t nport_handle
,
20 static int qla_pur_get_pending(scsi_qla_host_t
*, fc_port_t
*, struct bsg_job
*);
23 struct list_head list
;
29 struct edif_sa_update_aen sa_aen
;
33 static struct els_sub_cmd
{
37 {SEND_ELS
, "send ELS"},
38 {SEND_ELS_REPLY
, "send ELS Reply"},
39 {PULL_ELS
, "retrieve ELS"},
42 const char *sc_to_str(uint16_t cmd
)
45 struct els_sub_cmd
*e
;
47 for (i
= 0; i
< ARRAY_SIZE(sc_str
); i
++) {
55 static struct edb_node
*qla_edb_getnext(scsi_qla_host_t
*vha
)
58 struct edb_node
*edbnode
= NULL
;
60 spin_lock_irqsave(&vha
->e_dbell
.db_lock
, flags
);
62 /* db nodes are fifo - no qualifications done */
63 if (!list_empty(&vha
->e_dbell
.head
)) {
64 edbnode
= list_first_entry(&vha
->e_dbell
.head
,
65 struct edb_node
, list
);
66 list_del_init(&edbnode
->list
);
69 spin_unlock_irqrestore(&vha
->e_dbell
.db_lock
, flags
);
74 static void qla_edb_node_free(scsi_qla_host_t
*vha
, struct edb_node
*node
)
76 list_del_init(&node
->list
);
80 static struct edif_list_entry
*qla_edif_list_find_sa_index(fc_port_t
*fcport
,
83 struct edif_list_entry
*entry
;
84 struct edif_list_entry
*tentry
;
85 struct list_head
*indx_list
= &fcport
->edif
.edif_indx_list
;
87 list_for_each_entry_safe(entry
, tentry
, indx_list
, next
) {
88 if (entry
->handle
== handle
)
94 /* timeout called when no traffic and delayed rx sa_index delete */
95 static void qla2x00_sa_replace_iocb_timeout(struct timer_list
*t
)
97 struct edif_list_entry
*edif_entry
= from_timer(edif_entry
, t
, timer
);
98 fc_port_t
*fcport
= edif_entry
->fcport
;
99 struct scsi_qla_host
*vha
= fcport
->vha
;
100 struct edif_sa_ctl
*sa_ctl
;
101 uint16_t nport_handle
;
102 unsigned long flags
= 0;
104 ql_dbg(ql_dbg_edif
, vha
, 0x3069,
105 "%s: nport_handle 0x%x, SA REPL Delay Timeout, %8phC portid=%06x\n",
106 __func__
, edif_entry
->handle
, fcport
->port_name
, fcport
->d_id
.b24
);
109 * if delete_sa_index is valid then no one has serviced this
112 spin_lock_irqsave(&fcport
->edif
.indx_list_lock
, flags
);
115 * delete_sa_index is invalidated when we find the new sa_index in
116 * the incoming data stream. If it is not invalidated then we are
117 * still looking for the new sa_index because there is no I/O and we
118 * need to just force the rx delete and move on. Otherwise
119 * we could get another rekey which will result in an error 66.
121 if (edif_entry
->delete_sa_index
!= INVALID_EDIF_SA_INDEX
) {
122 uint16_t delete_sa_index
= edif_entry
->delete_sa_index
;
124 edif_entry
->delete_sa_index
= INVALID_EDIF_SA_INDEX
;
125 nport_handle
= edif_entry
->handle
;
126 spin_unlock_irqrestore(&fcport
->edif
.indx_list_lock
, flags
);
128 sa_ctl
= qla_edif_find_sa_ctl_by_index(fcport
,
132 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
133 "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n",
134 __func__
, sa_ctl
, delete_sa_index
, edif_entry
->update_sa_index
,
137 sa_ctl
->flags
= EDIF_SA_CTL_FLG_DEL
;
138 set_bit(EDIF_SA_CTL_REPL
, &sa_ctl
->state
);
139 qla_post_sa_replace_work(fcport
->vha
, fcport
,
140 nport_handle
, sa_ctl
);
143 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
144 "%s: sa_ctl not found for delete_sa_index: %d\n",
145 __func__
, edif_entry
->delete_sa_index
);
148 spin_unlock_irqrestore(&fcport
->edif
.indx_list_lock
, flags
);
153 * create a new list entry for this nport handle and
154 * add an sa_update index to the list - called for sa_update
156 static int qla_edif_list_add_sa_update_index(fc_port_t
*fcport
,
157 uint16_t sa_index
, uint16_t handle
)
159 struct edif_list_entry
*entry
;
160 unsigned long flags
= 0;
162 /* if the entry exists, then just update the sa_index */
163 entry
= qla_edif_list_find_sa_index(fcport
, handle
);
165 entry
->update_sa_index
= sa_index
;
171 * This is the normal path - there should be no existing entry
172 * when update is called. The exception is at startup
173 * when update is called for the first two sa_indexes
174 * followed by a delete of the first sa_index
176 entry
= kzalloc((sizeof(struct edif_list_entry
)), GFP_ATOMIC
);
180 INIT_LIST_HEAD(&entry
->next
);
181 entry
->handle
= handle
;
182 entry
->update_sa_index
= sa_index
;
183 entry
->delete_sa_index
= INVALID_EDIF_SA_INDEX
;
186 timer_setup(&entry
->timer
, qla2x00_sa_replace_iocb_timeout
, 0);
187 spin_lock_irqsave(&fcport
->edif
.indx_list_lock
, flags
);
188 list_add_tail(&entry
->next
, &fcport
->edif
.edif_indx_list
);
189 spin_unlock_irqrestore(&fcport
->edif
.indx_list_lock
, flags
);
193 /* remove an entry from the list */
194 static void qla_edif_list_delete_sa_index(fc_port_t
*fcport
, struct edif_list_entry
*entry
)
196 unsigned long flags
= 0;
198 spin_lock_irqsave(&fcport
->edif
.indx_list_lock
, flags
);
199 list_del(&entry
->next
);
200 spin_unlock_irqrestore(&fcport
->edif
.indx_list_lock
, flags
);
203 int qla_post_sa_replace_work(struct scsi_qla_host
*vha
,
204 fc_port_t
*fcport
, uint16_t nport_handle
, struct edif_sa_ctl
*sa_ctl
)
206 struct qla_work_evt
*e
;
208 e
= qla2x00_alloc_work(vha
, QLA_EVT_SA_REPLACE
);
210 return QLA_FUNCTION_FAILED
;
212 e
->u
.sa_update
.fcport
= fcport
;
213 e
->u
.sa_update
.sa_ctl
= sa_ctl
;
214 e
->u
.sa_update
.nport_handle
= nport_handle
;
215 fcport
->flags
|= FCF_ASYNC_ACTIVE
;
216 return qla2x00_post_work(vha
, e
);
220 qla_edif_sa_ctl_init(scsi_qla_host_t
*vha
, struct fc_port
*fcport
)
222 ql_dbg(ql_dbg_edif
, vha
, 0x2058,
223 "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n",
224 fcport
->node_name
, fcport
->port_name
, fcport
->d_id
.b24
);
226 fcport
->edif
.tx_rekey_cnt
= 0;
227 fcport
->edif
.rx_rekey_cnt
= 0;
229 fcport
->edif
.tx_bytes
= 0;
230 fcport
->edif
.rx_bytes
= 0;
233 static int qla_bsg_check(scsi_qla_host_t
*vha
, struct bsg_job
*bsg_job
,
236 struct extra_auth_els
*p
;
237 struct fc_bsg_reply
*bsg_reply
= bsg_job
->reply
;
238 struct qla_bsg_auth_els_request
*req
=
239 (struct qla_bsg_auth_els_request
*)bsg_job
->request
;
241 if (!vha
->hw
->flags
.edif_enabled
) {
242 ql_dbg(ql_dbg_edif
, vha
, 0x9105,
243 "%s edif not enabled\n", __func__
);
246 if (DBELL_INACTIVE(vha
)) {
247 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
248 "%s doorbell not enabled\n", __func__
);
255 if (p
->sub_cmd
== PULL_ELS
) {
256 struct qla_bsg_auth_els_reply
*rpl
=
257 (struct qla_bsg_auth_els_reply
*)bsg_job
->reply
;
259 qla_pur_get_pending(vha
, fcport
, bsg_job
);
261 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
262 "%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n",
263 __func__
, sc_to_str(p
->sub_cmd
), fcport
->port_name
,
264 fcport
->d_id
.b24
, rpl
->rx_xchg_address
,
265 rpl
->r
.reply_payload_rcv_len
, bsg_job
);
273 bsg_job_done(bsg_job
, bsg_reply
->result
,
274 bsg_reply
->reply_payload_rcv_len
);
279 qla2x00_find_fcport_by_pid(scsi_qla_host_t
*vha
, port_id_t
*id
)
284 list_for_each_entry_safe(f
, tf
, &vha
->vp_fcports
, list
) {
285 if (f
->d_id
.b24
== id
->b24
)
292 * qla_edif_app_check(): check for valid application id.
293 * @vha: host adapter pointer
294 * @appid: application id
295 * Return: false = fail, true = pass
298 qla_edif_app_check(scsi_qla_host_t
*vha
, struct app_id appid
)
300 /* check that the app is allow/known to the driver */
302 if (appid
.app_vid
!= EDIF_APP_ID
) {
303 ql_dbg(ql_dbg_edif
, vha
, 0x911d, "%s app id not ok (%x)",
304 __func__
, appid
.app_vid
);
308 if (appid
.version
!= EDIF_VERSION1
) {
309 ql_dbg(ql_dbg_edif
, vha
, 0x911d, "%s app version is not ok (%x)",
310 __func__
, appid
.version
);
318 qla_edif_free_sa_ctl(fc_port_t
*fcport
, struct edif_sa_ctl
*sa_ctl
,
321 unsigned long flags
= 0;
323 spin_lock_irqsave(&fcport
->edif
.sa_list_lock
, flags
);
324 list_del(&sa_ctl
->next
);
325 spin_unlock_irqrestore(&fcport
->edif
.sa_list_lock
, flags
);
327 fcport
->edif
.tx_rekey_cnt
--;
329 fcport
->edif
.rx_rekey_cnt
--;
333 /* return an index to the freepool */
334 static void qla_edif_add_sa_index_to_freepool(fc_port_t
*fcport
, int dir
,
338 struct scsi_qla_host
*vha
= fcport
->vha
;
339 struct qla_hw_data
*ha
= vha
->hw
;
340 unsigned long flags
= 0;
341 u16 lsa_index
= sa_index
;
343 ql_dbg(ql_dbg_edif
+ ql_dbg_verbose
, vha
, 0x3063,
344 "%s: entry\n", __func__
);
347 sa_id_map
= ha
->edif_tx_sa_id_map
;
348 lsa_index
-= EDIF_TX_SA_INDEX_BASE
;
350 sa_id_map
= ha
->edif_rx_sa_id_map
;
353 spin_lock_irqsave(&ha
->sadb_fp_lock
, flags
);
354 clear_bit(lsa_index
, sa_id_map
);
355 spin_unlock_irqrestore(&ha
->sadb_fp_lock
, flags
);
356 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
357 "%s: index %d added to free pool\n", __func__
, sa_index
);
360 static void __qla2x00_release_all_sadb(struct scsi_qla_host
*vha
,
361 struct fc_port
*fcport
, struct edif_sa_index_entry
*entry
,
364 struct edif_list_entry
*edif_entry
;
365 struct edif_sa_ctl
*sa_ctl
;
369 for (i
= 0; i
< 2; i
++) {
370 if (entry
->sa_pair
[i
].sa_index
== INVALID_EDIF_SA_INDEX
)
373 if (fcport
->loop_id
!= entry
->handle
) {
374 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
375 "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n",
376 __func__
, i
, entry
->handle
, fcport
->loop_id
,
377 entry
->sa_pair
[i
].sa_index
);
380 /* release the sa_ctl */
381 sa_ctl
= qla_edif_find_sa_ctl_by_index(fcport
,
382 entry
->sa_pair
[i
].sa_index
, pdir
);
384 qla_edif_find_sa_ctl_by_index(fcport
, sa_ctl
->index
, pdir
)) {
385 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
386 "%s: freeing sa_ctl for index %d\n", __func__
, sa_ctl
->index
);
387 qla_edif_free_sa_ctl(fcport
, sa_ctl
, sa_ctl
->index
);
389 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
390 "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__
, sa_ctl
);
393 /* Release the index */
394 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
395 "%s: freeing sa_index %d, nph: 0x%x\n",
396 __func__
, entry
->sa_pair
[i
].sa_index
, entry
->handle
);
398 dir
= (entry
->sa_pair
[i
].sa_index
<
399 EDIF_TX_SA_INDEX_BASE
) ? 0 : 1;
400 qla_edif_add_sa_index_to_freepool(fcport
, dir
,
401 entry
->sa_pair
[i
].sa_index
);
403 /* Delete timer on RX */
404 if (pdir
!= SAU_FLG_TX
) {
406 qla_edif_list_find_sa_index(fcport
, entry
->handle
);
408 ql_dbg(ql_dbg_edif
, vha
, 0x5033,
409 "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
410 __func__
, edif_entry
, edif_entry
->update_sa_index
,
411 edif_entry
->delete_sa_index
);
412 qla_edif_list_delete_sa_index(fcport
, edif_entry
);
414 * valid delete_sa_index indicates there is a rx
415 * delayed delete queued
417 if (edif_entry
->delete_sa_index
!=
418 INVALID_EDIF_SA_INDEX
) {
419 timer_shutdown(&edif_entry
->timer
);
421 /* build and send the aen */
422 fcport
->edif
.rx_sa_set
= 1;
423 fcport
->edif
.rx_sa_pending
= 0;
424 qla_edb_eventcreate(vha
,
425 VND_CMD_AUTH_STATE_SAUPDATE_COMPL
,
426 QL_VND_SA_STAT_SUCCESS
,
427 QL_VND_RX_SA_KEY
, fcport
);
429 ql_dbg(ql_dbg_edif
, vha
, 0x5033,
430 "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
431 __func__
, edif_entry
, edif_entry
->update_sa_index
,
432 edif_entry
->delete_sa_index
);
439 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
440 "%s: %d %s keys released\n",
441 __func__
, key_cnt
, pdir
? "tx" : "rx");
444 /* find an release all outstanding sadb sa_indicies */
445 void qla2x00_release_all_sadb(struct scsi_qla_host
*vha
, struct fc_port
*fcport
)
447 struct edif_sa_index_entry
*entry
, *tmp
;
448 struct qla_hw_data
*ha
= vha
->hw
;
451 ql_dbg(ql_dbg_edif
+ ql_dbg_verbose
, vha
, 0x3063,
452 "%s: Starting...\n", __func__
);
454 spin_lock_irqsave(&ha
->sadb_lock
, flags
);
456 list_for_each_entry_safe(entry
, tmp
, &ha
->sadb_rx_index_list
, next
) {
457 if (entry
->fcport
== fcport
) {
458 list_del(&entry
->next
);
459 spin_unlock_irqrestore(&ha
->sadb_lock
, flags
);
460 __qla2x00_release_all_sadb(vha
, fcport
, entry
, 0);
462 spin_lock_irqsave(&ha
->sadb_lock
, flags
);
467 list_for_each_entry_safe(entry
, tmp
, &ha
->sadb_tx_index_list
, next
) {
468 if (entry
->fcport
== fcport
) {
469 list_del(&entry
->next
);
470 spin_unlock_irqrestore(&ha
->sadb_lock
, flags
);
472 __qla2x00_release_all_sadb(vha
, fcport
, entry
, SAU_FLG_TX
);
475 spin_lock_irqsave(&ha
->sadb_lock
, flags
);
479 spin_unlock_irqrestore(&ha
->sadb_lock
, flags
);
483 * qla_delete_n2n_sess_and_wait: search for N2N session, tear it down and
484 * wait for tear down to complete. In N2N topology, there is only one
485 * session being active in tracking the remote device.
486 * @vha: host adapter pointer
487 * return code: 0 - found the session and completed the tear down.
488 * 1 - timeout occurred. Caller to use link bounce to reset.
490 static int qla_delete_n2n_sess_and_wait(scsi_qla_host_t
*vha
)
492 struct fc_port
*fcport
;
494 ulong expire
= jiffies
+ 23 * HZ
;
496 if (!N2N_TOPO(vha
->hw
))
500 list_for_each_entry(fcport
, &vha
->vp_fcports
, list
) {
501 if (!fcport
->n2n_flag
)
504 ql_dbg(ql_dbg_disc
, fcport
->vha
, 0x2016,
505 "%s reset sess at app start \n", __func__
);
507 qla_edif_sa_ctl_init(vha
, fcport
);
508 qlt_schedule_sess_for_deletion(fcport
);
510 while (time_before_eq(jiffies
, expire
)) {
511 if (fcport
->disc_state
!= DSC_DELETE_PEND
) {
518 set_bit(RELOGIN_NEEDED
, &vha
->dpc_flags
);
526 * qla_edif_app_start: application has announce its present
527 * @vha: host adapter pointer
528 * @bsg_job: user request
530 * Set/activate doorbell. Reset current sessions and re-login with
534 qla_edif_app_start(scsi_qla_host_t
*vha
, struct bsg_job
*bsg_job
)
537 struct fc_bsg_reply
*bsg_reply
= bsg_job
->reply
;
538 struct app_start appstart
;
539 struct app_start_reply appreply
;
540 struct fc_port
*fcport
, *tf
;
542 ql_log(ql_log_info
, vha
, 0x1313,
543 "EDIF application registration with driver, FC device connections will be re-established.\n");
545 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
546 bsg_job
->request_payload
.sg_cnt
, &appstart
,
547 sizeof(struct app_start
));
549 ql_dbg(ql_dbg_edif
, vha
, 0x911d, "%s app_vid=%x app_start_flags %x\n",
550 __func__
, appstart
.app_info
.app_vid
, appstart
.app_start_flags
);
552 if (DBELL_INACTIVE(vha
)) {
553 /* mark doorbell as active since an app is now present */
554 vha
->e_dbell
.db_flags
|= EDB_ACTIVE
;
559 if (N2N_TOPO(vha
->hw
)) {
560 list_for_each_entry_safe(fcport
, tf
, &vha
->vp_fcports
, list
)
561 fcport
->n2n_link_reset_cnt
= 0;
563 if (vha
->hw
->flags
.n2n_fw_acc_sec
) {
564 bool link_bounce
= false;
566 * While authentication app was not running, remote device
567 * could still try to login with this local port. Let's
568 * reset the session, reconnect and re-authenticate.
570 if (qla_delete_n2n_sess_and_wait(vha
))
573 /* bounce the link to start login */
574 if (!vha
->hw
->flags
.n2n_bigger
|| link_bounce
) {
575 set_bit(N2N_LINK_RESET
, &vha
->dpc_flags
);
576 qla2xxx_wake_dpc(vha
);
579 qla2x00_wait_for_hba_online(vha
);
580 set_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
);
581 qla2xxx_wake_dpc(vha
);
582 qla2x00_wait_for_hba_online(vha
);
585 list_for_each_entry_safe(fcport
, tf
, &vha
->vp_fcports
, list
) {
586 ql_dbg(ql_dbg_edif
, vha
, 0x2058,
587 "FCSP - nn %8phN pn %8phN portid=%06x.\n",
588 fcport
->node_name
, fcport
->port_name
,
590 ql_dbg(ql_dbg_edif
, vha
, 0xf084,
591 "%s: se_sess %p / sess %p from port %8phC "
592 "loop_id %#04x s_id %06x logout %d "
593 "keep %d els_logo %d disc state %d auth state %d"
595 __func__
, fcport
->se_sess
, fcport
,
596 fcport
->port_name
, fcport
->loop_id
,
597 fcport
->d_id
.b24
, fcport
->logout_on_delete
,
598 fcport
->keep_nport_handle
, fcport
->send_els_logo
,
599 fcport
->disc_state
, fcport
->edif
.auth_state
,
600 fcport
->edif
.app_stop
);
602 if (atomic_read(&vha
->loop_state
) == LOOP_DOWN
)
605 fcport
->login_retry
= vha
->hw
->login_retry_count
;
607 fcport
->edif
.app_stop
= 0;
608 fcport
->edif
.app_sess_online
= 0;
610 if (fcport
->scan_state
!= QLA_FCPORT_FOUND
)
613 if (fcport
->port_type
== FCT_UNKNOWN
&&
614 !fcport
->fc4_features
)
615 rval
= qla24xx_async_gffid(vha
, fcport
, true);
617 if (!rval
&& !(fcport
->fc4_features
& FC4_FF_TARGET
||
618 fcport
->port_type
& (FCT_TARGET
|FCT_NVME_TARGET
)))
623 ql_dbg(ql_dbg_edif
, vha
, 0x911e,
624 "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
625 __func__
, fcport
->port_name
);
626 qlt_schedule_sess_for_deletion(fcport
);
627 qla_edif_sa_ctl_init(vha
, fcport
);
629 set_bit(RELOGIN_NEEDED
, &vha
->dpc_flags
);
632 if (vha
->pur_cinfo
.enode_flags
!= ENODE_ACTIVE
) {
633 /* mark as active since an app is now present */
634 vha
->pur_cinfo
.enode_flags
= ENODE_ACTIVE
;
636 ql_dbg(ql_dbg_edif
, vha
, 0x911f, "%s enode already active\n",
641 appreply
.host_support_edif
= vha
->hw
->flags
.edif_enabled
;
642 appreply
.edif_enode_active
= vha
->pur_cinfo
.enode_flags
;
643 appreply
.edif_edb_active
= vha
->e_dbell
.db_flags
;
644 appreply
.version
= EDIF_VERSION1
;
646 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
648 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
650 bsg_reply
->reply_payload_rcv_len
= sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
651 bsg_job
->reply_payload
.sg_cnt
,
653 sizeof(struct app_start_reply
));
655 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
656 "%s app start completed with 0x%x\n",
663 * qla_edif_app_stop - app has announced it's exiting.
664 * @vha: host adapter pointer
665 * @bsg_job: user space command pointer
667 * Free any in flight messages, clear all doorbell events
668 * to application. Reject any message relate to security.
671 qla_edif_app_stop(scsi_qla_host_t
*vha
, struct bsg_job
*bsg_job
)
673 struct app_stop appstop
;
674 struct fc_bsg_reply
*bsg_reply
= bsg_job
->reply
;
675 struct fc_port
*fcport
, *tf
;
677 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
678 bsg_job
->request_payload
.sg_cnt
, &appstop
,
679 sizeof(struct app_stop
));
681 ql_dbg(ql_dbg_edif
, vha
, 0x911d, "%s Stopping APP: app_vid=%x\n",
682 __func__
, appstop
.app_info
.app_vid
);
684 /* Call db stop and enode stop functions */
686 /* if we leave this running short waits are operational < 16 secs */
687 qla_enode_stop(vha
); /* stop enode */
688 qla_edb_stop(vha
); /* stop db */
690 list_for_each_entry_safe(fcport
, tf
, &vha
->vp_fcports
, list
) {
691 if (!(fcport
->flags
& FCF_FCSP_DEVICE
))
694 if (fcport
->flags
& FCF_FCSP_DEVICE
) {
695 ql_dbg(ql_dbg_edif
, vha
, 0xf084,
696 "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n",
698 fcport
->port_name
, fcport
->loop_id
, fcport
->d_id
.b24
,
699 fcport
->logout_on_delete
, fcport
->keep_nport_handle
,
700 fcport
->send_els_logo
);
702 if (atomic_read(&vha
->loop_state
) == LOOP_DOWN
)
705 fcport
->edif
.app_stop
= 1;
706 ql_dbg(ql_dbg_edif
, vha
, 0x911e,
707 "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
708 __func__
, fcport
->port_name
);
710 fcport
->send_els_logo
= 1;
711 qlt_schedule_sess_for_deletion(fcport
);
715 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
716 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
718 /* no return interface to app - it assumes we cleaned up ok */
724 qla_edif_app_chk_sa_update(scsi_qla_host_t
*vha
, fc_port_t
*fcport
,
725 struct app_plogi_reply
*appplogireply
)
729 if (!(fcport
->edif
.rx_sa_set
&& fcport
->edif
.tx_sa_set
)) {
730 ql_dbg(ql_dbg_edif
, vha
, 0x911e,
731 "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
732 __func__
, fcport
->port_name
, fcport
->edif
.tx_sa_set
,
733 fcport
->edif
.rx_sa_set
);
734 appplogireply
->prli_status
= 0;
737 ql_dbg(ql_dbg_edif
, vha
, 0x911e,
738 "%s wwpn %8phC Both SA(s) updated.\n", __func__
,
740 fcport
->edif
.rx_sa_set
= fcport
->edif
.tx_sa_set
= 0;
741 fcport
->edif
.rx_sa_pending
= fcport
->edif
.tx_sa_pending
= 0;
742 appplogireply
->prli_status
= 1;
748 * qla_edif_app_authok - authentication by app succeeded. Driver can proceed
750 * @vha: host adapter pointer
751 * @bsg_job: user request
754 qla_edif_app_authok(scsi_qla_host_t
*vha
, struct bsg_job
*bsg_job
)
756 struct auth_complete_cmd appplogiok
;
757 struct app_plogi_reply appplogireply
= {0};
758 struct fc_bsg_reply
*bsg_reply
= bsg_job
->reply
;
759 fc_port_t
*fcport
= NULL
;
760 port_id_t portid
= {0};
762 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
763 bsg_job
->request_payload
.sg_cnt
, &appplogiok
,
764 sizeof(struct auth_complete_cmd
));
766 /* silent unaligned access warning */
767 portid
.b
.domain
= appplogiok
.u
.d_id
.b
.domain
;
768 portid
.b
.area
= appplogiok
.u
.d_id
.b
.area
;
769 portid
.b
.al_pa
= appplogiok
.u
.d_id
.b
.al_pa
;
771 appplogireply
.version
= EDIF_VERSION1
;
772 switch (appplogiok
.type
) {
774 fcport
= qla2x00_find_fcport_by_wwpn(vha
,
775 appplogiok
.u
.wwpn
, 0);
777 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
778 "%s wwpn lookup failed: %8phC\n",
779 __func__
, appplogiok
.u
.wwpn
);
782 fcport
= qla2x00_find_fcport_by_pid(vha
, &portid
);
784 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
785 "%s d_id lookup failed: %x\n", __func__
,
789 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
790 "%s undefined type: %x\n", __func__
,
796 SET_DID_STATUS(bsg_reply
->result
, DID_ERROR
);
801 * if port is online then this is a REKEY operation
802 * Only do sa update checking
804 if (atomic_read(&fcport
->state
) == FCS_ONLINE
) {
805 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
806 "%s Skipping PRLI complete based on rekey\n", __func__
);
807 appplogireply
.prli_status
= 1;
808 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
809 qla_edif_app_chk_sa_update(vha
, fcport
, &appplogireply
);
813 /* make sure in AUTH_PENDING or else reject */
814 if (fcport
->disc_state
!= DSC_LOGIN_AUTH_PEND
) {
815 ql_dbg(ql_dbg_edif
, vha
, 0x911e,
816 "%s wwpn %8phC is not in auth pending state (%x)\n",
817 __func__
, fcport
->port_name
, fcport
->disc_state
);
818 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
819 appplogireply
.prli_status
= 0;
823 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
824 appplogireply
.prli_status
= 1;
825 fcport
->edif
.authok
= 1;
826 if (!(fcport
->edif
.rx_sa_set
&& fcport
->edif
.tx_sa_set
)) {
827 ql_dbg(ql_dbg_edif
, vha
, 0x911e,
828 "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
829 __func__
, fcport
->port_name
, fcport
->edif
.tx_sa_set
,
830 fcport
->edif
.rx_sa_set
);
831 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
832 appplogireply
.prli_status
= 0;
836 ql_dbg(ql_dbg_edif
, vha
, 0x911e,
837 "%s wwpn %8phC Both SA(s) updated.\n", __func__
,
839 fcport
->edif
.rx_sa_set
= fcport
->edif
.tx_sa_set
= 0;
840 fcport
->edif
.rx_sa_pending
= fcport
->edif
.tx_sa_pending
= 0;
843 if (qla_ini_mode_enabled(vha
)) {
844 ql_dbg(ql_dbg_edif
, vha
, 0x911e,
845 "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
846 __func__
, fcport
->port_name
);
847 qla24xx_post_prli_work(vha
, fcport
);
851 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
852 bsg_reply
->reply_payload_rcv_len
= sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
853 bsg_job
->reply_payload
.sg_cnt
,
855 sizeof(struct app_plogi_reply
));
861 * qla_edif_app_authfail - authentication by app has failed. Driver is given
862 * notice to tear down current session.
863 * @vha: host adapter pointer
864 * @bsg_job: user request
867 qla_edif_app_authfail(scsi_qla_host_t
*vha
, struct bsg_job
*bsg_job
)
870 struct auth_complete_cmd appplogifail
;
871 struct fc_bsg_reply
*bsg_reply
= bsg_job
->reply
;
872 fc_port_t
*fcport
= NULL
;
873 port_id_t portid
= {0};
875 ql_dbg(ql_dbg_edif
, vha
, 0x911d, "%s app auth fail\n", __func__
);
877 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
878 bsg_job
->request_payload
.sg_cnt
, &appplogifail
,
879 sizeof(struct auth_complete_cmd
));
881 /* silent unaligned access warning */
882 portid
.b
.domain
= appplogifail
.u
.d_id
.b
.domain
;
883 portid
.b
.area
= appplogifail
.u
.d_id
.b
.area
;
884 portid
.b
.al_pa
= appplogifail
.u
.d_id
.b
.al_pa
;
887 * TODO: edif: app has failed this plogi. Inform driver to
888 * take any action (if any).
890 switch (appplogifail
.type
) {
892 fcport
= qla2x00_find_fcport_by_wwpn(vha
,
893 appplogifail
.u
.wwpn
, 0);
894 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
897 fcport
= qla2x00_find_fcport_by_pid(vha
, &portid
);
899 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
900 "%s d_id lookup failed: %x\n", __func__
,
902 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
905 ql_dbg(ql_dbg_edif
, vha
, 0x911e,
906 "%s undefined type: %x\n", __func__
,
908 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
909 SET_DID_STATUS(bsg_reply
->result
, DID_ERROR
);
914 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
915 "%s fcport is 0x%p\n", __func__
, fcport
);
918 /* set/reset edif values and flags */
919 ql_dbg(ql_dbg_edif
, vha
, 0x911e,
920 "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n",
921 __func__
, fcport
->port_name
, fcport
->loop_id
, fcport
->d_id
.b24
);
923 if (qla_ini_mode_enabled(fcport
->vha
)) {
924 fcport
->send_els_logo
= 1;
925 qlt_schedule_sess_for_deletion(fcport
);
933 * qla_edif_app_getfcinfo - app would like to read session info (wwpn, nportid,
934 * [initiator|target] mode. It can specific session with specific nport id or
936 * @vha: host adapter pointer
937 * @bsg_job: user request pointer
940 qla_edif_app_getfcinfo(scsi_qla_host_t
*vha
, struct bsg_job
*bsg_job
)
944 struct fc_bsg_reply
*bsg_reply
= bsg_job
->reply
;
945 struct app_pinfo_req app_req
;
946 struct app_pinfo_reply
*app_reply
;
949 ql_dbg(ql_dbg_edif
, vha
, 0x911d, "%s app get fcinfo\n", __func__
);
951 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
952 bsg_job
->request_payload
.sg_cnt
, &app_req
,
953 sizeof(struct app_pinfo_req
));
955 app_reply
= kzalloc((sizeof(struct app_pinfo_reply
) +
956 sizeof(struct app_pinfo
) * app_req
.num_ports
), GFP_KERNEL
);
959 SET_DID_STATUS(bsg_reply
->result
, DID_ERROR
);
962 struct fc_port
*fcport
= NULL
, *tf
;
964 app_reply
->version
= EDIF_VERSION1
;
966 list_for_each_entry_safe(fcport
, tf
, &vha
->vp_fcports
, list
) {
967 if (!(fcport
->flags
& FCF_FCSP_DEVICE
))
970 tdid
.b
.domain
= app_req
.remote_pid
.domain
;
971 tdid
.b
.area
= app_req
.remote_pid
.area
;
972 tdid
.b
.al_pa
= app_req
.remote_pid
.al_pa
;
974 ql_dbg(ql_dbg_edif
, vha
, 0x2058,
975 "APP request entry - portid=%06x.\n", tdid
.b24
);
977 /* Ran out of space */
978 if (pcnt
>= app_req
.num_ports
)
981 if (tdid
.b24
!= 0 && tdid
.b24
!= fcport
->d_id
.b24
)
984 if (!N2N_TOPO(vha
->hw
)) {
985 if (fcport
->scan_state
!= QLA_FCPORT_FOUND
)
988 if (fcport
->port_type
== FCT_UNKNOWN
&&
989 !fcport
->fc4_features
)
990 rval
= qla24xx_async_gffid(vha
, fcport
,
994 !(fcport
->fc4_features
& FC4_FF_TARGET
||
996 (FCT_TARGET
| FCT_NVME_TARGET
)))
1002 app_reply
->ports
[pcnt
].version
= EDIF_VERSION1
;
1003 app_reply
->ports
[pcnt
].remote_type
=
1004 VND_CMD_RTYPE_UNKNOWN
;
1005 if (fcport
->port_type
& (FCT_NVME_TARGET
| FCT_TARGET
))
1006 app_reply
->ports
[pcnt
].remote_type
|=
1007 VND_CMD_RTYPE_TARGET
;
1008 if (fcport
->port_type
& (FCT_NVME_INITIATOR
| FCT_INITIATOR
))
1009 app_reply
->ports
[pcnt
].remote_type
|=
1010 VND_CMD_RTYPE_INITIATOR
;
1012 app_reply
->ports
[pcnt
].remote_pid
= fcport
->d_id
;
1014 ql_dbg(ql_dbg_edif
, vha
, 0x2058,
1015 "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x secure %d.\n",
1016 fcport
->node_name
, fcport
->port_name
, pcnt
,
1017 fcport
->d_id
.b24
, fcport
->flags
& FCF_FCSP_DEVICE
);
1019 switch (fcport
->edif
.auth_state
) {
1020 case VND_CMD_AUTH_STATE_ELS_RCVD
:
1021 if (fcport
->disc_state
== DSC_LOGIN_AUTH_PEND
) {
1022 fcport
->edif
.auth_state
= VND_CMD_AUTH_STATE_NEEDED
;
1023 app_reply
->ports
[pcnt
].auth_state
=
1024 VND_CMD_AUTH_STATE_NEEDED
;
1026 app_reply
->ports
[pcnt
].auth_state
=
1027 VND_CMD_AUTH_STATE_ELS_RCVD
;
1031 app_reply
->ports
[pcnt
].auth_state
= fcport
->edif
.auth_state
;
1035 memcpy(app_reply
->ports
[pcnt
].remote_wwpn
,
1036 fcport
->port_name
, 8);
1038 app_reply
->ports
[pcnt
].remote_state
=
1039 (atomic_read(&fcport
->state
) ==
1040 FCS_ONLINE
? 1 : 0);
1047 app_reply
->port_count
= pcnt
;
1048 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
1051 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
1052 bsg_reply
->reply_payload_rcv_len
= sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
1053 bsg_job
->reply_payload
.sg_cnt
,
1055 sizeof(struct app_pinfo_reply
) + sizeof(struct app_pinfo
) * pcnt
);
1063 * qla_edif_app_getstats - app would like to read various statistics info
1064 * @vha: host adapter pointer
1065 * @bsg_job: user request
1068 qla_edif_app_getstats(scsi_qla_host_t
*vha
, struct bsg_job
*bsg_job
)
1071 struct fc_bsg_reply
*bsg_reply
= bsg_job
->reply
;
1074 struct app_sinfo_req app_req
;
1075 struct app_stats_reply
*app_reply
;
1078 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
1079 bsg_job
->request_payload
.sg_cnt
, &app_req
,
1080 sizeof(struct app_sinfo_req
));
1081 if (app_req
.num_ports
== 0) {
1082 ql_dbg(ql_dbg_async
, vha
, 0x911d,
1083 "%s app did not indicate number of ports to return\n",
1085 SET_DID_STATUS(bsg_reply
->result
, DID_ERROR
);
1089 size
= sizeof(struct app_stats_reply
) +
1090 (sizeof(struct app_sinfo
) * app_req
.num_ports
);
1092 app_reply
= kzalloc(size
, GFP_KERNEL
);
1094 SET_DID_STATUS(bsg_reply
->result
, DID_ERROR
);
1097 struct fc_port
*fcport
= NULL
, *tf
;
1099 app_reply
->version
= EDIF_VERSION1
;
1101 list_for_each_entry_safe(fcport
, tf
, &vha
->vp_fcports
, list
) {
1102 if (fcport
->edif
.enable
) {
1103 if (pcnt
>= app_req
.num_ports
)
1106 app_reply
->elem
[pcnt
].rekey_count
=
1107 fcport
->edif
.rekey_cnt
;
1108 app_reply
->elem
[pcnt
].tx_bytes
=
1109 fcport
->edif
.tx_bytes
;
1110 app_reply
->elem
[pcnt
].rx_bytes
=
1111 fcport
->edif
.rx_bytes
;
1113 memcpy(app_reply
->elem
[pcnt
].remote_wwpn
,
1114 fcport
->port_name
, 8);
1119 app_reply
->elem_count
= pcnt
;
1120 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
1123 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
1124 bsg_reply
->reply_payload_rcv_len
=
1125 sg_copy_from_buffer(bsg_job
->reply_payload
.sg_list
,
1126 bsg_job
->reply_payload
.sg_cnt
, app_reply
,
1127 sizeof(struct app_stats_reply
) + (sizeof(struct app_sinfo
) * pcnt
));
1135 qla_edif_ack(scsi_qla_host_t
*vha
, struct bsg_job
*bsg_job
)
1137 struct fc_port
*fcport
;
1138 struct aen_complete_cmd ack
;
1139 struct fc_bsg_reply
*bsg_reply
= bsg_job
->reply
;
1141 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
1142 bsg_job
->request_payload
.sg_cnt
, &ack
, sizeof(ack
));
1144 ql_dbg(ql_dbg_edif
, vha
, 0x70cf,
1145 "%s: %06x event_code %x\n",
1146 __func__
, ack
.port_id
.b24
, ack
.event_code
);
1148 fcport
= qla2x00_find_fcport_by_pid(vha
, &ack
.port_id
);
1149 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
1152 ql_dbg(ql_dbg_edif
, vha
, 0x70cf,
1153 "%s: unable to find fcport %06x \n",
1154 __func__
, ack
.port_id
.b24
);
1158 switch (ack
.event_code
) {
1159 case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN
:
1160 fcport
->edif
.sess_down_acked
= 1;
1168 static int qla_edif_consume_dbell(scsi_qla_host_t
*vha
, struct bsg_job
*bsg_job
)
1170 struct fc_bsg_reply
*bsg_reply
= bsg_job
->reply
;
1171 u32 sg_skip
, reply_payload_len
;
1173 struct edb_node
*dbnode
= NULL
;
1174 struct edif_app_dbell ap
;
1178 reply_payload_len
= bsg_job
->reply_payload
.payload_len
;
1180 while ((reply_payload_len
- sg_skip
) >= sizeof(struct edb_node
)) {
1181 dbnode
= qla_edb_getnext(vha
);
1185 ap
.event_code
= dbnode
->ntype
;
1186 switch (dbnode
->ntype
) {
1187 case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN
:
1188 case VND_CMD_AUTH_STATE_NEEDED
:
1189 ap
.port_id
= dbnode
->u
.plogi_did
;
1190 dat_size
+= sizeof(ap
.port_id
);
1192 case VND_CMD_AUTH_STATE_ELS_RCVD
:
1193 ap
.port_id
= dbnode
->u
.els_sid
;
1194 dat_size
+= sizeof(ap
.port_id
);
1196 case VND_CMD_AUTH_STATE_SAUPDATE_COMPL
:
1197 ap
.port_id
= dbnode
->u
.sa_aen
.port_id
;
1198 memcpy(&ap
.event_data
, &dbnode
->u
,
1199 sizeof(struct edif_sa_update_aen
));
1200 dat_size
+= sizeof(struct edif_sa_update_aen
);
1204 ql_log(ql_log_warn
, vha
, 0x09102,
1205 "%s unknown DB type=%d %p\n",
1206 __func__
, dbnode
->ntype
, dbnode
);
1209 ap
.event_data_size
= dat_size
;
1210 /* 8 = sizeof(ap.event_code + ap.event_data_size) */
1213 sg_skip
+= sg_copy_buffer(bsg_job
->reply_payload
.sg_list
,
1214 bsg_job
->reply_payload
.sg_cnt
,
1215 &ap
, dat_size
, sg_skip
, false);
1217 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
1218 "%s Doorbell consumed : type=%d %p\n",
1219 __func__
, dbnode
->ntype
, dbnode
);
1227 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
1228 bsg_reply
->reply_payload_rcv_len
= sg_skip
;
1229 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
1234 static void __qla_edif_dbell_bsg_done(scsi_qla_host_t
*vha
, struct bsg_job
*bsg_job
,
1237 struct fc_bsg_reply
*bsg_reply
= bsg_job
->reply
;
1239 /* small sleep for doorbell events to accumulate */
1243 qla_edif_consume_dbell(vha
, bsg_job
);
1245 bsg_job_done(bsg_job
, bsg_reply
->result
, bsg_reply
->reply_payload_rcv_len
);
1248 static void qla_edif_dbell_bsg_done(scsi_qla_host_t
*vha
)
1250 unsigned long flags
;
1251 struct bsg_job
*prev_bsg_job
= NULL
;
1253 spin_lock_irqsave(&vha
->e_dbell
.db_lock
, flags
);
1254 if (vha
->e_dbell
.dbell_bsg_job
) {
1255 prev_bsg_job
= vha
->e_dbell
.dbell_bsg_job
;
1256 vha
->e_dbell
.dbell_bsg_job
= NULL
;
1258 spin_unlock_irqrestore(&vha
->e_dbell
.db_lock
, flags
);
1261 __qla_edif_dbell_bsg_done(vha
, prev_bsg_job
, 0);
1265 qla_edif_dbell_bsg(scsi_qla_host_t
*vha
, struct bsg_job
*bsg_job
)
1267 unsigned long flags
;
1268 bool return_bsg
= false;
1270 /* flush previous dbell bsg */
1271 qla_edif_dbell_bsg_done(vha
);
1273 spin_lock_irqsave(&vha
->e_dbell
.db_lock
, flags
);
1274 if (list_empty(&vha
->e_dbell
.head
) && DBELL_ACTIVE(vha
)) {
1276 * when the next db event happens, bsg_job will return.
1277 * Otherwise, timer will return it.
1279 vha
->e_dbell
.dbell_bsg_job
= bsg_job
;
1280 vha
->e_dbell
.bsg_expire
= jiffies
+ 10 * HZ
;
1284 spin_unlock_irqrestore(&vha
->e_dbell
.db_lock
, flags
);
1287 __qla_edif_dbell_bsg_done(vha
, bsg_job
, 1);
1293 qla_edif_app_mgmt(struct bsg_job
*bsg_job
)
1295 struct fc_bsg_request
*bsg_request
= bsg_job
->request
;
1296 struct fc_bsg_reply
*bsg_reply
= bsg_job
->reply
;
1297 struct Scsi_Host
*host
= fc_bsg_to_shost(bsg_job
);
1298 scsi_qla_host_t
*vha
= shost_priv(host
);
1299 struct app_id appcheck
;
1302 uint32_t vnd_sc
= bsg_request
->rqst_data
.h_vendor
.vendor_cmd
[1];
1303 u32 level
= ql_dbg_edif
;
1305 /* doorbell is high traffic */
1306 if (vnd_sc
== QL_VND_SC_READ_DBELL
)
1309 ql_dbg(level
, vha
, 0x911d, "%s vnd subcmd=%x\n",
1312 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
1313 bsg_job
->request_payload
.sg_cnt
, &appcheck
,
1314 sizeof(struct app_id
));
1316 if (!vha
->hw
->flags
.edif_enabled
||
1317 test_bit(VPORT_DELETE
, &vha
->dpc_flags
)) {
1318 ql_dbg(level
, vha
, 0x911d,
1319 "%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
1320 __func__
, bsg_job
, vha
->dpc_flags
);
1322 SET_DID_STATUS(bsg_reply
->result
, DID_ERROR
);
1326 if (!qla_edif_app_check(vha
, appcheck
)) {
1327 ql_dbg(level
, vha
, 0x911d,
1328 "%s app checked failed.\n",
1331 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
1332 SET_DID_STATUS(bsg_reply
->result
, DID_ERROR
);
1337 case QL_VND_SC_SA_UPDATE
:
1339 rval
= qla24xx_sadb_update(bsg_job
);
1341 case QL_VND_SC_APP_START
:
1342 rval
= qla_edif_app_start(vha
, bsg_job
);
1344 case QL_VND_SC_APP_STOP
:
1345 rval
= qla_edif_app_stop(vha
, bsg_job
);
1347 case QL_VND_SC_AUTH_OK
:
1348 rval
= qla_edif_app_authok(vha
, bsg_job
);
1350 case QL_VND_SC_AUTH_FAIL
:
1351 rval
= qla_edif_app_authfail(vha
, bsg_job
);
1353 case QL_VND_SC_GET_FCINFO
:
1354 rval
= qla_edif_app_getfcinfo(vha
, bsg_job
);
1356 case QL_VND_SC_GET_STATS
:
1357 rval
= qla_edif_app_getstats(vha
, bsg_job
);
1359 case QL_VND_SC_AEN_COMPLETE
:
1360 rval
= qla_edif_ack(vha
, bsg_job
);
1362 case QL_VND_SC_READ_DBELL
:
1363 rval
= qla_edif_dbell_bsg(vha
, bsg_job
);
1367 ql_dbg(ql_dbg_edif
, vha
, 0x911d, "%s unknown cmd=%x\n",
1369 bsg_request
->rqst_data
.h_vendor
.vendor_cmd
[1]);
1370 rval
= EXT_STATUS_INVALID_PARAM
;
1377 ql_dbg(level
, vha
, 0x7009,
1378 "%s: %d bsg ptr done %p\n", __func__
, __LINE__
, bsg_job
);
1379 bsg_job_done(bsg_job
, bsg_reply
->result
,
1380 bsg_reply
->reply_payload_rcv_len
);
1386 static struct edif_sa_ctl
*
1387 qla_edif_add_sa_ctl(fc_port_t
*fcport
, struct qla_sa_update_frame
*sa_frame
,
1390 struct edif_sa_ctl
*sa_ctl
;
1391 struct qla_sa_update_frame
*sap
;
1392 int index
= sa_frame
->fast_sa_index
;
1393 unsigned long flags
= 0;
1395 sa_ctl
= kzalloc(sizeof(*sa_ctl
), GFP_KERNEL
);
1397 /* couldn't get space */
1398 ql_dbg(ql_dbg_edif
, fcport
->vha
, 0x9100,
1399 "unable to allocate SA CTL\n");
1404 * need to allocate sa_index here and save it
1405 * in both sa_ctl->index and sa_frame->fast_sa_index;
1406 * If alloc fails then delete sa_ctl and return NULL
1408 INIT_LIST_HEAD(&sa_ctl
->next
);
1409 sap
= &sa_ctl
->sa_frame
;
1411 sa_ctl
->index
= index
;
1412 sa_ctl
->fcport
= fcport
;
1415 ql_dbg(ql_dbg_edif
, fcport
->vha
, 0x9100,
1416 "%s: Added sa_ctl %p, index %d, state 0x%lx\n",
1417 __func__
, sa_ctl
, sa_ctl
->index
, sa_ctl
->state
);
1418 spin_lock_irqsave(&fcport
->edif
.sa_list_lock
, flags
);
1419 if (dir
== SAU_FLG_TX
)
1420 list_add_tail(&sa_ctl
->next
, &fcport
->edif
.tx_sa_list
);
1422 list_add_tail(&sa_ctl
->next
, &fcport
->edif
.rx_sa_list
);
1423 spin_unlock_irqrestore(&fcport
->edif
.sa_list_lock
, flags
);
1429 qla_edif_flush_sa_ctl_lists(fc_port_t
*fcport
)
1431 struct edif_sa_ctl
*sa_ctl
, *tsa_ctl
;
1432 unsigned long flags
= 0;
1434 spin_lock_irqsave(&fcport
->edif
.sa_list_lock
, flags
);
1436 list_for_each_entry_safe(sa_ctl
, tsa_ctl
, &fcport
->edif
.tx_sa_list
,
1438 list_del(&sa_ctl
->next
);
1442 list_for_each_entry_safe(sa_ctl
, tsa_ctl
, &fcport
->edif
.rx_sa_list
,
1444 list_del(&sa_ctl
->next
);
1448 spin_unlock_irqrestore(&fcport
->edif
.sa_list_lock
, flags
);
1451 struct edif_sa_ctl
*
1452 qla_edif_find_sa_ctl_by_index(fc_port_t
*fcport
, int index
, int dir
)
1454 struct edif_sa_ctl
*sa_ctl
, *tsa_ctl
;
1455 struct list_head
*sa_list
;
1457 if (dir
== SAU_FLG_TX
)
1458 sa_list
= &fcport
->edif
.tx_sa_list
;
1460 sa_list
= &fcport
->edif
.rx_sa_list
;
1462 list_for_each_entry_safe(sa_ctl
, tsa_ctl
, sa_list
, next
) {
1463 if (test_bit(EDIF_SA_CTL_USED
, &sa_ctl
->state
) &&
1464 sa_ctl
->index
== index
)
1470 /* add the sa to the correct list */
1472 qla24xx_check_sadb_avail_slot(struct bsg_job
*bsg_job
, fc_port_t
*fcport
,
1473 struct qla_sa_update_frame
*sa_frame
)
1475 struct edif_sa_ctl
*sa_ctl
= NULL
;
1479 dir
= (sa_frame
->flags
& SAU_FLG_TX
);
1481 /* map the spi to an sa_index */
1482 sa_index
= qla_edif_sadb_get_sa_index(fcport
, sa_frame
);
1483 if (sa_index
== RX_DELETE_NO_EDIF_SA_INDEX
) {
1484 /* process rx delete */
1485 ql_dbg(ql_dbg_edif
, fcport
->vha
, 0x3063,
1486 "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n",
1487 __func__
, fcport
->loop_id
, sa_frame
->spi
);
1489 /* build and send the aen */
1490 fcport
->edif
.rx_sa_set
= 1;
1491 fcport
->edif
.rx_sa_pending
= 0;
1492 qla_edb_eventcreate(fcport
->vha
,
1493 VND_CMD_AUTH_STATE_SAUPDATE_COMPL
,
1494 QL_VND_SA_STAT_SUCCESS
,
1495 QL_VND_RX_SA_KEY
, fcport
);
1497 /* force a return of good bsg status; */
1498 return RX_DELETE_NO_EDIF_SA_INDEX
;
1499 } else if (sa_index
== INVALID_EDIF_SA_INDEX
) {
1500 ql_dbg(ql_dbg_edif
, fcport
->vha
, 0x9100,
1501 "%s: Failed to get sa_index for spi 0x%x, dir: %d\n",
1502 __func__
, sa_frame
->spi
, dir
);
1503 return INVALID_EDIF_SA_INDEX
;
1506 ql_dbg(ql_dbg_edif
, fcport
->vha
, 0x9100,
1507 "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n",
1508 __func__
, sa_index
, sa_frame
->spi
, dir
, fcport
->loop_id
);
1510 /* This is a local copy of sa_frame. */
1511 sa_frame
->fast_sa_index
= sa_index
;
1512 /* create the sa_ctl */
1513 sa_ctl
= qla_edif_add_sa_ctl(fcport
, sa_frame
, dir
);
1515 ql_dbg(ql_dbg_edif
, fcport
->vha
, 0x9100,
1516 "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n",
1517 __func__
, sa_frame
->spi
, dir
, sa_index
);
1521 set_bit(EDIF_SA_CTL_USED
, &sa_ctl
->state
);
1523 if (dir
== SAU_FLG_TX
)
1524 fcport
->edif
.tx_rekey_cnt
++;
1526 fcport
->edif
.rx_rekey_cnt
++;
1528 ql_dbg(ql_dbg_edif
, fcport
->vha
, 0x9100,
1529 "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n",
1530 __func__
, sa_ctl
, sa_ctl
->index
, sa_ctl
->state
,
1531 fcport
->edif
.tx_rekey_cnt
,
1532 fcport
->edif
.rx_rekey_cnt
, fcport
->loop_id
);
1537 #define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0
1538 #define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2
1539 #define EDIF_MSLEEP_INTERVAL 100
1540 #define EDIF_RETRY_COUNT 50
1543 qla24xx_sadb_update(struct bsg_job
*bsg_job
)
1545 struct fc_bsg_reply
*bsg_reply
= bsg_job
->reply
;
1546 struct Scsi_Host
*host
= fc_bsg_to_shost(bsg_job
);
1547 scsi_qla_host_t
*vha
= shost_priv(host
);
1548 fc_port_t
*fcport
= NULL
;
1550 struct edif_list_entry
*edif_entry
= NULL
;
1553 int result
= 0, cnt
;
1554 struct qla_sa_update_frame sa_frame
;
1555 struct srb_iocb
*iocb_cmd
;
1558 ql_dbg(ql_dbg_edif
+ ql_dbg_verbose
, vha
, 0x911d,
1559 "%s entered, vha: 0x%p\n", __func__
, vha
);
1561 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
1562 bsg_job
->request_payload
.sg_cnt
, &sa_frame
,
1563 sizeof(struct qla_sa_update_frame
));
1565 /* Check if host is online */
1566 if (!vha
->flags
.online
) {
1567 ql_log(ql_log_warn
, vha
, 0x70a1, "Host is not online\n");
1569 SET_DID_STATUS(bsg_reply
->result
, DID_ERROR
);
1573 if (DBELL_INACTIVE(vha
)) {
1574 ql_log(ql_log_warn
, vha
, 0x70a1, "App not started\n");
1576 SET_DID_STATUS(bsg_reply
->result
, DID_ERROR
);
1580 /* silent unaligned access warning */
1581 portid
.b
.domain
= sa_frame
.port_id
.b
.domain
;
1582 portid
.b
.area
= sa_frame
.port_id
.b
.area
;
1583 portid
.b
.al_pa
= sa_frame
.port_id
.b
.al_pa
;
1585 fcport
= qla2x00_find_fcport_by_pid(vha
, &portid
);
1588 if (sa_frame
.flags
== QLA_SA_UPDATE_FLAGS_TX_KEY
)
1589 fcport
->edif
.tx_bytes
= 0;
1590 if (sa_frame
.flags
== QLA_SA_UPDATE_FLAGS_RX_KEY
)
1591 fcport
->edif
.rx_bytes
= 0;
1595 ql_dbg(ql_dbg_edif
, vha
, 0x70a3, "Failed to find port= %06x\n",
1596 sa_frame
.port_id
.b24
);
1598 SET_DID_STATUS(bsg_reply
->result
, DID_NO_CONNECT
);
1602 /* make sure the nport_handle is valid */
1603 if (fcport
->loop_id
== FC_NO_LOOP_ID
) {
1604 ql_dbg(ql_dbg_edif
, vha
, 0x70e1,
1605 "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n",
1606 __func__
, fcport
->port_name
, sa_frame
.spi
,
1607 fcport
->disc_state
);
1609 SET_DID_STATUS(bsg_reply
->result
, DID_NO_CONNECT
);
1613 /* allocate and queue an sa_ctl */
1614 result
= qla24xx_check_sadb_avail_slot(bsg_job
, fcport
, &sa_frame
);
1616 /* failure of bsg */
1617 if (result
== INVALID_EDIF_SA_INDEX
) {
1618 ql_dbg(ql_dbg_edif
, vha
, 0x70e1,
1619 "%s: %8phN, skipping update.\n",
1620 __func__
, fcport
->port_name
);
1622 SET_DID_STATUS(bsg_reply
->result
, DID_ERROR
);
1625 /* rx delete failure */
1626 } else if (result
== RX_DELETE_NO_EDIF_SA_INDEX
) {
1627 ql_dbg(ql_dbg_edif
, vha
, 0x70e1,
1628 "%s: %8phN, skipping rx delete.\n",
1629 __func__
, fcport
->port_name
);
1630 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
1634 ql_dbg(ql_dbg_edif
, vha
, 0x70e1,
1635 "%s: %8phN, sa_index in sa_frame: %d flags %xh\n",
1636 __func__
, fcport
->port_name
, sa_frame
.fast_sa_index
,
1639 /* looking for rx index and delete */
1640 if (((sa_frame
.flags
& SAU_FLG_TX
) == 0) &&
1641 (sa_frame
.flags
& SAU_FLG_INV
)) {
1642 uint16_t nport_handle
= fcport
->loop_id
;
1643 uint16_t sa_index
= sa_frame
.fast_sa_index
;
1646 * make sure we have an existing rx key, otherwise just process
1647 * this as a straight delete just like TX
1648 * This is NOT a normal case, it indicates an error recovery or key cleanup
1649 * by the ipsec code above us.
1651 edif_entry
= qla_edif_list_find_sa_index(fcport
, fcport
->loop_id
);
1653 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
1654 "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n",
1655 __func__
, fcport
->loop_id
, sa_index
);
1656 goto force_rx_delete
;
1660 * if we have a forced delete for rx, remove the sa_index from the edif list
1661 * and proceed with normal delete. The rx delay timer should not be running
1663 if ((sa_frame
.flags
& SAU_FLG_FORCE_DELETE
) == SAU_FLG_FORCE_DELETE
) {
1664 qla_edif_list_delete_sa_index(fcport
, edif_entry
);
1665 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
1666 "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n",
1667 __func__
, fcport
->loop_id
, sa_index
);
1669 goto force_rx_delete
;
1675 * if delete_sa_index is not invalid then there is already
1676 * a delayed index in progress, return bsg bad status
1678 if (edif_entry
->delete_sa_index
!= INVALID_EDIF_SA_INDEX
) {
1679 struct edif_sa_ctl
*sa_ctl
;
1681 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
1682 "%s: delete for lid 0x%x, delete_sa_index %d is pending\n",
1683 __func__
, edif_entry
->handle
, edif_entry
->delete_sa_index
);
1685 /* free up the sa_ctl that was allocated with the sa_index */
1686 sa_ctl
= qla_edif_find_sa_ctl_by_index(fcport
, sa_index
,
1687 (sa_frame
.flags
& SAU_FLG_TX
));
1689 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
1690 "%s: freeing sa_ctl for index %d\n",
1691 __func__
, sa_ctl
->index
);
1692 qla_edif_free_sa_ctl(fcport
, sa_ctl
, sa_ctl
->index
);
1695 /* release the sa_index */
1696 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
1697 "%s: freeing sa_index %d, nph: 0x%x\n",
1698 __func__
, sa_index
, nport_handle
);
1699 qla_edif_sadb_delete_sa_index(fcport
, nport_handle
, sa_index
);
1702 SET_DID_STATUS(bsg_reply
->result
, DID_ERROR
);
1706 fcport
->edif
.rekey_cnt
++;
1708 /* configure and start the rx delay timer */
1709 edif_entry
->fcport
= fcport
;
1710 edif_entry
->timer
.expires
= jiffies
+ RX_DELAY_DELETE_TIMEOUT
* HZ
;
1712 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
1713 "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n",
1714 __func__
, edif_entry
, sa_index
, nport_handle
);
1717 * Start the timer when we queue the delayed rx delete.
1718 * This is an activity timer that goes off if we have not
1719 * received packets with the new sa_index
1721 add_timer(&edif_entry
->timer
);
1724 * sa_delete for rx key with an active rx key including this one
1725 * add the delete rx sa index to the hash so we can look for it
1726 * in the rsp queue. Do this after making any changes to the
1727 * edif_entry as part of the rx delete.
1730 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
1731 "%s: delete sa_index %d, lid 0x%x to edif_list. bsg done ptr %p\n",
1732 __func__
, sa_index
, nport_handle
, bsg_job
);
1734 edif_entry
->delete_sa_index
= sa_index
;
1736 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
1737 bsg_reply
->result
= DID_OK
<< 16;
1742 * rx index and update
1743 * add the index to the list and continue with normal update
1745 } else if (((sa_frame
.flags
& SAU_FLG_TX
) == 0) &&
1746 ((sa_frame
.flags
& SAU_FLG_INV
) == 0)) {
1747 /* sa_update for rx key */
1748 uint32_t nport_handle
= fcport
->loop_id
;
1749 uint16_t sa_index
= sa_frame
.fast_sa_index
;
1753 * add the update rx sa index to the hash so we can look for it
1754 * in the rsp queue and continue normally
1757 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
1758 "%s: adding update sa_index %d, lid 0x%x to edif_list\n",
1759 __func__
, sa_index
, nport_handle
);
1761 result
= qla_edif_list_add_sa_update_index(fcport
, sa_index
,
1764 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
1765 "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n",
1766 __func__
, sa_index
, nport_handle
);
1769 if (sa_frame
.flags
& SAU_FLG_GMAC_MODE
)
1770 fcport
->edif
.aes_gmac
= 1;
1772 fcport
->edif
.aes_gmac
= 0;
1776 * sa_update for both rx and tx keys, sa_delete for tx key
1777 * immediately process the request
1779 sp
= qla2x00_get_sp(vha
, fcport
, GFP_KERNEL
);
1782 SET_DID_STATUS(bsg_reply
->result
, DID_IMM_RETRY
);
1786 sp
->type
= SRB_SA_UPDATE
;
1787 sp
->name
= "bsg_sa_update";
1788 sp
->u
.bsg_job
= bsg_job
;
1789 /* sp->free = qla2x00_bsg_sp_free; */
1790 sp
->free
= qla2x00_rel_sp
;
1791 sp
->done
= qla2x00_bsg_job_done
;
1792 iocb_cmd
= &sp
->u
.iocb_cmd
;
1793 iocb_cmd
->u
.sa_update
.sa_frame
= sa_frame
;
1796 rval
= qla2x00_start_sp(sp
);
1801 msleep(EDIF_MSLEEP_INTERVAL
);
1803 if (cnt
< EDIF_RETRY_COUNT
)
1808 ql_log(ql_dbg_edif
, vha
, 0x70e3,
1809 "%s qla2x00_start_sp failed=%d.\n",
1814 SET_DID_STATUS(bsg_reply
->result
, DID_IMM_RETRY
);
1818 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
1819 "%s: %s sent, hdl=%x, portid=%06x.\n",
1820 __func__
, sp
->name
, sp
->handle
, fcport
->d_id
.b24
);
1822 fcport
->edif
.rekey_cnt
++;
1823 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
1824 SET_DID_STATUS(bsg_reply
->result
, DID_OK
);
1829 * send back error status
1832 bsg_job
->reply_len
= sizeof(struct fc_bsg_reply
);
1833 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
1834 "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n",
1835 __func__
, bsg_reply
->result
, bsg_job
);
1836 bsg_job_done(bsg_job
, bsg_reply
->result
,
1837 bsg_reply
->reply_payload_rcv_len
);
1843 qla_enode_free(scsi_qla_host_t
*vha
, struct enode
*node
)
1845 node
->ntype
= N_UNDEF
;
1850 * qla_enode_init - initialize enode structs & lock
1851 * @vha: host adapter pointer
1853 * should only be called when driver attaching
1856 qla_enode_init(scsi_qla_host_t
*vha
)
1858 struct qla_hw_data
*ha
= vha
->hw
;
1861 if (vha
->pur_cinfo
.enode_flags
== ENODE_ACTIVE
) {
1862 /* list still active - error */
1863 ql_dbg(ql_dbg_edif
, vha
, 0x09102, "%s enode still active\n",
1868 /* initialize lock which protects pur_core & init list */
1869 spin_lock_init(&vha
->pur_cinfo
.pur_lock
);
1870 INIT_LIST_HEAD(&vha
->pur_cinfo
.head
);
1872 snprintf(name
, sizeof(name
), "%s_%d_purex", QLA2XXX_DRIVER_NAME
,
1877 * qla_enode_stop - stop and clear and enode data
1878 * @vha: host adapter pointer
1880 * called when app notified it is exiting
1883 qla_enode_stop(scsi_qla_host_t
*vha
)
1885 unsigned long flags
;
1886 struct enode
*node
, *q
;
1888 if (vha
->pur_cinfo
.enode_flags
!= ENODE_ACTIVE
) {
1889 /* doorbell list not enabled */
1890 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
1891 "%s enode not active\n", __func__
);
1895 /* grab lock so list doesn't move */
1896 spin_lock_irqsave(&vha
->pur_cinfo
.pur_lock
, flags
);
1898 vha
->pur_cinfo
.enode_flags
&= ~ENODE_ACTIVE
; /* mark it not active */
1900 /* hopefully this is a null list at this point */
1901 list_for_each_entry_safe(node
, q
, &vha
->pur_cinfo
.head
, list
) {
1902 ql_dbg(ql_dbg_edif
, vha
, 0x910f,
1903 "%s freeing enode type=%x, cnt=%x\n", __func__
, node
->ntype
,
1904 node
->dinfo
.nodecnt
);
1905 list_del_init(&node
->list
);
1906 qla_enode_free(vha
, node
);
1908 spin_unlock_irqrestore(&vha
->pur_cinfo
.pur_lock
, flags
);
1911 static void qla_enode_clear(scsi_qla_host_t
*vha
, port_id_t portid
)
1913 unsigned long flags
;
1914 struct enode
*e
, *tmp
;
1915 struct purexevent
*purex
;
1916 LIST_HEAD(enode_list
);
1918 if (vha
->pur_cinfo
.enode_flags
!= ENODE_ACTIVE
) {
1919 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
1920 "%s enode not active\n", __func__
);
1923 spin_lock_irqsave(&vha
->pur_cinfo
.pur_lock
, flags
);
1924 list_for_each_entry_safe(e
, tmp
, &vha
->pur_cinfo
.head
, list
) {
1925 purex
= &e
->u
.purexinfo
;
1926 if (purex
->pur_info
.pur_sid
.b24
== portid
.b24
) {
1927 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
1928 "%s free ELS sid=%06x. xchg %x, nb=%xh\n",
1929 __func__
, portid
.b24
,
1930 purex
->pur_info
.pur_rx_xchg_address
,
1931 purex
->pur_info
.pur_bytes_rcvd
);
1933 list_del_init(&e
->list
);
1934 list_add_tail(&e
->list
, &enode_list
);
1937 spin_unlock_irqrestore(&vha
->pur_cinfo
.pur_lock
, flags
);
1939 list_for_each_entry_safe(e
, tmp
, &enode_list
, list
) {
1940 list_del_init(&e
->list
);
1941 qla_enode_free(vha
, e
);
1946 * allocate enode struct and populate buffer
1947 * returns: enode pointer with buffers
1950 static struct enode
*
1951 qla_enode_alloc(scsi_qla_host_t
*vha
, uint32_t ntype
)
1954 struct purexevent
*purex
;
1956 node
= kzalloc(RX_ELS_SIZE
, GFP_ATOMIC
);
1960 purex
= &node
->u
.purexinfo
;
1961 purex
->msgp
= (u8
*)(node
+ 1);
1962 purex
->msgp_len
= ELS_MAX_PAYLOAD
;
1964 node
->ntype
= ntype
;
1965 INIT_LIST_HEAD(&node
->list
);
1970 qla_enode_add(scsi_qla_host_t
*vha
, struct enode
*ptr
)
1972 unsigned long flags
;
1974 ql_dbg(ql_dbg_edif
+ ql_dbg_verbose
, vha
, 0x9109,
1975 "%s add enode for type=%x, cnt=%x\n",
1976 __func__
, ptr
->ntype
, ptr
->dinfo
.nodecnt
);
1978 spin_lock_irqsave(&vha
->pur_cinfo
.pur_lock
, flags
);
1979 list_add_tail(&ptr
->list
, &vha
->pur_cinfo
.head
);
1980 spin_unlock_irqrestore(&vha
->pur_cinfo
.pur_lock
, flags
);
1985 static struct enode
*
1986 qla_enode_find(scsi_qla_host_t
*vha
, uint32_t ntype
, uint32_t p1
, uint32_t p2
)
1988 struct enode
*node_rtn
= NULL
;
1989 struct enode
*list_node
, *q
;
1990 unsigned long flags
;
1992 struct purexevent
*purex
;
1994 /* secure the list from moving under us */
1995 spin_lock_irqsave(&vha
->pur_cinfo
.pur_lock
, flags
);
1997 list_for_each_entry_safe(list_node
, q
, &vha
->pur_cinfo
.head
, list
) {
1999 /* node type determines what p1 and p2 are */
2000 purex
= &list_node
->u
.purexinfo
;
2003 if (purex
->pur_info
.pur_sid
.b24
== sid
) {
2004 /* found it and its complete */
2005 node_rtn
= list_node
;
2006 list_del(&list_node
->list
);
2011 spin_unlock_irqrestore(&vha
->pur_cinfo
.pur_lock
, flags
);
2017 * qla_pur_get_pending - read/return authentication message sent
2019 * @vha: host adapter pointer
2020 * @fcport: session pointer
2021 * @bsg_job: user request where the message is copy to.
2024 qla_pur_get_pending(scsi_qla_host_t
*vha
, fc_port_t
*fcport
,
2025 struct bsg_job
*bsg_job
)
2028 struct purexevent
*purex
;
2029 struct qla_bsg_auth_els_reply
*rpl
=
2030 (struct qla_bsg_auth_els_reply
*)bsg_job
->reply
;
2032 bsg_job
->reply_len
= sizeof(*rpl
);
2034 ptr
= qla_enode_find(vha
, N_PUREX
, fcport
->d_id
.b24
, PUR_GET
);
2036 ql_dbg(ql_dbg_edif
, vha
, 0x9111,
2037 "%s no enode data found for %8phN sid=%06x\n",
2038 __func__
, fcport
->port_name
, fcport
->d_id
.b24
);
2039 SET_DID_STATUS(rpl
->r
.result
, DID_IMM_RETRY
);
2044 * enode is now off the linked list and is ours to deal with
2046 purex
= &ptr
->u
.purexinfo
;
2048 /* Copy info back to caller */
2049 rpl
->rx_xchg_address
= purex
->pur_info
.pur_rx_xchg_address
;
2051 SET_DID_STATUS(rpl
->r
.result
, DID_OK
);
2052 rpl
->r
.reply_payload_rcv_len
=
2053 sg_pcopy_from_buffer(bsg_job
->reply_payload
.sg_list
,
2054 bsg_job
->reply_payload
.sg_cnt
, purex
->msgp
,
2055 purex
->pur_info
.pur_bytes_rcvd
, 0);
2057 /* data copy / passback completed - destroy enode */
2058 qla_enode_free(vha
, ptr
);
2063 /* it is assume qpair lock is held */
2065 qla_els_reject_iocb(scsi_qla_host_t
*vha
, struct qla_qpair
*qp
,
2066 struct qla_els_pt_arg
*a
)
2068 struct els_entry_24xx
*els_iocb
;
2070 els_iocb
= __qla2x00_alloc_iocbs(qp
, NULL
);
2072 ql_log(ql_log_warn
, vha
, 0x700c,
2073 "qla2x00_alloc_iocbs failed.\n");
2074 return QLA_FUNCTION_FAILED
;
2077 qla_els_pt_iocb(vha
, els_iocb
, a
);
2079 ql_dbg(ql_dbg_edif
, vha
, 0x0183,
2080 "Sending ELS reject ox_id %04x s:%06x -> d:%06x\n",
2081 a
->ox_id
, a
->sid
.b24
, a
->did
.b24
);
2082 ql_dump_buffer(ql_dbg_edif
+ ql_dbg_verbose
, vha
, 0x0185,
2083 vha
->hw
->elsrej
.c
, sizeof(*vha
->hw
->elsrej
.c
));
2084 /* flush iocb to mem before notifying hw doorbell */
2086 qla2x00_start_iocbs(vha
, qp
->req
);
2091 qla_edb_init(scsi_qla_host_t
*vha
)
2093 if (DBELL_ACTIVE(vha
)) {
2094 /* list already init'd - error */
2095 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
2096 "edif db already initialized, cannot reinit\n");
2100 /* initialize lock which protects doorbell & init list */
2101 spin_lock_init(&vha
->e_dbell
.db_lock
);
2102 INIT_LIST_HEAD(&vha
->e_dbell
.head
);
2105 static void qla_edb_clear(scsi_qla_host_t
*vha
, port_id_t portid
)
2107 unsigned long flags
;
2108 struct edb_node
*e
, *tmp
;
2110 LIST_HEAD(edb_list
);
2112 if (DBELL_INACTIVE(vha
)) {
2113 /* doorbell list not enabled */
2114 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
2115 "%s doorbell not enabled\n", __func__
);
2119 /* grab lock so list doesn't move */
2120 spin_lock_irqsave(&vha
->e_dbell
.db_lock
, flags
);
2121 list_for_each_entry_safe(e
, tmp
, &vha
->e_dbell
.head
, list
) {
2123 case VND_CMD_AUTH_STATE_NEEDED
:
2124 case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN
:
2125 sid
= e
->u
.plogi_did
;
2127 case VND_CMD_AUTH_STATE_ELS_RCVD
:
2130 case VND_CMD_AUTH_STATE_SAUPDATE_COMPL
:
2131 /* app wants to see this */
2134 ql_log(ql_log_warn
, vha
, 0x09102,
2135 "%s unknown node type: %x\n", __func__
, e
->ntype
);
2139 if (sid
.b24
== portid
.b24
) {
2140 ql_dbg(ql_dbg_edif
, vha
, 0x910f,
2141 "%s free doorbell event : node type = %x %p\n",
2142 __func__
, e
->ntype
, e
);
2143 list_del_init(&e
->list
);
2144 list_add_tail(&e
->list
, &edb_list
);
2147 spin_unlock_irqrestore(&vha
->e_dbell
.db_lock
, flags
);
2149 list_for_each_entry_safe(e
, tmp
, &edb_list
, list
)
2150 qla_edb_node_free(vha
, e
);
2153 /* function called when app is stopping */
2156 qla_edb_stop(scsi_qla_host_t
*vha
)
2158 unsigned long flags
;
2159 struct edb_node
*node
, *q
;
2161 if (DBELL_INACTIVE(vha
)) {
2162 /* doorbell list not enabled */
2163 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
2164 "%s doorbell not enabled\n", __func__
);
2168 /* grab lock so list doesn't move */
2169 spin_lock_irqsave(&vha
->e_dbell
.db_lock
, flags
);
2171 vha
->e_dbell
.db_flags
&= ~EDB_ACTIVE
; /* mark it not active */
2172 /* hopefully this is a null list at this point */
2173 list_for_each_entry_safe(node
, q
, &vha
->e_dbell
.head
, list
) {
2174 ql_dbg(ql_dbg_edif
, vha
, 0x910f,
2175 "%s freeing edb_node type=%x\n",
2176 __func__
, node
->ntype
);
2177 qla_edb_node_free(vha
, node
);
2179 spin_unlock_irqrestore(&vha
->e_dbell
.db_lock
, flags
);
2181 qla_edif_dbell_bsg_done(vha
);
2184 static struct edb_node
*
2185 qla_edb_node_alloc(scsi_qla_host_t
*vha
, uint32_t ntype
)
2187 struct edb_node
*node
;
2189 node
= kzalloc(sizeof(*node
), GFP_ATOMIC
);
2191 /* couldn't get space */
2192 ql_dbg(ql_dbg_edif
, vha
, 0x9100,
2193 "edb node unable to be allocated\n");
2197 node
->ntype
= ntype
;
2198 INIT_LIST_HEAD(&node
->list
);
2202 /* adds a already allocated enode to the linked list */
2204 qla_edb_node_add(scsi_qla_host_t
*vha
, struct edb_node
*ptr
)
2206 unsigned long flags
;
2208 if (DBELL_INACTIVE(vha
)) {
2209 /* doorbell list not enabled */
2210 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
2211 "%s doorbell not enabled\n", __func__
);
2215 spin_lock_irqsave(&vha
->e_dbell
.db_lock
, flags
);
2216 list_add_tail(&ptr
->list
, &vha
->e_dbell
.head
);
2217 spin_unlock_irqrestore(&vha
->e_dbell
.db_lock
, flags
);
2222 /* adds event to doorbell list */
2224 qla_edb_eventcreate(scsi_qla_host_t
*vha
, uint32_t dbtype
,
2225 uint32_t data
, uint32_t data2
, fc_port_t
*sfcport
)
2227 struct edb_node
*edbnode
;
2228 fc_port_t
*fcport
= sfcport
;
2231 if (!vha
->hw
->flags
.edif_enabled
) {
2232 /* edif not enabled */
2236 if (DBELL_INACTIVE(vha
)) {
2238 fcport
->edif
.auth_state
= dbtype
;
2239 /* doorbell list not enabled */
2240 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
2241 "%s doorbell not enabled (type=%d\n", __func__
, dbtype
);
2245 edbnode
= qla_edb_node_alloc(vha
, dbtype
);
2247 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
2248 "%s unable to alloc db node\n", __func__
);
2253 id
.b
.domain
= (data
>> 16) & 0xff;
2254 id
.b
.area
= (data
>> 8) & 0xff;
2255 id
.b
.al_pa
= data
& 0xff;
2256 ql_dbg(ql_dbg_edif
, vha
, 0x09222,
2257 "%s: Arrived s_id: %06x\n", __func__
,
2259 fcport
= qla2x00_find_fcport_by_pid(vha
, &id
);
2261 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
2262 "%s can't find fcport for sid= 0x%x - ignoring\n",
2269 /* populate the edb node */
2271 case VND_CMD_AUTH_STATE_NEEDED
:
2272 case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN
:
2273 edbnode
->u
.plogi_did
.b24
= fcport
->d_id
.b24
;
2275 case VND_CMD_AUTH_STATE_ELS_RCVD
:
2276 edbnode
->u
.els_sid
.b24
= fcport
->d_id
.b24
;
2278 case VND_CMD_AUTH_STATE_SAUPDATE_COMPL
:
2279 edbnode
->u
.sa_aen
.port_id
= fcport
->d_id
;
2280 edbnode
->u
.sa_aen
.status
= data
;
2281 edbnode
->u
.sa_aen
.key_type
= data2
;
2282 edbnode
->u
.sa_aen
.version
= EDIF_VERSION1
;
2285 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
2286 "%s unknown type: %x\n", __func__
, dbtype
);
2293 if (!qla_edb_node_add(vha
, edbnode
)) {
2294 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
2295 "%s unable to add dbnode\n", __func__
);
2299 ql_dbg(ql_dbg_edif
, vha
, 0x09102,
2300 "%s Doorbell produced : type=%d %p\n", __func__
, dbtype
, edbnode
);
2301 qla_edif_dbell_bsg_done(vha
);
2303 fcport
->edif
.auth_state
= dbtype
;
2308 qla_edif_timer(scsi_qla_host_t
*vha
)
2310 struct qla_hw_data
*ha
= vha
->hw
;
2312 if (!vha
->vp_idx
&& N2N_TOPO(ha
) && ha
->flags
.n2n_fw_acc_sec
) {
2313 if (DBELL_INACTIVE(vha
) &&
2314 ha
->edif_post_stop_cnt_down
) {
2315 ha
->edif_post_stop_cnt_down
--;
2318 * turn off auto 'Plogi Acc + secure=1' feature
2319 * Set Add FW option[3]
2322 if (ha
->edif_post_stop_cnt_down
== 0) {
2323 ql_dbg(ql_dbg_async
, vha
, 0x911d,
2324 "%s chip reset to turn off PLOGI ACC + secure\n",
2326 set_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
);
2329 ha
->edif_post_stop_cnt_down
= 60;
2333 if (vha
->e_dbell
.dbell_bsg_job
&& time_after_eq(jiffies
, vha
->e_dbell
.bsg_expire
))
2334 qla_edif_dbell_bsg_done(vha
);
2337 static void qla_noop_sp_done(srb_t
*sp
, int res
)
2339 sp
->fcport
->flags
&= ~(FCF_ASYNC_SENT
| FCF_ASYNC_ACTIVE
);
2341 kref_put(&sp
->cmd_kref
, qla2x00_sp_release
);
2345 * Called from work queue
2346 * build and send the sa_update iocb to delete an rx sa_index
2349 qla24xx_issue_sa_replace_iocb(scsi_qla_host_t
*vha
, struct qla_work_evt
*e
)
2352 fc_port_t
*fcport
= NULL
;
2353 struct srb_iocb
*iocb_cmd
= NULL
;
2354 int rval
= QLA_SUCCESS
;
2355 struct edif_sa_ctl
*sa_ctl
= e
->u
.sa_update
.sa_ctl
;
2356 uint16_t nport_handle
= e
->u
.sa_update
.nport_handle
;
2358 ql_dbg(ql_dbg_edif
, vha
, 0x70e6,
2359 "%s: starting, sa_ctl: %p\n", __func__
, sa_ctl
);
2362 ql_dbg(ql_dbg_edif
, vha
, 0x70e6,
2363 "sa_ctl allocation failed\n");
2368 fcport
= sa_ctl
->fcport
;
2370 /* Alloc SRB structure */
2371 sp
= qla2x00_get_sp(vha
, fcport
, GFP_KERNEL
);
2373 ql_dbg(ql_dbg_edif
, vha
, 0x70e6,
2374 "SRB allocation failed\n");
2379 fcport
->flags
|= FCF_ASYNC_SENT
;
2380 iocb_cmd
= &sp
->u
.iocb_cmd
;
2381 iocb_cmd
->u
.sa_update
.sa_ctl
= sa_ctl
;
2383 ql_dbg(ql_dbg_edif
, vha
, 0x3073,
2384 "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n",
2385 fcport
->d_id
.b24
, sa_ctl
, sa_ctl
->index
, nport_handle
);
2387 * if this is a sadb cleanup delete, mark it so the isr can
2388 * take the correct action
2390 if (sa_ctl
->flags
& EDIF_SA_CTL_FLG_CLEANUP_DEL
) {
2391 /* mark this srb as a cleanup delete */
2392 sp
->flags
|= SRB_EDIF_CLEANUP_DELETE
;
2393 ql_dbg(ql_dbg_edif
, vha
, 0x70e6,
2394 "%s: sp 0x%p flagged as cleanup delete\n", __func__
, sp
);
2397 sp
->type
= SRB_SA_REPLACE
;
2398 sp
->name
= "SA_REPLACE";
2399 sp
->fcport
= fcport
;
2400 sp
->free
= qla2x00_rel_sp
;
2401 sp
->done
= qla_noop_sp_done
;
2403 rval
= qla2x00_start_sp(sp
);
2405 if (rval
!= QLA_SUCCESS
) {
2411 kref_put(&sp
->cmd_kref
, qla2x00_sp_release
);
2412 fcport
->flags
&= ~FCF_ASYNC_SENT
;
2414 fcport
->flags
&= ~FCF_ASYNC_ACTIVE
;
2418 void qla24xx_sa_update_iocb(srb_t
*sp
, struct sa_update_28xx
*sa_update_iocb
)
2421 struct scsi_qla_host
*vha
= sp
->vha
;
2422 struct qla_sa_update_frame
*sa_frame
=
2423 &sp
->u
.iocb_cmd
.u
.sa_update
.sa_frame
;
2426 switch (sa_frame
->flags
& (SAU_FLG_INV
| SAU_FLG_TX
)) {
2428 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
2429 "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n",
2430 __func__
, vha
, sa_frame
->fast_sa_index
);
2433 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
2434 "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n",
2435 __func__
, vha
, sa_frame
->fast_sa_index
);
2436 flags
|= SA_FLAG_INVALIDATE
;
2439 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
2440 "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n",
2441 __func__
, vha
, sa_frame
->fast_sa_index
);
2442 flags
|= SA_FLAG_TX
;
2445 ql_dbg(ql_dbg_edif
, vha
, 0x911d,
2446 "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n",
2447 __func__
, vha
, sa_frame
->fast_sa_index
);
2448 flags
|= SA_FLAG_TX
| SA_FLAG_INVALIDATE
;
2452 sa_update_iocb
->entry_type
= SA_UPDATE_IOCB_TYPE
;
2453 sa_update_iocb
->entry_count
= 1;
2454 sa_update_iocb
->sys_define
= 0;
2455 sa_update_iocb
->entry_status
= 0;
2456 sa_update_iocb
->handle
= sp
->handle
;
2457 sa_update_iocb
->u
.nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2458 sa_update_iocb
->vp_index
= sp
->fcport
->vha
->vp_idx
;
2459 sa_update_iocb
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2460 sa_update_iocb
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2461 sa_update_iocb
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2463 sa_update_iocb
->flags
= flags
;
2464 sa_update_iocb
->salt
= cpu_to_le32(sa_frame
->salt
);
2465 sa_update_iocb
->spi
= cpu_to_le32(sa_frame
->spi
);
2466 sa_update_iocb
->sa_index
= cpu_to_le16(sa_frame
->fast_sa_index
);
2468 sa_update_iocb
->sa_control
|= SA_CNTL_ENC_FCSP
;
2469 if (sp
->fcport
->edif
.aes_gmac
)
2470 sa_update_iocb
->sa_control
|= SA_CNTL_AES_GMAC
;
2472 if (sa_frame
->flags
& SAU_FLG_KEY256
) {
2473 sa_update_iocb
->sa_control
|= SA_CNTL_KEY256
;
2474 for (itr
= 0; itr
< 32; itr
++)
2475 sa_update_iocb
->sa_key
[itr
] = sa_frame
->sa_key
[itr
];
2477 sa_update_iocb
->sa_control
|= SA_CNTL_KEY128
;
2478 for (itr
= 0; itr
< 16; itr
++)
2479 sa_update_iocb
->sa_key
[itr
] = sa_frame
->sa_key
[itr
];
2482 ql_dbg(ql_dbg_edif
, vha
, 0x921d,
2483 "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n",
2484 __func__
, sa_update_iocb
->port_id
[2], sa_update_iocb
->port_id
[1],
2485 sa_update_iocb
->port_id
[0], sa_update_iocb
->flags
, sa_update_iocb
->sa_index
,
2486 sa_update_iocb
->sa_control
, sa_update_iocb
->spi
, sa_frame
->flags
, sp
->handle
,
2487 sp
->fcport
->edif
.aes_gmac
);
2489 if (sa_frame
->flags
& SAU_FLG_TX
)
2490 sp
->fcport
->edif
.tx_sa_pending
= 1;
2492 sp
->fcport
->edif
.rx_sa_pending
= 1;
2494 sp
->fcport
->vha
->qla_stats
.control_requests
++;
2498 qla24xx_sa_replace_iocb(srb_t
*sp
, struct sa_update_28xx
*sa_update_iocb
)
2500 struct scsi_qla_host
*vha
= sp
->vha
;
2501 struct srb_iocb
*srb_iocb
= &sp
->u
.iocb_cmd
;
2502 struct edif_sa_ctl
*sa_ctl
= srb_iocb
->u
.sa_update
.sa_ctl
;
2503 uint16_t nport_handle
= sp
->fcport
->loop_id
;
2505 sa_update_iocb
->entry_type
= SA_UPDATE_IOCB_TYPE
;
2506 sa_update_iocb
->entry_count
= 1;
2507 sa_update_iocb
->sys_define
= 0;
2508 sa_update_iocb
->entry_status
= 0;
2509 sa_update_iocb
->handle
= sp
->handle
;
2511 sa_update_iocb
->u
.nport_handle
= cpu_to_le16(nport_handle
);
2513 sa_update_iocb
->vp_index
= sp
->fcport
->vha
->vp_idx
;
2514 sa_update_iocb
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2515 sa_update_iocb
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2516 sa_update_iocb
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2518 /* Invalidate the index. salt, spi, control & key are ignore */
2519 sa_update_iocb
->flags
= SA_FLAG_INVALIDATE
;
2520 sa_update_iocb
->salt
= 0;
2521 sa_update_iocb
->spi
= 0;
2522 sa_update_iocb
->sa_index
= cpu_to_le16(sa_ctl
->index
);
2523 sa_update_iocb
->sa_control
= 0;
2525 ql_dbg(ql_dbg_edif
, vha
, 0x921d,
2526 "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n",
2527 __func__
, sa_update_iocb
->port_id
[2], sa_update_iocb
->port_id
[1],
2528 sa_update_iocb
->port_id
[0], nport_handle
, sa_update_iocb
->flags
,
2529 sa_update_iocb
->sa_index
, sp
->handle
);
2531 sp
->fcport
->vha
->qla_stats
.control_requests
++;
2534 void qla24xx_auth_els(scsi_qla_host_t
*vha
, void **pkt
, struct rsp_que
**rsp
)
2536 struct purex_entry_24xx
*p
= *pkt
;
2540 struct purexevent
*purex
;
2541 struct scsi_qla_host
*host
= NULL
;
2543 struct fc_port
*fcport
;
2544 struct qla_els_pt_arg a
;
2547 memset(&a
, 0, sizeof(a
));
2549 a
.els_opcode
= ELS_AUTH_ELS
;
2550 a
.nport_handle
= p
->nport_handle
;
2551 a
.rx_xchg_address
= p
->rx_xchg_addr
;
2552 a
.did
.b
.domain
= p
->s_id
[2];
2553 a
.did
.b
.area
= p
->s_id
[1];
2554 a
.did
.b
.al_pa
= p
->s_id
[0];
2555 a
.tx_byte_count
= a
.tx_len
= sizeof(struct fc_els_ls_rjt
);
2556 a
.tx_addr
= vha
->hw
->elsrej
.cdma
;
2557 a
.vp_idx
= vha
->vp_idx
;
2558 a
.control_flags
= EPD_ELS_RJT
;
2559 a
.ox_id
= le16_to_cpu(p
->ox_id
);
2561 sid
= p
->s_id
[0] | (p
->s_id
[1] << 8) | (p
->s_id
[2] << 16);
2563 totlen
= (le16_to_cpu(p
->frame_size
) & 0x0fff) - PURX_ELS_HEADER_SIZE
;
2564 if (le16_to_cpu(p
->status_flags
) & 0x8000) {
2565 totlen
= le16_to_cpu(p
->trunc_frame_size
);
2566 qla_els_reject_iocb(vha
, (*rsp
)->qpair
, &a
);
2567 __qla_consume_iocb(vha
, pkt
, rsp
);
2571 if (totlen
> ELS_MAX_PAYLOAD
) {
2572 ql_dbg(ql_dbg_edif
, vha
, 0x0910d,
2573 "%s WARNING: verbose ELS frame received (totlen=%x)\n",
2575 qla_els_reject_iocb(vha
, (*rsp
)->qpair
, &a
);
2576 __qla_consume_iocb(vha
, pkt
, rsp
);
2580 if (!vha
->hw
->flags
.edif_enabled
) {
2581 /* edif support not enabled */
2582 ql_dbg(ql_dbg_edif
, vha
, 0x910e, "%s edif not enabled\n",
2584 qla_els_reject_iocb(vha
, (*rsp
)->qpair
, &a
);
2585 __qla_consume_iocb(vha
, pkt
, rsp
);
2589 ptr
= qla_enode_alloc(vha
, N_PUREX
);
2591 ql_dbg(ql_dbg_edif
, vha
, 0x09109,
2592 "WARNING: enode alloc failed for sid=%x\n",
2594 qla_els_reject_iocb(vha
, (*rsp
)->qpair
, &a
);
2595 __qla_consume_iocb(vha
, pkt
, rsp
);
2599 purex
= &ptr
->u
.purexinfo
;
2600 purex
->pur_info
.pur_sid
= a
.did
;
2601 purex
->pur_info
.pur_bytes_rcvd
= totlen
;
2602 purex
->pur_info
.pur_rx_xchg_address
= le32_to_cpu(p
->rx_xchg_addr
);
2603 purex
->pur_info
.pur_nphdl
= le16_to_cpu(p
->nport_handle
);
2604 purex
->pur_info
.pur_did
.b
.domain
= p
->d_id
[2];
2605 purex
->pur_info
.pur_did
.b
.area
= p
->d_id
[1];
2606 purex
->pur_info
.pur_did
.b
.al_pa
= p
->d_id
[0];
2607 purex
->pur_info
.vp_idx
= p
->vp_idx
;
2609 a
.sid
= purex
->pur_info
.pur_did
;
2611 rc
= __qla_copy_purex_to_buffer(vha
, pkt
, rsp
, purex
->msgp
,
2614 qla_els_reject_iocb(vha
, (*rsp
)->qpair
, &a
);
2615 qla_enode_free(vha
, ptr
);
2618 beid
.al_pa
= purex
->pur_info
.pur_did
.b
.al_pa
;
2619 beid
.area
= purex
->pur_info
.pur_did
.b
.area
;
2620 beid
.domain
= purex
->pur_info
.pur_did
.b
.domain
;
2621 host
= qla_find_host_by_d_id(vha
, beid
);
2623 ql_log(ql_log_fatal
, vha
, 0x508b,
2624 "%s Drop ELS due to unable to find host %06x\n",
2625 __func__
, purex
->pur_info
.pur_did
.b24
);
2627 qla_els_reject_iocb(vha
, (*rsp
)->qpair
, &a
);
2628 qla_enode_free(vha
, ptr
);
2632 fcport
= qla2x00_find_fcport_by_pid(host
, &purex
->pur_info
.pur_sid
);
2634 if (DBELL_INACTIVE(vha
)) {
2635 ql_dbg(ql_dbg_edif
, host
, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
2636 __func__
, host
->e_dbell
.db_flags
,
2637 fcport
? fcport
->d_id
.b24
: 0);
2639 qla_els_reject_iocb(host
, (*rsp
)->qpair
, &a
);
2640 qla_enode_free(host
, ptr
);
2644 if (fcport
&& EDIF_SESSION_DOWN(fcport
)) {
2645 ql_dbg(ql_dbg_edif
, host
, 0x13b6,
2646 "%s terminate exchange. Send logo to 0x%x\n",
2647 __func__
, a
.did
.b24
);
2649 a
.tx_byte_count
= a
.tx_len
= 0;
2651 a
.control_flags
= EPD_RX_XCHG
; /* EPD_RX_XCHG = terminate cmd */
2652 qla_els_reject_iocb(host
, (*rsp
)->qpair
, &a
);
2653 qla_enode_free(host
, ptr
);
2654 /* send logo to let remote port knows to tear down session */
2655 fcport
->send_els_logo
= 1;
2656 qlt_schedule_sess_for_deletion(fcport
);
2660 /* add the local enode to the list */
2661 qla_enode_add(host
, ptr
);
2663 ql_dbg(ql_dbg_edif
, host
, 0x0910c,
2664 "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n",
2665 __func__
, purex
->pur_info
.pur_bytes_rcvd
, purex
->pur_info
.pur_sid
.b24
,
2666 purex
->pur_info
.pur_did
.b24
, purex
->pur_info
.pur_rx_xchg_address
);
2668 qla_edb_eventcreate(host
, VND_CMD_AUTH_STATE_ELS_RCVD
, sid
, 0, NULL
);
2671 static uint16_t qla_edif_get_sa_index_from_freepool(fc_port_t
*fcport
, int dir
)
2673 struct scsi_qla_host
*vha
= fcport
->vha
;
2674 struct qla_hw_data
*ha
= vha
->hw
;
2676 unsigned long flags
= 0;
2679 ql_dbg(ql_dbg_edif
+ ql_dbg_verbose
, vha
, 0x3063,
2680 "%s: entry\n", __func__
);
2683 sa_id_map
= ha
->edif_tx_sa_id_map
;
2685 sa_id_map
= ha
->edif_rx_sa_id_map
;
2687 spin_lock_irqsave(&ha
->sadb_fp_lock
, flags
);
2688 sa_index
= find_first_zero_bit(sa_id_map
, EDIF_NUM_SA_INDEX
);
2689 if (sa_index
>= EDIF_NUM_SA_INDEX
) {
2690 spin_unlock_irqrestore(&ha
->sadb_fp_lock
, flags
);
2691 return INVALID_EDIF_SA_INDEX
;
2693 set_bit(sa_index
, sa_id_map
);
2694 spin_unlock_irqrestore(&ha
->sadb_fp_lock
, flags
);
2697 sa_index
+= EDIF_TX_SA_INDEX_BASE
;
2699 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2700 "%s: index retrieved from free pool %d\n", __func__
, sa_index
);
2705 /* find an sadb entry for an nport_handle */
2706 static struct edif_sa_index_entry
*
2707 qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle
,
2708 struct list_head
*sa_list
)
2710 struct edif_sa_index_entry
*entry
;
2711 struct edif_sa_index_entry
*tentry
;
2712 struct list_head
*indx_list
= sa_list
;
2714 list_for_each_entry_safe(entry
, tentry
, indx_list
, next
) {
2715 if (entry
->handle
== nport_handle
)
2721 /* remove an sa_index from the nport_handle and return it to the free pool */
2722 static int qla_edif_sadb_delete_sa_index(fc_port_t
*fcport
, uint16_t nport_handle
,
2725 struct edif_sa_index_entry
*entry
;
2726 struct list_head
*sa_list
;
2727 int dir
= (sa_index
< EDIF_TX_SA_INDEX_BASE
) ? 0 : 1;
2729 int free_slot_count
= 0;
2730 scsi_qla_host_t
*vha
= fcport
->vha
;
2731 struct qla_hw_data
*ha
= vha
->hw
;
2732 unsigned long flags
= 0;
2734 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2735 "%s: entry\n", __func__
);
2738 sa_list
= &ha
->sadb_tx_index_list
;
2740 sa_list
= &ha
->sadb_rx_index_list
;
2742 entry
= qla_edif_sadb_find_sa_index_entry(nport_handle
, sa_list
);
2744 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2745 "%s: no entry found for nport_handle 0x%x\n",
2746 __func__
, nport_handle
);
2750 spin_lock_irqsave(&ha
->sadb_lock
, flags
);
2752 * each tx/rx direction has up to 2 sa indexes/slots. 1 slot for in flight traffic
2753 * the other is use at re-key time.
2755 for (slot
= 0; slot
< 2; slot
++) {
2756 if (entry
->sa_pair
[slot
].sa_index
== sa_index
) {
2757 entry
->sa_pair
[slot
].sa_index
= INVALID_EDIF_SA_INDEX
;
2758 entry
->sa_pair
[slot
].spi
= 0;
2760 qla_edif_add_sa_index_to_freepool(fcport
, dir
, sa_index
);
2761 } else if (entry
->sa_pair
[slot
].sa_index
== INVALID_EDIF_SA_INDEX
) {
2766 if (free_slot_count
== 2) {
2767 list_del(&entry
->next
);
2770 spin_unlock_irqrestore(&ha
->sadb_lock
, flags
);
2772 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2773 "%s: sa_index %d removed, free_slot_count: %d\n",
2774 __func__
, sa_index
, free_slot_count
);
2780 qla28xx_sa_update_iocb_entry(scsi_qla_host_t
*v
, struct req_que
*req
,
2781 struct sa_update_28xx
*pkt
)
2783 const char *func
= "SA_UPDATE_RESPONSE_IOCB";
2785 struct edif_sa_ctl
*sa_ctl
;
2786 int old_sa_deleted
= 1;
2787 uint16_t nport_handle
;
2788 struct scsi_qla_host
*vha
;
2790 sp
= qla2x00_get_sp_from_handle(v
, func
, req
, pkt
);
2793 ql_dbg(ql_dbg_edif
, v
, 0x3063,
2794 "%s: no sp found for pkt\n", __func__
);
2797 /* use sp->vha due to npiv */
2800 switch (pkt
->flags
& (SA_FLAG_INVALIDATE
| SA_FLAG_TX
)) {
2802 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2803 "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n",
2804 __func__
, vha
, pkt
->sa_index
);
2807 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2808 "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n",
2809 __func__
, vha
, pkt
->sa_index
);
2812 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2813 "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n",
2814 __func__
, vha
, pkt
->sa_index
);
2817 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2818 "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n",
2819 __func__
, vha
, pkt
->sa_index
);
2824 * dig the nport handle out of the iocb, fcport->loop_id can not be trusted
2825 * to be correct during cleanup sa_update iocbs.
2827 nport_handle
= sp
->fcport
->loop_id
;
2829 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2830 "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n",
2831 __func__
, sp
->fcport
->port_name
, pkt
->u
.comp_sts
, pkt
->old_sa_info
, pkt
->new_sa_info
,
2832 nport_handle
, pkt
->sa_index
, pkt
->flags
, sp
->handle
);
2834 /* if rx delete, remove the timer */
2835 if ((pkt
->flags
& (SA_FLAG_INVALIDATE
| SA_FLAG_TX
)) == SA_FLAG_INVALIDATE
) {
2836 struct edif_list_entry
*edif_entry
;
2838 sp
->fcport
->flags
&= ~(FCF_ASYNC_SENT
| FCF_ASYNC_ACTIVE
);
2840 edif_entry
= qla_edif_list_find_sa_index(sp
->fcport
, nport_handle
);
2842 ql_dbg(ql_dbg_edif
, vha
, 0x5033,
2843 "%s: removing edif_entry %p, new sa_index: 0x%x\n",
2844 __func__
, edif_entry
, pkt
->sa_index
);
2845 qla_edif_list_delete_sa_index(sp
->fcport
, edif_entry
);
2846 timer_shutdown(&edif_entry
->timer
);
2848 ql_dbg(ql_dbg_edif
, vha
, 0x5033,
2849 "%s: releasing edif_entry %p, new sa_index: 0x%x\n",
2850 __func__
, edif_entry
, pkt
->sa_index
);
2857 * if this is a delete for either tx or rx, make sure it succeeded.
2858 * The new_sa_info field should be 0xffff on success
2860 if (pkt
->flags
& SA_FLAG_INVALIDATE
)
2861 old_sa_deleted
= (le16_to_cpu(pkt
->new_sa_info
) == 0xffff) ? 1 : 0;
2863 /* Process update and delete the same way */
2865 /* If this is an sadb cleanup delete, bypass sending events to IPSEC */
2866 if (sp
->flags
& SRB_EDIF_CLEANUP_DELETE
) {
2867 sp
->fcport
->flags
&= ~(FCF_ASYNC_SENT
| FCF_ASYNC_ACTIVE
);
2868 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2869 "%s: nph 0x%x, sa_index %d removed from fw\n",
2870 __func__
, sp
->fcport
->loop_id
, pkt
->sa_index
);
2872 } else if ((pkt
->entry_status
== 0) && (pkt
->u
.comp_sts
== 0) &&
2875 * Note: Wa are only keeping track of latest SA,
2876 * so we know when we can start enableing encryption per I/O.
2877 * If all SA's get deleted, let FW reject the IOCB.
2879 * TODO: edif: don't set enabled here I think
2880 * TODO: edif: prli complete is where it should be set
2882 ql_dbg(ql_dbg_edif
+ ql_dbg_verbose
, vha
, 0x3063,
2883 "SA(%x)updated for s_id %02x%02x%02x\n",
2885 pkt
->port_id
[2], pkt
->port_id
[1], pkt
->port_id
[0]);
2886 sp
->fcport
->edif
.enable
= 1;
2887 if (pkt
->flags
& SA_FLAG_TX
) {
2888 sp
->fcport
->edif
.tx_sa_set
= 1;
2889 sp
->fcport
->edif
.tx_sa_pending
= 0;
2890 qla_edb_eventcreate(vha
, VND_CMD_AUTH_STATE_SAUPDATE_COMPL
,
2891 QL_VND_SA_STAT_SUCCESS
,
2892 QL_VND_TX_SA_KEY
, sp
->fcport
);
2894 sp
->fcport
->edif
.rx_sa_set
= 1;
2895 sp
->fcport
->edif
.rx_sa_pending
= 0;
2896 qla_edb_eventcreate(vha
, VND_CMD_AUTH_STATE_SAUPDATE_COMPL
,
2897 QL_VND_SA_STAT_SUCCESS
,
2898 QL_VND_RX_SA_KEY
, sp
->fcport
);
2901 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2902 "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n",
2903 __func__
, sp
->fcport
->port_name
, pkt
->sa_index
, pkt
->new_sa_info
,
2904 pkt
->port_id
[2], pkt
->port_id
[1], pkt
->port_id
[0]);
2906 if (pkt
->flags
& SA_FLAG_TX
)
2907 qla_edb_eventcreate(vha
, VND_CMD_AUTH_STATE_SAUPDATE_COMPL
,
2908 (le16_to_cpu(pkt
->u
.comp_sts
) << 16) | QL_VND_SA_STAT_FAILED
,
2909 QL_VND_TX_SA_KEY
, sp
->fcport
);
2911 qla_edb_eventcreate(vha
, VND_CMD_AUTH_STATE_SAUPDATE_COMPL
,
2912 (le16_to_cpu(pkt
->u
.comp_sts
) << 16) | QL_VND_SA_STAT_FAILED
,
2913 QL_VND_RX_SA_KEY
, sp
->fcport
);
2916 /* for delete, release sa_ctl, sa_index */
2917 if (pkt
->flags
& SA_FLAG_INVALIDATE
) {
2918 /* release the sa_ctl */
2919 sa_ctl
= qla_edif_find_sa_ctl_by_index(sp
->fcport
,
2920 le16_to_cpu(pkt
->sa_index
), (pkt
->flags
& SA_FLAG_TX
));
2922 qla_edif_find_sa_ctl_by_index(sp
->fcport
, sa_ctl
->index
,
2923 (pkt
->flags
& SA_FLAG_TX
)) != NULL
) {
2924 ql_dbg(ql_dbg_edif
+ ql_dbg_verbose
, vha
, 0x3063,
2925 "%s: freeing sa_ctl for index %d\n",
2926 __func__
, sa_ctl
->index
);
2927 qla_edif_free_sa_ctl(sp
->fcport
, sa_ctl
, sa_ctl
->index
);
2929 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2930 "%s: sa_ctl NOT freed, sa_ctl: %p\n",
2933 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2934 "%s: freeing sa_index %d, nph: 0x%x\n",
2935 __func__
, le16_to_cpu(pkt
->sa_index
), nport_handle
);
2936 qla_edif_sadb_delete_sa_index(sp
->fcport
, nport_handle
,
2937 le16_to_cpu(pkt
->sa_index
));
2939 * check for a failed sa_update and remove
2942 } else if (pkt
->u
.comp_sts
) {
2943 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
2944 "%s: freeing sa_index %d, nph: 0x%x\n",
2945 __func__
, pkt
->sa_index
, nport_handle
);
2946 qla_edif_sadb_delete_sa_index(sp
->fcport
, nport_handle
,
2947 le16_to_cpu(pkt
->sa_index
));
2948 switch (le16_to_cpu(pkt
->u
.comp_sts
)) {
2949 case CS_PORT_EDIF_UNAVAIL
:
2950 case CS_PORT_EDIF_LOGOUT
:
2951 qlt_schedule_sess_for_deletion(sp
->fcport
);
2962 * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP
2963 * @sp: command to send to the ISP
2965 * Return: non-zero if a failure occurred, else zero.
2968 qla28xx_start_scsi_edif(srb_t
*sp
)
2971 unsigned long flags
;
2972 struct scsi_cmnd
*cmd
;
2980 uint8_t additional_cdb_len
;
2981 struct ct6_dsd
*ctx
;
2982 struct scsi_qla_host
*vha
= sp
->vha
;
2983 struct qla_hw_data
*ha
= vha
->hw
;
2984 struct cmd_type_6
*cmd_pkt
;
2985 struct dsd64
*cur_dsd
;
2986 uint8_t avail_dsds
= 0;
2987 struct scatterlist
*sg
;
2988 struct req_que
*req
= sp
->qpair
->req
;
2989 spinlock_t
*lock
= sp
->qpair
->qp_lock_ptr
;
2991 /* Setup device pointers. */
2992 cmd
= GET_CMD_SP(sp
);
2994 /* So we know we haven't pci_map'ed anything yet */
2997 /* Send marker if required */
2998 if (vha
->marker_needed
!= 0) {
2999 if (qla2x00_marker(vha
, sp
->qpair
, 0, 0, MK_SYNC_ALL
) !=
3001 ql_log(ql_log_warn
, vha
, 0x300c,
3002 "qla2x00_marker failed for cmd=%p.\n", cmd
);
3003 return QLA_FUNCTION_FAILED
;
3005 vha
->marker_needed
= 0;
3008 /* Acquire ring specific lock */
3009 spin_lock_irqsave(lock
, flags
);
3011 /* Check for room in outstanding command list. */
3012 handle
= req
->current_outstanding_cmd
;
3013 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
3015 if (handle
== req
->num_outstanding_cmds
)
3017 if (!req
->outstanding_cmds
[handle
])
3020 if (index
== req
->num_outstanding_cmds
)
3023 /* Map the sg table so we have an accurate count of sg entries needed */
3024 if (scsi_sg_count(cmd
)) {
3025 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
3026 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
3027 if (unlikely(!nseg
))
3034 req_cnt
= qla24xx_calc_iocbs(vha
, tot_dsds
);
3036 sp
->iores
.res_type
= RESOURCE_IOCB
| RESOURCE_EXCH
;
3037 sp
->iores
.exch_cnt
= 1;
3038 sp
->iores
.iocb_cnt
= req_cnt
;
3039 if (qla_get_fw_resources(sp
->qpair
, &sp
->iores
))
3042 if (req
->cnt
< (req_cnt
+ 2)) {
3043 cnt
= IS_SHADOW_REG_CAPABLE(ha
) ? *req
->out_ptr
:
3044 rd_reg_dword(req
->req_q_out
);
3045 if (req
->ring_index
< cnt
)
3046 req
->cnt
= cnt
- req
->ring_index
;
3048 req
->cnt
= req
->length
-
3049 (req
->ring_index
- cnt
);
3050 if (req
->cnt
< (req_cnt
+ 2))
3054 if (qla_get_buf(vha
, sp
->qpair
, &sp
->u
.scmd
.buf_dsc
)) {
3055 ql_log(ql_log_fatal
, vha
, 0x3011,
3056 "Failed to allocate buf for fcp_cmnd for cmd=%p.\n", cmd
);
3060 sp
->flags
|= SRB_GOT_BUF
;
3061 ctx
= &sp
->u
.scmd
.ct6_ctx
;
3062 ctx
->fcp_cmnd
= sp
->u
.scmd
.buf_dsc
.buf
;
3063 ctx
->fcp_cmnd_dma
= sp
->u
.scmd
.buf_dsc
.buf_dma
;
3065 if (cmd
->cmd_len
> 16) {
3066 additional_cdb_len
= cmd
->cmd_len
- 16;
3067 if ((cmd
->cmd_len
% 4) != 0) {
3069 * SCSI command bigger than 16 bytes must be
3072 ql_log(ql_log_warn
, vha
, 0x3012,
3073 "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
3075 goto queuing_error_fcp_cmnd
;
3077 ctx
->fcp_cmnd_len
= 12 + cmd
->cmd_len
+ 4;
3079 additional_cdb_len
= 0;
3080 ctx
->fcp_cmnd_len
= 12 + 16 + 4;
3083 cmd_pkt
= (struct cmd_type_6
*)req
->ring_ptr
;
3084 cmd_pkt
->handle
= make_handle(req
->id
, handle
);
3087 * Zero out remaining portion of packet.
3088 * tagged queuing modifier -- default is TSK_SIMPLE (0).
3090 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
3091 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
3092 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
3094 /* No data transfer */
3095 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
3096 cmd_pkt
->byte_count
= cpu_to_le32(0);
3100 /* Set transfer direction */
3101 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
3102 cmd_pkt
->control_flags
= cpu_to_le16(CF_WRITE_DATA
);
3103 vha
->qla_stats
.output_bytes
+= scsi_bufflen(cmd
);
3104 vha
->qla_stats
.output_requests
++;
3105 sp
->fcport
->edif
.tx_bytes
+= scsi_bufflen(cmd
);
3106 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
3107 cmd_pkt
->control_flags
= cpu_to_le16(CF_READ_DATA
);
3108 vha
->qla_stats
.input_bytes
+= scsi_bufflen(cmd
);
3109 vha
->qla_stats
.input_requests
++;
3110 sp
->fcport
->edif
.rx_bytes
+= scsi_bufflen(cmd
);
3113 cmd_pkt
->control_flags
|= cpu_to_le16(CF_EN_EDIF
);
3114 cmd_pkt
->control_flags
&= ~(cpu_to_le16(CF_NEW_SA
));
3116 /* One DSD is available in the Command Type 6 IOCB */
3118 cur_dsd
= &cmd_pkt
->fcp_dsd
;
3120 /* Load data segments */
3121 scsi_for_each_sg(cmd
, sg
, tot_dsds
, i
) {
3123 cont_a64_entry_t
*cont_pkt
;
3125 /* Allocate additional continuation packets? */
3126 if (avail_dsds
== 0) {
3128 * Five DSDs are available in the Continuation
3131 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
, req
);
3132 cur_dsd
= cont_pkt
->dsd
;
3136 sle_dma
= sg_dma_address(sg
);
3137 put_unaligned_le64(sle_dma
, &cur_dsd
->address
);
3138 cur_dsd
->length
= cpu_to_le32(sg_dma_len(sg
));
3144 /* Set NPORT-ID and LUN number*/
3145 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
3146 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
3147 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
3148 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
3149 cmd_pkt
->vp_index
= sp
->vha
->vp_idx
;
3151 cmd_pkt
->entry_type
= COMMAND_TYPE_6
;
3153 /* Set total data segment count. */
3154 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
3156 int_to_scsilun(cmd
->device
->lun
, &cmd_pkt
->lun
);
3157 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
3159 /* build FCP_CMND IU */
3160 int_to_scsilun(cmd
->device
->lun
, &ctx
->fcp_cmnd
->lun
);
3161 ctx
->fcp_cmnd
->additional_cdb_len
= additional_cdb_len
;
3163 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
)
3164 ctx
->fcp_cmnd
->additional_cdb_len
|= 1;
3165 else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
)
3166 ctx
->fcp_cmnd
->additional_cdb_len
|= 2;
3168 /* Populate the FCP_PRIO. */
3169 if (ha
->flags
.fcp_prio_enabled
)
3170 ctx
->fcp_cmnd
->task_attribute
|=
3171 sp
->fcport
->fcp_prio
<< 3;
3173 memcpy(ctx
->fcp_cmnd
->cdb
, cmd
->cmnd
, cmd
->cmd_len
);
3175 fcp_dl
= (__be32
*)(ctx
->fcp_cmnd
->cdb
+ 16 +
3176 additional_cdb_len
);
3177 *fcp_dl
= htonl((uint32_t)scsi_bufflen(cmd
));
3179 cmd_pkt
->fcp_cmnd_dseg_len
= cpu_to_le16(ctx
->fcp_cmnd_len
);
3180 put_unaligned_le64(ctx
->fcp_cmnd_dma
, &cmd_pkt
->fcp_cmnd_dseg_address
);
3182 cmd_pkt
->byte_count
= cpu_to_le32((uint32_t)scsi_bufflen(cmd
));
3183 /* Set total data segment count. */
3184 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
3185 cmd_pkt
->entry_status
= 0;
3187 /* Build command packet. */
3188 req
->current_outstanding_cmd
= handle
;
3189 req
->outstanding_cmds
[handle
] = sp
;
3190 sp
->handle
= handle
;
3191 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
3192 req
->cnt
-= req_cnt
;
3194 /* Adjust ring index. */
3197 if (req
->ring_index
== req
->length
) {
3198 req
->ring_index
= 0;
3199 req
->ring_ptr
= req
->ring
;
3204 sp
->qpair
->cmd_cnt
++;
3205 /* Set chip new ring index. */
3206 wrt_reg_dword(req
->req_q_in
, req
->ring_index
);
3208 spin_unlock_irqrestore(lock
, flags
);
3212 queuing_error_fcp_cmnd
:
3215 scsi_dma_unmap(cmd
);
3217 qla_put_buf(sp
->qpair
, &sp
->u
.scmd
.buf_dsc
);
3218 qla_put_fw_resources(sp
->qpair
, &sp
->iores
);
3219 spin_unlock_irqrestore(lock
, flags
);
3221 return QLA_FUNCTION_FAILED
;
3224 /**********************************************
3225 * edif update/delete sa_index list functions *
3226 **********************************************/
3228 /* clear the edif_indx_list for this port */
3229 void qla_edif_list_del(fc_port_t
*fcport
)
3231 struct edif_list_entry
*indx_lst
;
3232 struct edif_list_entry
*tindx_lst
;
3233 struct list_head
*indx_list
= &fcport
->edif
.edif_indx_list
;
3234 unsigned long flags
= 0;
3236 spin_lock_irqsave(&fcport
->edif
.indx_list_lock
, flags
);
3237 list_for_each_entry_safe(indx_lst
, tindx_lst
, indx_list
, next
) {
3238 list_del(&indx_lst
->next
);
3241 spin_unlock_irqrestore(&fcport
->edif
.indx_list_lock
, flags
);
3248 /* allocate/retrieve an sa_index for a given spi */
3249 static uint16_t qla_edif_sadb_get_sa_index(fc_port_t
*fcport
,
3250 struct qla_sa_update_frame
*sa_frame
)
3252 struct edif_sa_index_entry
*entry
;
3253 struct list_head
*sa_list
;
3255 int dir
= sa_frame
->flags
& SAU_FLG_TX
;
3258 scsi_qla_host_t
*vha
= fcport
->vha
;
3259 struct qla_hw_data
*ha
= vha
->hw
;
3260 unsigned long flags
= 0;
3261 uint16_t nport_handle
= fcport
->loop_id
;
3263 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
3264 "%s: entry fc_port: %p, nport_handle: 0x%x\n",
3265 __func__
, fcport
, nport_handle
);
3268 sa_list
= &ha
->sadb_tx_index_list
;
3270 sa_list
= &ha
->sadb_rx_index_list
;
3272 entry
= qla_edif_sadb_find_sa_index_entry(nport_handle
, sa_list
);
3274 if ((sa_frame
->flags
& (SAU_FLG_TX
| SAU_FLG_INV
)) == SAU_FLG_INV
) {
3275 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
3276 "%s: rx delete request with no entry\n", __func__
);
3277 return RX_DELETE_NO_EDIF_SA_INDEX
;
3280 /* if there is no entry for this nport, add one */
3281 entry
= kzalloc((sizeof(struct edif_sa_index_entry
)), GFP_ATOMIC
);
3283 return INVALID_EDIF_SA_INDEX
;
3285 sa_index
= qla_edif_get_sa_index_from_freepool(fcport
, dir
);
3286 if (sa_index
== INVALID_EDIF_SA_INDEX
) {
3288 return INVALID_EDIF_SA_INDEX
;
3291 INIT_LIST_HEAD(&entry
->next
);
3292 entry
->handle
= nport_handle
;
3293 entry
->fcport
= fcport
;
3294 entry
->sa_pair
[0].spi
= sa_frame
->spi
;
3295 entry
->sa_pair
[0].sa_index
= sa_index
;
3296 entry
->sa_pair
[1].spi
= 0;
3297 entry
->sa_pair
[1].sa_index
= INVALID_EDIF_SA_INDEX
;
3298 spin_lock_irqsave(&ha
->sadb_lock
, flags
);
3299 list_add_tail(&entry
->next
, sa_list
);
3300 spin_unlock_irqrestore(&ha
->sadb_lock
, flags
);
3301 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
3302 "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n",
3303 __func__
, nport_handle
, sa_frame
->spi
, sa_index
);
3308 spin_lock_irqsave(&ha
->sadb_lock
, flags
);
3310 /* see if we already have an entry for this spi */
3311 for (slot
= 0; slot
< 2; slot
++) {
3312 if (entry
->sa_pair
[slot
].sa_index
== INVALID_EDIF_SA_INDEX
) {
3315 if (entry
->sa_pair
[slot
].spi
== sa_frame
->spi
) {
3316 spin_unlock_irqrestore(&ha
->sadb_lock
, flags
);
3317 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
3318 "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n",
3319 __func__
, slot
, entry
->handle
, sa_frame
->spi
,
3320 entry
->sa_pair
[slot
].sa_index
);
3321 return entry
->sa_pair
[slot
].sa_index
;
3325 spin_unlock_irqrestore(&ha
->sadb_lock
, flags
);
3327 /* both slots are used */
3328 if (free_slot
== -1) {
3329 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
3330 "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n",
3331 __func__
, entry
->handle
, sa_frame
->spi
);
3332 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
3333 "%s: Slot 0 spi: 0x%x sa_index: %d, Slot 1 spi: 0x%x sa_index: %d\n",
3334 __func__
, entry
->sa_pair
[0].spi
, entry
->sa_pair
[0].sa_index
,
3335 entry
->sa_pair
[1].spi
, entry
->sa_pair
[1].sa_index
);
3337 return INVALID_EDIF_SA_INDEX
;
3340 /* there is at least one free slot, use it */
3341 sa_index
= qla_edif_get_sa_index_from_freepool(fcport
, dir
);
3342 if (sa_index
== INVALID_EDIF_SA_INDEX
) {
3343 ql_dbg(ql_dbg_edif
, fcport
->vha
, 0x3063,
3344 "%s: empty freepool!!\n", __func__
);
3345 return INVALID_EDIF_SA_INDEX
;
3348 spin_lock_irqsave(&ha
->sadb_lock
, flags
);
3349 entry
->sa_pair
[free_slot
].spi
= sa_frame
->spi
;
3350 entry
->sa_pair
[free_slot
].sa_index
= sa_index
;
3351 spin_unlock_irqrestore(&ha
->sadb_lock
, flags
);
3352 ql_dbg(ql_dbg_edif
, fcport
->vha
, 0x3063,
3353 "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n",
3354 __func__
, free_slot
, entry
->handle
, sa_frame
->spi
, sa_index
);
3359 /* release any sadb entries -- only done at teardown */
3360 void qla_edif_sadb_release(struct qla_hw_data
*ha
)
3362 struct edif_sa_index_entry
*entry
, *tmp
;
3364 list_for_each_entry_safe(entry
, tmp
, &ha
->sadb_rx_index_list
, next
) {
3365 list_del(&entry
->next
);
3369 list_for_each_entry_safe(entry
, tmp
, &ha
->sadb_tx_index_list
, next
) {
3370 list_del(&entry
->next
);
3375 /**************************
3376 * sadb freepool functions
3377 **************************/
3379 /* build the rx and tx sa_index free pools -- only done at fcport init */
3380 int qla_edif_sadb_build_free_pool(struct qla_hw_data
*ha
)
3382 ha
->edif_tx_sa_id_map
=
3383 kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX
), sizeof(long), GFP_KERNEL
);
3385 if (!ha
->edif_tx_sa_id_map
) {
3386 ql_log_pci(ql_log_fatal
, ha
->pdev
, 0x0009,
3387 "Unable to allocate memory for sadb tx.\n");
3391 ha
->edif_rx_sa_id_map
=
3392 kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX
), sizeof(long), GFP_KERNEL
);
3393 if (!ha
->edif_rx_sa_id_map
) {
3394 kfree(ha
->edif_tx_sa_id_map
);
3395 ha
->edif_tx_sa_id_map
= NULL
;
3396 ql_log_pci(ql_log_fatal
, ha
->pdev
, 0x0009,
3397 "Unable to allocate memory for sadb rx.\n");
3403 /* release the free pool - only done during fcport teardown */
3404 void qla_edif_sadb_release_free_pool(struct qla_hw_data
*ha
)
3406 kfree(ha
->edif_tx_sa_id_map
);
3407 ha
->edif_tx_sa_id_map
= NULL
;
3408 kfree(ha
->edif_rx_sa_id_map
);
3409 ha
->edif_rx_sa_id_map
= NULL
;
/*
 * __chk_edif_rx_sa_delete_pending - on I/O completion for this fcport,
 * check whether a delayed rx SA delete is pending for @sa_index and, once
 * enough traffic has been observed, invalidate the pending entry and
 * schedule the actual SA delete via qla_post_sa_replace_work().
 *
 * NOTE(review): this chunk is an extraction with dropped lines (opening
 * brace, early-return bodies and closing braces are missing around orig
 * lines 3414, 3421, 3424, 3427-3432, 3436-3439, 3445-3447, 3462-3463,
 * 3466, 3472, 3474, 3479, 3483-3485) - compare against upstream
 * qla_edif.c before editing.
 */
3412 static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t
*vha
,
3413 fc_port_t
*fcport
, uint32_t handle
, uint16_t sa_index
)
3415 struct edif_list_entry
*edif_entry
;
3416 struct edif_sa_ctl
*sa_ctl
;
3417 uint16_t delete_sa_index
= INVALID_EDIF_SA_INDEX
;
3418 unsigned long flags
= 0;
3419 uint16_t nport_handle
= fcport
->loop_id
;
3420 uint16_t cached_nport_handle
;
/* edif_entry lookup and all field accesses below are guarded by indx_list_lock */
3422 spin_lock_irqsave(&fcport
->edif
.indx_list_lock
, flags
);
3423 edif_entry
= qla_edif_list_find_sa_index(fcport
, nport_handle
);
/* lookup-miss path: drop the lock and bail out */
3425 spin_unlock_irqrestore(&fcport
->edif
.indx_list_lock
, flags
);
3426 return; /* no pending delete for this handle */
3430 * check for no pending delete for this index or iocb does not
3433 if (edif_entry
->delete_sa_index
== INVALID_EDIF_SA_INDEX
||
3434 edif_entry
->update_sa_index
!= sa_index
) {
3435 spin_unlock_irqrestore(&fcport
->edif
.indx_list_lock
, flags
);
3440 * wait until we have seen at least EDIF_DELAY_COUNT transfers before
3441 * queueing RX delete
/* traffic filter: require EDIF_RX_DELETE_FILTER_COUNT completions first */
3443 if (edif_entry
->count
++ < EDIF_RX_DELETE_FILTER_COUNT
) {
3444 spin_unlock_irqrestore(&fcport
->edif
.indx_list_lock
, flags
);
3448 ql_dbg(ql_dbg_edif
, vha
, 0x5033,
3449 "%s: invalidating delete_sa_index, update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n",
3450 __func__
, edif_entry
->update_sa_index
, sa_index
, edif_entry
->delete_sa_index
);
/* capture the pending delete index and invalidate it while still locked */
3452 delete_sa_index
= edif_entry
->delete_sa_index
;
3453 edif_entry
->delete_sa_index
= INVALID_EDIF_SA_INDEX
;
3454 cached_nport_handle
= edif_entry
->handle
;
3455 spin_unlock_irqrestore(&fcport
->edif
.indx_list_lock
, flags
);
3457 /* sanity check on the nport handle */
3458 if (nport_handle
!= cached_nport_handle
) {
3459 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
3460 "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n",
3461 __func__
, nport_handle
, cached_nport_handle
);
3464 /* find the sa_ctl for the delete and schedule the delete */
3465 sa_ctl
= qla_edif_find_sa_ctl_by_index(fcport
, delete_sa_index
, 0);
3467 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
3468 "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n",
3469 __func__
, sa_ctl
, sa_index
);
3470 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
3471 "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n",
3473 edif_entry
->update_sa_index
, nport_handle
, handle
);
/*
 * NOTE(review): sa_ctl is dereferenced below; the dropped line(s) around
 * orig 3474 presumably held an "if (sa_ctl)" guard matching the
 * not-found log at orig 3480-3482 - confirm against upstream.
 */
3475 sa_ctl
->flags
= EDIF_SA_CTL_FLG_DEL
;
3476 set_bit(EDIF_SA_CTL_REPL
, &sa_ctl
->state
);
3477 qla_post_sa_replace_work(fcport
->vha
, fcport
,
3478 nport_handle
, sa_ctl
);
/* else-branch log: no sa_ctl matched the captured delete_sa_index */
3480 ql_dbg(ql_dbg_edif
, vha
, 0x3063,
3481 "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n",
3482 __func__
, delete_sa_index
);
/*
 * qla_chk_edif_rx_sa_delete_pending - initiator-mode status-path hook.
 *
 * For a completed scsi READ (DMA_FROM_DEVICE), forward the IOCB handle and
 * the edif_sa_index from the status IOCB to __chk_edif_rx_sa_delete_pending.
 *
 * NOTE(review): extraction dropped the declaration of "handle" (orig
 * ~3492-3493) and the early "return" body after the direction check
 * (orig ~3498) - compare against upstream before editing.
 */
3486 void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t
*vha
,
3487 srb_t
*sp
, struct sts_entry_24xx
*sts24
)
3489 fc_port_t
*fcport
= sp
->fcport
;
3490 /* sa_index used by this iocb */
3491 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
/* low word of the status IOCB handle identifies the completed command */
3494 handle
= (uint32_t)LSW(sts24
->handle
);
3496 /* find out if this status iosb is for a scsi read */
3497 if (cmd
->sc_data_direction
!= DMA_FROM_DEVICE
)
3500 return __chk_edif_rx_sa_delete_pending(vha
, fcport
, handle
,
3501 le16_to_cpu(sts24
->edif_sa_index
));
3504 void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t
*vha
, fc_port_t
*fcport
,
3505 struct ctio7_from_24xx
*pkt
)
3507 __chk_edif_rx_sa_delete_pending(vha
, fcport
,
3508 pkt
->handle
, le16_to_cpu(pkt
->edif_sa_index
));
/*
 * qla_parse_auth_els_ctl - populate the ELS pass-through argument block
 * (qla_els_pt_arg) for an auth ELS from the bsg_job and the srb's remapped
 * DMA buffers.
 *
 * NOTE(review): extraction dropped brace lines (orig ~3512, 3518, 3523,
 * 3531, 3536-3537). As shown, orig 3533 unconditionally overwrites
 * els_opcode after the SEND_ELS_REPLY branch set it to LS_ACC/LS_RJT;
 * the dropped orig 3531 may be "}" or "} else {" - verify against
 * upstream before assuming either.
 */
3511 static void qla_parse_auth_els_ctl(struct srb
*sp
)
3513 struct qla_els_pt_arg
*a
= &sp
->u
.bsg_cmd
.u
.els_arg
;
3514 struct bsg_job
*bsg_job
= sp
->u
.bsg_cmd
.bsg_job
;
3515 struct fc_bsg_request
*request
= bsg_job
->request
;
3516 struct qla_bsg_auth_els_request
*p
=
3517 (struct qla_bsg_auth_els_request
*)bsg_job
->request
;
/* tx/rx lengths and DMA addresses come from the remapped payload buffers */
3519 a
->tx_len
= a
->tx_byte_count
= sp
->remap
.req
.len
;
3520 a
->tx_addr
= sp
->remap
.req
.dma
;
3521 a
->rx_len
= a
->rx_byte_count
= sp
->remap
.rsp
.len
;
3522 a
->rx_addr
= sp
->remap
.rsp
.dma
;
/* ELS reply: app supplies control flags (shifted into fw position), the
 * rx exchange address, and whether this is an LS_ACC or LS_RJT */
3524 if (p
->e
.sub_cmd
== SEND_ELS_REPLY
) {
3525 a
->control_flags
= p
->e
.extra_control_flags
<< 13;
3526 a
->rx_xchg_address
= cpu_to_le32(p
->e
.extra_rx_xchg_address
);
3527 if (p
->e
.extra_control_flags
== BSG_CTL_FLAG_LS_ACC
)
3528 a
->els_opcode
= ELS_LS_ACC
;
3529 else if (p
->e
.extra_control_flags
== BSG_CTL_FLAG_LS_RJT
)
3530 a
->els_opcode
= ELS_LS_RJT
;
3532 a
->did
= sp
->fcport
->d_id
;
3533 a
->els_opcode
= request
->rqst_data
.h_els
.command_code
;
3534 a
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
3535 a
->vp_idx
= sp
->vha
->vp_idx
;
/*
 * qla_edif_process_els - handle an auth-ELS BSG request from the app:
 * locate the fcport by destination port id, allocate an srb plus DMA
 * request/response buffers from the purex pool, copy in the request
 * payload, and start the SRB_ELS_CMD_HST_NOLOGIN pass-through.
 *
 * Returns: rval from qla2x00_start_sp() on the success path, or
 * (DID_ERROR << 16) / error status set via SET_DID_STATUS on failure.
 *
 * NOTE(review): extraction dropped many lines here (braces, "goto done"
 * statements, declarations of sp/d_id, the retry loop head around orig
 * 3639-3657, and the trailing byte counts of the dma_pool_free calls at
 * orig 3668/3671) - compare against upstream qla_edif.c before editing.
 */
3538 int qla_edif_process_els(scsi_qla_host_t
*vha
, struct bsg_job
*bsg_job
)
3540 struct fc_bsg_request
*bsg_request
= bsg_job
->request
;
3541 struct fc_bsg_reply
*bsg_reply
= bsg_job
->reply
;
3542 fc_port_t
*fcport
= NULL
;
3543 struct qla_hw_data
*ha
= vha
->hw
;
3545 int rval
= (DID_ERROR
<< 16), cnt
;
3547 struct qla_bsg_auth_els_request
*p
=
3548 (struct qla_bsg_auth_els_request
*)bsg_job
->request
;
3549 struct qla_bsg_auth_els_reply
*rpl
=
3550 (struct qla_bsg_auth_els_reply
*)bsg_job
->reply
;
3552 rpl
->version
= EDIF_VERSION1
;
/* rebuild the 24-bit destination port id from the BSG request bytes */
3554 d_id
.b
.al_pa
= bsg_request
->rqst_data
.h_els
.port_id
[2];
3555 d_id
.b
.area
= bsg_request
->rqst_data
.h_els
.port_id
[1];
3556 d_id
.b
.domain
= bsg_request
->rqst_data
.h_els
.port_id
[0];
3558 /* find matching d_id in fcport list */
3559 fcport
= qla2x00_find_fcport_by_pid(vha
, &d_id
);
/* fcport-not-found error path */
3561 ql_dbg(ql_dbg_edif
, vha
, 0x911a,
3562 "%s fcport not find online portid=%06x.\n",
3563 __func__
, d_id
.b24
);
3564 SET_DID_STATUS(bsg_reply
->result
, DID_ERROR
);
3568 if (qla_bsg_check(vha
, bsg_job
, fcport
))
/* session being torn down: reject with DID_BAD_TARGET */
3571 if (EDIF_SESS_DELETE(fcport
)) {
3572 ql_dbg(ql_dbg_edif
, vha
, 0x910d,
3573 "%s ELS code %x, no loop id.\n", __func__
,
3574 bsg_request
->rqst_data
.r_els
.els_code
);
3575 SET_DID_STATUS(bsg_reply
->result
, DID_BAD_TARGET
);
3579 if (!vha
->flags
.online
) {
3580 ql_log(ql_log_warn
, vha
, 0x7005, "Host not online.\n");
3581 SET_DID_STATUS(bsg_reply
->result
, DID_BAD_TARGET
);
3586 /* pass through is supported only for ISP 4Gb or higher */
3587 if (!IS_FWI2_CAPABLE(ha
)) {
3588 ql_dbg(ql_dbg_user
, vha
, 0x7001,
3589 "ELS passthru not supported for ISP23xx based adapters.\n");
3590 SET_DID_STATUS(bsg_reply
->result
, DID_BAD_TARGET
);
/* allocate the srb; failure path logs and retries at the app layer */
3595 sp
= qla2x00_get_sp(vha
, fcport
, GFP_KERNEL
);
3597 ql_dbg(ql_dbg_user
, vha
, 0x7004,
3598 "Failed get sp pid=%06x\n", fcport
->d_id
.b24
);
3600 SET_DID_STATUS(bsg_reply
->result
, DID_IMM_RETRY
);
/* DMA request buffer from the purex pool, sized to the BSG payload */
3604 sp
->remap
.req
.len
= bsg_job
->request_payload
.payload_len
;
3605 sp
->remap
.req
.buf
= dma_pool_alloc(ha
->purex_dma_pool
,
3606 GFP_KERNEL
, &sp
->remap
.req
.dma
);
3607 if (!sp
->remap
.req
.buf
) {
3608 ql_dbg(ql_dbg_user
, vha
, 0x7005,
3609 "Failed allocate request dma len=%x\n",
3610 bsg_job
->request_payload
.payload_len
);
3612 SET_DID_STATUS(bsg_reply
->result
, DID_IMM_RETRY
);
/* DMA response buffer; on failure unwind via done_free_remap_req */
3616 sp
->remap
.rsp
.len
= bsg_job
->reply_payload
.payload_len
;
3617 sp
->remap
.rsp
.buf
= dma_pool_alloc(ha
->purex_dma_pool
,
3618 GFP_KERNEL
, &sp
->remap
.rsp
.dma
);
3619 if (!sp
->remap
.rsp
.buf
) {
3620 ql_dbg(ql_dbg_user
, vha
, 0x7006,
3621 "Failed allocate response dma len=%x\n",
3622 bsg_job
->reply_payload
.payload_len
);
3624 SET_DID_STATUS(bsg_reply
->result
, DID_IMM_RETRY
);
3625 goto done_free_remap_req
;
/* copy the app's request payload scatterlist into the DMA buffer */
3627 sg_copy_to_buffer(bsg_job
->request_payload
.sg_list
,
3628 bsg_job
->request_payload
.sg_cnt
, sp
->remap
.req
.buf
,
3630 sp
->remap
.remapped
= true;
/* no-login host ELS pass-through srb setup */
3632 sp
->type
= SRB_ELS_CMD_HST_NOLOGIN
;
3633 sp
->name
= "SPCN_BSG_HST_NOLOGIN";
3634 sp
->u
.bsg_cmd
.bsg_job
= bsg_job
;
3635 qla_parse_auth_els_ctl(sp
);
3637 sp
->free
= qla2x00_bsg_sp_free
;
3638 sp
->done
= qla2x00_bsg_job_done
;
/* issue the srb; QLA_SUCCESS path logs the exchange details */
3642 rval
= qla2x00_start_sp(sp
);
3645 ql_dbg(ql_dbg_edif
, vha
, 0x700a,
3646 "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
3647 __func__
, sc_to_str(p
->e
.sub_cmd
), fcport
->port_name
,
3648 p
->e
.extra_rx_xchg_address
, p
->e
.extra_control_flags
,
3649 sp
->handle
, sp
->remap
.req
.len
, bsg_job
);
/* EAGAIN-style retry: sleep then retry up to EDIF_RETRY_COUNT times */
3652 msleep(EDIF_MSLEEP_INTERVAL
);
3654 if (cnt
< EDIF_RETRY_COUNT
)
3658 ql_log(ql_log_warn
, vha
, 0x700e,
3659 "%s qla2x00_start_sp failed = %d\n", __func__
, rval
);
3660 SET_DID_STATUS(bsg_reply
->result
, DID_IMM_RETRY
);
3662 goto done_free_remap_rsp
;
/* error unwind: free rsp then req DMA buffers back to the purex pool */
3666 done_free_remap_rsp
:
3667 dma_pool_free(ha
->purex_dma_pool
, sp
->remap
.rsp
.buf
,
3669 done_free_remap_req
:
3670 dma_pool_free(ha
->purex_dma_pool
, sp
->remap
.req
.buf
,
/*
 * qla_edif_sess_down - notify the authentication app that a secure session
 * has gone down, then wait for the app's acknowledgement (bounded by
 * VPORT_DELETE) before completing.
 *
 * NOTE(review): extraction dropped lines here (orig ~3680-3682, 3692,
 * and the wait-loop body at orig 3695-3699, presumably a sleep/bounded
 * wait) - compare against upstream before editing.
 */
3679 void qla_edif_sess_down(struct scsi_qla_host
*vha
, struct fc_port
*sess
)
/* only act for app-managed sessions while the doorbell is active */
3683 if (sess
->edif
.app_sess_online
&& DBELL_ACTIVE(vha
)) {
3684 ql_dbg(ql_dbg_disc
, vha
, 0xf09c,
3685 "%s: sess %8phN send port_offline event\n",
3686 __func__
, sess
->port_name
);
3687 sess
->edif
.app_sess_online
= 0;
3688 sess
->edif
.sess_down_acked
= 0;
/* post SESSION_SHUTDOWN doorbell event and a PORT_OFFLINE AEN to the app */
3689 qla_edb_eventcreate(vha
, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN
,
3690 sess
->d_id
.b24
, 0, sess
);
3691 qla2x00_post_aen_work(vha
, FCH_EVT_PORT_OFFLINE
, sess
->d_id
.b24
);
/* wait for the app ack; bail out early if the vport is being deleted */
3693 while (!READ_ONCE(sess
->edif
.sess_down_acked
) &&
3694 !test_bit(VPORT_DELETE
, &vha
->dpc_flags
)) {
3700 sess
->edif
.sess_down_acked
= 0;
3701 ql_dbg(ql_dbg_disc
, vha
, 0xf09c,
3702 "%s: sess %8phN port_offline event completed\n",
3703 __func__
, sess
->port_name
);
/*
 * qla_edif_clear_appdata - purge app-facing state (doorbell events and
 * enode entries) tied to a secure fcport's d_id; no-op for non-FCSP ports.
 */
3707 void qla_edif_clear_appdata(struct scsi_qla_host
*vha
, struct fc_port
*fcport
)
/* nothing to clear unless this port negotiated FC-SP security */
3709 if (!(fcport
->flags
& FCF_FCSP_DEVICE
))
3712 qla_edb_clear(vha
, fcport
->d_id
);
3713 qla_enode_clear(vha
, fcport
->d_id
);