/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/*
 * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS
 */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/utsname.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_sli4.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* FDMI Port Speed definitions - FC-GS-7 */
#define HBA_PORTSPEED_1GFC	0x00000001	/* 1G FC */
#define HBA_PORTSPEED_2GFC	0x00000002	/* 2G FC */
#define HBA_PORTSPEED_4GFC	0x00000008	/* 4G FC */
#define HBA_PORTSPEED_10GFC	0x00000004	/* 10G FC */
#define HBA_PORTSPEED_8GFC	0x00000010	/* 8G FC */
#define HBA_PORTSPEED_16GFC	0x00000020	/* 16G FC */
#define HBA_PORTSPEED_32GFC	0x00000040	/* 32G FC */
#define HBA_PORTSPEED_20GFC	0x00000080	/* 20G FC */
#define HBA_PORTSPEED_40GFC	0x00000100	/* 40G FC */
#define HBA_PORTSPEED_128GFC	0x00000200	/* 128G FC */
#define HBA_PORTSPEED_64GFC	0x00000400	/* 64G FC */
#define HBA_PORTSPEED_256GFC	0x00000800	/* 256G FC */
#define HBA_PORTSPEED_UNKNOWN	0x00008000	/* Unknown */
#define HBA_PORTSPEED_10GE	0x00010000	/* 10G E */
#define HBA_PORTSPEED_40GE	0x00020000	/* 40G E */
#define HBA_PORTSPEED_100GE	0x00040000	/* 100G E */
#define HBA_PORTSPEED_25GE	0x00080000	/* 25G E */
#define HBA_PORTSPEED_50GE	0x00100000	/* 50G E */
#define HBA_PORTSPEED_400GE	0x00200000	/* 400G E */

static char *lpfc_release_version = LPFC_DRIVER_VERSION;

static void
lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		      struct lpfc_iocbq *rspiocb);

static void
lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
			  struct lpfc_dmabuf *mp, uint32_t size)
{
	if (!mp)
		lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
				"0146 Ignoring unsolicited CT No HBQ "
				"status = x%x\n",
				get_job_ulpstatus(phba, piocbq));

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0145 Ignoring unsolicited CT HBQ Size:%d "
			"status = x%x\n",
			size, get_job_ulpstatus(phba, piocbq));
}

static void
lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		     struct lpfc_dmabuf *mp, uint32_t size)
{
	lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size);
}

/**
 * lpfc_ct_unsol_cmpl : Completion callback function for unsol ct commands
 * @phba : pointer to lpfc hba data structure.
 * @cmdiocb : pointer to lpfc command iocb data structure.
 * @rspiocb : pointer to lpfc response iocb data structure.
 *
 * This routine is the callback function for issuing unsol ct reject command.
 * The memory allocated in the reject command path is freed up here.
 **/
static void
lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp, *bmp;

	ndlp = cmdiocb->ndlp;
	if (ndlp)
		lpfc_nlp_put(ndlp);

	mp = cmdiocb->rsp_dmabuf;
	bmp = cmdiocb->bpl_dmabuf;
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		cmdiocb->rsp_dmabuf = NULL;
	}

	if (bmp) {
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
		kfree(bmp);
		cmdiocb->bpl_dmabuf = NULL;
	}

	lpfc_sli_release_iocbq(phba, cmdiocb);
}

/**
 * lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands
 * @ndlp: pointer to a node-list data structure.
 * @ct_req: pointer to the CT request data structure.
 * @ulp_context: context of received UNSOL CT command
 * @ox_id: ox_id of the UNSOL CT command
 *
 * This routine is invoked by the lpfc_ct_handle_mibreq routine for sending
 * a reject response.  A reject response is sent for the unhandled commands.
 **/
static void
lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
		     struct lpfc_sli_ct_request *ct_req,
		     u16 ulp_context, u16 ox_id)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ct_request *ct_rsp;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_dmabuf *bmp = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct ulp_bde64 *bpl;
	u8 rc = 0;
	u32 tmo;

	/* fill in BDEs for command */
	mp = kmalloc(sizeof(*mp), GFP_KERNEL);
	if (!mp)
		goto ct_exit;

	mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
	if (!mp->virt)
		goto ct_free_mp;

	/* Allocate buffer for Buffer ptr list */
	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp)
		goto ct_free_mpvirt;

	bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &bmp->phys);
	if (!bmp->virt)
		goto ct_free_bmp;

	INIT_LIST_HEAD(&mp->list);
	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *)bmp->virt;
	memset(bpl, 0, sizeof(struct ulp_bde64));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	ct_rsp = (struct lpfc_sli_ct_request *)mp->virt;
	memset(ct_rsp, 0, sizeof(struct lpfc_sli_ct_request));

	ct_rsp->RevisionId.bits.Revision = SLI_CT_REVISION;
	ct_rsp->RevisionId.bits.InId = 0;
	ct_rsp->FsType = ct_req->FsType;
	ct_rsp->FsSubType = ct_req->FsSubType;
	ct_rsp->CommandResponse.bits.Size = 0;
	ct_rsp->CommandResponse.bits.CmdRsp =
		cpu_to_be16(SLI_CT_RESPONSE_FS_RJT);
	ct_rsp->ReasonCode = SLI_CT_REQ_NOT_SUPPORTED;
	ct_rsp->Explanation = SLI_CT_NO_ADDITIONAL_EXPL;

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq)
		goto ct_free_bmpvirt;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp,
					 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
					 ox_id, 1, FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_WQE);
	} else {
		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp, 0, ulp_context, 1,
					 FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_CX);
	}

	/* Save for completion so we can release these resources */
	cmdiocbq->rsp_dmabuf = mp;
	cmdiocbq->bpl_dmabuf = bmp;
	cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl;
	tmo = (3 * phba->fc_ratov);

	cmdiocbq->vport = vport;
	cmdiocbq->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;

	cmdiocbq->ndlp = lpfc_nlp_get(ndlp);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (!rc)
		return;

	lpfc_nlp_put(ndlp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
ct_free_bmpvirt:
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ct_free_bmp:
	kfree(bmp);
ct_free_mpvirt:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
ct_free_mp:
	kfree(mp);
ct_exit:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
			 "6440 Unsol CT: Rsp err %d Data: x%lx\n",
			 rc, vport->fc_flag);
}

/**
 * lpfc_ct_handle_mibreq - Process an unsolicited CT MIB request data buffer
 * @phba: pointer to lpfc hba data structure.
 * @ctiocbq: pointer to lpfc CT command iocb data structure.
 *
 * This routine is used for processing the IOCB associated with a unsolicited
 * CT MIB request. It first determines whether there is an existing ndlp that
 * matches the DID from the unsolicited IOCB. If not, it will return.
 **/
static void
lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
{
	struct lpfc_sli_ct_request *ct_req;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_vport *vport = ctiocbq->vport;
	u32 ulp_status = get_job_ulpstatus(phba, ctiocbq);
	u32 ulp_word4 = get_job_word4(phba, ctiocbq);
	u32 did;
	u16 mi_cmd;

	did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp);
	if (ulp_status) {
		lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
			      "6438 Unsol CT: status:x%x/x%x did : x%x\n",
			      ulp_status, ulp_word4, did);
		return;
	}

	/* Ignore traffic received during vport shutdown */
	if (test_bit(FC_UNLOADING, &vport->load_flag))
		return;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
			      "6439 Unsol CT: NDLP Not Found for DID : x%x",
			      did);
		return;
	}

	ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;

	mi_cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp);
	lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
		      "6442 MI Cmd : x%x Not Supported\n", mi_cmd);
	lpfc_ct_reject_event(ndlp, ct_req,
			     bf_get(wqe_ctxt_tag,
				    &ctiocbq->wqe.xmit_els_rsp.wqe_com),
			     bf_get(wqe_rcvoxid,
				    &ctiocbq->wqe.xmit_els_rsp.wqe_com));
}

/**
 * lpfc_ct_unsol_event - Process an unsolicited event from a ct sli ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @ctiocbq: pointer to lpfc ct iocb data structure.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the appropriate
 * routine after properly setting up the iocb buffer from the SLI ring on
 * which the unsolicited event was received.
 **/
void
lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *ctiocbq)
{
	struct lpfc_dmabuf *mp = NULL;
	IOCB_t *icmd = &ctiocbq->iocb;
	int i;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocb;
	dma_addr_t dma_addr;
	uint32_t size;
	struct list_head head;
	struct lpfc_sli_ct_request *ct_req;
	struct lpfc_dmabuf *bdeBuf1 = ctiocbq->cmd_dmabuf;
	struct lpfc_dmabuf *bdeBuf2 = ctiocbq->bpl_dmabuf;
	u32 status, parameter, bde_count = 0;
	struct lpfc_wcqe_complete *wcqe_cmpl = NULL;

	ctiocbq->cmd_dmabuf = NULL;
	ctiocbq->rsp_dmabuf = NULL;
	ctiocbq->bpl_dmabuf = NULL;

	wcqe_cmpl = &ctiocbq->wcqe_cmpl;
	status = get_job_ulpstatus(phba, ctiocbq);
	parameter = get_job_word4(phba, ctiocbq);
	if (phba->sli_rev == LPFC_SLI_REV4)
		bde_count = wcqe_cmpl->word3;
	else
		bde_count = icmd->ulpBdeCount;

	if (unlikely(status == IOSTAT_NEED_BUFFER)) {
		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
	} else if ((status == IOSTAT_LOCAL_REJECT) &&
		   ((parameter & IOERR_PARAM_MASK) ==
		    IOERR_RCV_BUFFER_WAITING)) {
		/* Not enough posted buffers; Try posting more buffers */
		phba->fc_stat.NoRcvBuf++;
		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
			lpfc_sli3_post_buffer(phba, pring, 2);
		return;
	}

	/* If there are no BDEs associated
	 * with this IOCB, there is nothing to do.
	 */
	if (bde_count == 0)
		return;

	ctiocbq->cmd_dmabuf = bdeBuf1;
	if (bde_count == 2)
		ctiocbq->bpl_dmabuf = bdeBuf2;

	ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;

	if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
	    ct_req->FsSubType == SLI_CT_MIB_Subtypes) {
		lpfc_ct_handle_mibreq(phba, ctiocbq);
	} else {
		if (!lpfc_bsg_ct_unsol_event(phba, pring, ctiocbq))
			return;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		INIT_LIST_HEAD(&head);
		list_add_tail(&head, &ctiocbq->list);
		list_for_each_entry(iocb, &head, list) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				bde_count = iocb->wcqe_cmpl.word3;
			else
				bde_count = iocb->iocb.ulpBdeCount;

			if (!bde_count)
				continue;
			bdeBuf1 = iocb->cmd_dmabuf;
			iocb->cmd_dmabuf = NULL;
			if (phba->sli_rev == LPFC_SLI_REV4)
				size = iocb->wqe.gen_req.bde.tus.f.bdeSize;
			else
				size = iocb->iocb.un.cont64[0].tus.f.bdeSize;
			lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size);
			lpfc_in_buf_free(phba, bdeBuf1);
			if (bde_count == 2) {
				bdeBuf2 = iocb->bpl_dmabuf;
				iocb->bpl_dmabuf = NULL;
				if (phba->sli_rev == LPFC_SLI_REV4)
					size = iocb->unsol_rcv_len;
				else
					size = iocb->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize;
				lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2,
						     size);
				lpfc_in_buf_free(phba, bdeBuf2);
			}
		}
		list_del(&head);
	} else {
		INIT_LIST_HEAD(&head);
		list_add_tail(&head, &ctiocbq->list);
		list_for_each_entry(iocbq, &head, list) {
			icmd = &iocbq->iocb;
			if (icmd->ulpBdeCount == 0)
				lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
			for (i = 0; i < icmd->ulpBdeCount; i++) {
				dma_addr = getPaddr(icmd->un.cont64[i].addrHigh,
						    icmd->un.cont64[i].addrLow);
				mp = lpfc_sli_ringpostbuf_get(phba, pring,
							      dma_addr);
				size = icmd->un.cont64[i].tus.f.bdeSize;
				lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
				lpfc_in_buf_free(phba, mp);
			}
			lpfc_sli3_post_buffer(phba, pring, i);
		}
		list_del(&head);
	}
}

/**
 * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function serves as the upper level protocol abort handler for CT
 * protocol.
 *
 * Return 1 if abort has been handled, 0 otherwise.
 **/
int
lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	int handled;

	/* CT upper level goes through BSG */
	handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);

	return handled;
}

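/* Walk the chained response dmabuf list built by lpfc_alloc_ct_rsp and
 * return each chunk's MBUF memory and list node to their pools.
 */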
static void
lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
		list_del(&mlast->list);
		lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
		kfree(mlast);
	}
	lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
	kfree(mlist);
}

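/* Allocate a chain of dmabufs to hold the CT response payload, carving the
 * requested size into FCELSSIZE chunks and filling in one BPL entry per
 * chunk.  The number of entries is returned through @entries.
 */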
static struct lpfc_dmabuf *
lpfc_alloc_ct_rsp(struct lpfc_hba *phba, __be16 cmdcode, struct ulp_bde64 *bpl,
		  uint32_t size, int *entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	int cnt, i = 0;

	/* We get chunks of FCELSSIZE */
	cnt = size > FCELSSIZE ? FCELSSIZE : size;

	while (size) {
		/* Allocate buffer for rsp payload */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_ct_rsp(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);

		if (be16_to_cpu(cmdcode) == SLI_CTNS_GID_FT ||
		    be16_to_cpu(cmdcode) == SLI_CTNS_GFF_ID)
			mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
		else
			mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_ct_rsp(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t)cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;

		i++;
		size -= cnt;
	}

	*entries = i;
	return mlist;
}

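/* Release the command, response and BPL dma buffers attached to a CT iocb
 * and then return the iocb itself to the SLI layer.
 */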
static int
lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
{
	struct lpfc_dmabuf *buf_ptr;

	/* IOCBQ job structure gets cleaned during release.  Just release
	 * the dma buffers here.
	 */
	if (ctiocb->cmd_dmabuf) {
		buf_ptr = ctiocb->cmd_dmabuf;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		ctiocb->cmd_dmabuf = NULL;
	}

	if (ctiocb->rsp_dmabuf) {
		lpfc_free_ct_rsp(phba, ctiocb->rsp_dmabuf);
		ctiocb->rsp_dmabuf = NULL;
	}

	if (ctiocb->bpl_dmabuf) {
		buf_ptr = ctiocb->bpl_dmabuf;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		ctiocb->bpl_dmabuf = NULL;
	}

	lpfc_sli_release_iocbq(phba, ctiocb);
	return 0;
}

/**
 * lpfc_gen_req - Build and issue a GEN_REQUEST command to the SLI Layer
 * @vport: pointer to a host virtual N_Port data structure.
 * @bmp: Pointer to BPL for SLI command
 * @inp: Pointer to data buffer for response data.
 * @outp: Pointer to data buffer that hold the CT command.
 * @cmpl: completion routine to call when command completes
 * @ndlp: Destination NPort nodelist entry
 *
 * This function serves as the final part of issuing a CT command.
 **/
static int
lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
	     struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
	     void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *),
	     struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry,
	     uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *geniocb;
	int rc;
	u16 ulp_context;

	/* Allocate buffer for command iocb */
	geniocb = lpfc_sli_get_iocbq(phba);
	if (!geniocb)
		return 1;

	/* Update the num_entry bde count */
	geniocb->num_bdes = num_entry;

	geniocb->bpl_dmabuf = bmp;

	/* Save for completion so we can release these resources */
	geniocb->cmd_dmabuf = inp;
	geniocb->rsp_dmabuf = outp;

	geniocb->event_tag = event_tag;

	if (!tmo) {
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);
	}

	if (phba->sli_rev == LPFC_SLI_REV4)
		ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	else
		ulp_context = ndlp->nlp_rpi;

	lpfc_sli_prep_gen_req(phba, geniocb, bmp, ulp_context, num_entry, tmo);

	/* Issue GEN REQ IOCB for NPORT <did> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0119 Issue GEN REQ IOCB to NPORT x%x "
			 "Data: x%x x%x\n",
			 ndlp->nlp_DID, geniocb->iotag,
			 vport->port_state);
	geniocb->cmd_cmpl = cmpl;
	geniocb->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	geniocb->vport = vport;
	geniocb->retry = retry;
	geniocb->ndlp = lpfc_nlp_get(ndlp);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_nlp_put(ndlp);
		lpfc_sli_release_iocbq(phba, geniocb);
		return 1;
	}

	return 0;
}

/**
 * lpfc_ct_cmd - Build and issue a CT command
 * @vport: pointer to a host virtual N_Port data structure.
 * @inmp: Pointer to data buffer for response data.
 * @bmp: Pointer to BPL for SLI command
 * @ndlp: Destination NPort nodelist entry
 * @cmpl: completion routine to call when command completes
 *
 * This function is called for issuing a CT command.
 **/
static int
lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
	    struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
	    void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			 struct lpfc_iocbq *),
	    uint32_t rsp_size, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
	struct lpfc_dmabuf *outmp;
	int cnt = 0, status;
	__be16 cmdcode = ((struct lpfc_sli_ct_request *)inmp->virt)->
		CommandResponse.bits.CmdRsp;

	bpl++;			/* Skip past ct request */

	/* Put buffer(s) for ct rsp in bpl */
	outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
	if (!outmp)
		return -ENOMEM;
	/*
	 * Form the CT IOCB.  The total number of BDEs in this IOCB
	 * is the single command plus response count from
	 * lpfc_alloc_ct_rsp.
	 */
	cnt += 1;
	status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp,
			      phba->fc_eventTag, cnt, 0, retry);
	if (status) {
		lpfc_free_ct_rsp(phba, outmp);
		return -EIO;
	}
	return 0;
}

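/* Scan the adapter's vport list for a vport whose assigned DID matches
 * @did, under the port_list lock.  Returns the matching vport or NULL.
 */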
static struct lpfc_vport *
lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
	struct lpfc_vport *vport_curr;
	unsigned long flags;

	spin_lock_irqsave(&phba->port_list_lock, flags);
	list_for_each_entry(vport_curr, &phba->port_list, listentry) {
		if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
			spin_unlock_irqrestore(&phba->port_list_lock, flags);
			return vport_curr;
		}
	}
	spin_unlock_irqrestore(&phba->port_list_lock, flags);
	return NULL;
}

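/* Process one DID returned by the NameServer: set up a discovery node for
 * it (or issue GFF_ID first on NPIV restricted-login ports) and record the
 * FC4 types (FCP/NVMe) reported for that DID.
 */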
static void
lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
{
	struct lpfc_nodelist *ndlp;

	if ((vport->port_type != LPFC_NPIV_PORT) ||
	    !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
		ndlp = lpfc_setup_disc_node(vport, Did);

		if (ndlp) {
			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
				"Parse GID_FTrsp: did:x%x flg:x%lx x%x",
				Did, ndlp->nlp_flag, vport->fc_flag);

			/* By default, the driver expects to support FCP FC4 */
			if (fc4_type == FC_TYPE_FCP)
				ndlp->nlp_fc4_type |= NLP_FC4_FCP;

			if (fc4_type == FC_TYPE_NVME)
				ndlp->nlp_fc4_type |= NLP_FC4_NVME;

			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "0238 Process x%06x NameServer Rsp "
					 "Data: x%lx x%x x%x x%lx x%x\n", Did,
					 ndlp->nlp_flag, ndlp->nlp_fc4_type,
					 ndlp->nlp_state, vport->fc_flag,
					 vport->fc_rscn_id_cnt);

			/* if ndlp needs to be discovered and prior
			 * state of ndlp hit devloss, change state to
			 * allow rediscovery.
			 */
			if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) &&
			    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_NPR_NODE);
			}
		} else {
			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
				"Skip1 GID_FTrsp: did:x%x flg:x%lx cnt:%d",
				Did, vport->fc_flag, vport->fc_rscn_id_cnt);

			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "0239 Skip x%06x NameServer Rsp "
					 "Data: x%lx x%x x%px\n",
					 Did, vport->fc_flag,
					 vport->fc_rscn_id_cnt, ndlp);
		}
	} else {
		if (!test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
		    lpfc_rscn_payload_check(vport, Did)) {
			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
				"Query GID_FTrsp: did:x%x flg:x%lx cnt:%d",
				Did, vport->fc_flag, vport->fc_rscn_id_cnt);

			/*
			 * This NPortID was previously a FCP/NVMe target,
			 * Don't even bother to send GFF_ID.
			 */
			ndlp = lpfc_findnode_did(vport, Did);
			if (ndlp &&
			    (ndlp->nlp_type &
			     (NLP_FCP_TARGET | NLP_NVME_TARGET))) {
				if (fc4_type == FC_TYPE_FCP)
					ndlp->nlp_fc4_type |= NLP_FC4_FCP;
				if (fc4_type == FC_TYPE_NVME)
					ndlp->nlp_fc4_type |= NLP_FC4_NVME;
				lpfc_setup_disc_node(vport, Did);
			} else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
					       0, Did) == 0)
				vport->num_disc_nodes++;
			else
				lpfc_setup_disc_node(vport, Did);
		} else {
			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
				"Skip2 GID_FTrsp: did:x%x flg:x%lx cnt:%d",
				Did, vport->fc_flag, vport->fc_rscn_id_cnt);

			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "0245 Skip x%06x NameServer Rsp "
					 "Data: x%lx x%x\n", Did,
					 vport->fc_flag,
					 vport->fc_rscn_id_cnt);
		}
	}
}

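/* Audit a single DID from a GID_FT/GID_PT response.  Initiator ports hand
 * the DID to lpfc_prep_node_fc4type(); NVMET ports only track NVMe
 * initiator nodes and mark the rest via the NLP_NVMET_RECOV flag.
 */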
static void
lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	char *str;

	if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT)
		str = "GID_FT";
	else
		str = "GID_PT";

	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "6430 Process %s rsp for %08x type %x %s %s\n",
			 str, Did, fc4_type,
			 (fc4_type == FC_TYPE_FCP) ? "FCP" : " ",
			 (fc4_type == FC_TYPE_NVME) ? "NVME" : " ");
	/*
	 * To conserve rpi's, filter out addresses for other
	 * vports on the same physical HBAs.
	 */
	if (Did != vport->fc_myDID &&
	    (!lpfc_find_vport_by_did(phba, Did) ||
	     vport->cfg_peer_port_login)) {
		if (!phba->nvmet_support) {
			/* FCPI/NVMEI path. Process Did */
			lpfc_prep_node_fc4type(vport, Did, fc4_type);
			return;
		}
		/* NVMET path.  NVMET only cares about NVMEI nodes. */
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (ndlp->nlp_type != NLP_NVME_INITIATOR ||
			    ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
				continue;
			if (ndlp->nlp_DID == Did)
				clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag);
			else
				set_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag);
		}
	}
}

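/* Walk the NameServer GID response buffers and audit every DID they carry,
 * then, on NVMET ports, run a DEVICE_RECOVERY state machine event for any
 * node still flagged for recovery.
 */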
static void
lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
	    uint32_t Size)
{
	struct lpfc_sli_ct_request *Response =
		(struct lpfc_sli_ct_request *) mp->virt;
	struct lpfc_dmabuf *mlast, *next_mp;
	uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
	uint32_t Did, CTentry;
	int Cnt;
	struct list_head head;
	struct lpfc_nodelist *ndlp = NULL;

	lpfc_set_disctmo(vport);
	vport->num_disc_nodes = 0;
	vport->fc_ns_retry = 0;

	list_add_tail(&head, &mp->list);
	list_for_each_entry_safe(mp, next_mp, &head, list) {
		mlast = mp;

		Cnt = Size > FCELSSIZE ? FCELSSIZE : Size;
		Size -= Cnt;

		if (!ctptr)
			ctptr = (uint32_t *) mlast->virt;
		else
			Cnt -= 16;	/* subtract length of CT header */

		/* Loop through entire NameServer list of DIDs */
		while (Cnt >= sizeof(uint32_t)) {
			/* Get next DID from NameServer List */
			CTentry = *ctptr++;
			Did = ((be32_to_cpu(CTentry)) & Mask_DID);
			lpfc_ns_rsp_audit_did(vport, Did, fc4_type);
			if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY)))
				goto nsout1;

			Cnt -= sizeof(uint32_t);
		}
		ctptr = NULL;
	}

	/* All GID_FT entries processed.  If the driver is running in
	 * in target mode, put impacted nodes into recovery and drop
	 * the RPI to flush outstanding IO.
	 */
	if (vport->phba->nvmet_support) {
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!test_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag))
				continue;
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RECOVERY);
			clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag);
		}
	}

nsout1:
	list_del(&head);
}

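/* Completion handler for the GID_FT NameServer query.  Validates the
 * response, retries on recoverable errors, parses an accepted payload via
 * lpfc_ns_rsp() and kicks off or completes RSCN/link-up discovery.
 */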
static void
lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *outp;
	struct lpfc_dmabuf *inp;
	struct lpfc_sli_ct_request *CTrsp;
	struct lpfc_sli_ct_request *CTreq;
	struct lpfc_nodelist *ndlp;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	int rc, type;

	/* First save ndlp, before we overwrite it */
	ndlp = cmdiocb->ndlp;

	/* we pass cmdiocb to state machine which needs rspiocb as well */
	cmdiocb->rsp_iocb = rspiocb;
	inp = cmdiocb->cmd_dmabuf;
	outp = cmdiocb->rsp_dmabuf;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
			      "GID_FT cmpl:     status:x%x/x%x rtry:%d",
			      ulp_status, ulp_word4, vport->fc_ns_retry);

	/* Ignore response if link flipped after this request was made */
	if (cmdiocb->event_tag != phba->fc_eventTag) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "9043 Event tag mismatch. Ignoring NS rsp\n");
		goto out;
	}

	/* Skip processing response on pport if unloading */
	if (vport == phba->pport && test_bit(FC_UNLOADING, &vport->load_flag)) {
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
			lpfc_els_flush_rscn(vport);
		goto out;
	}

	if (lpfc_els_chk_latt(vport)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0216 Link event during NS query\n");
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
			lpfc_els_flush_rscn(vport);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto out;
	}

	if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0226 NS query failed due to link event: "
				 "ulp_status x%x ulp_word4 x%x fc_flag x%lx "
				 "port_state x%x gidft_inp x%x\n",
				 ulp_status, ulp_word4, vport->fc_flag,
				 vport->port_state, vport->gidft_inp);
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
			lpfc_els_flush_rscn(vport);
		if (vport->gidft_inp)
			vport->gidft_inp--;
		goto out;
	}

	if (test_and_clear_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) {
		/* This is a GID_FT completing so the gidft_inp counter was
		 * incremented before the GID_FT was issued to the wire.
		 */
		if (vport->gidft_inp)
			vport->gidft_inp--;

		/*
		 * Skip processing the NS response
		 * Re-issue the NS cmd
		 */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0151 Process Deferred RSCN Data: x%lx x%x\n",
				 vport->fc_flag, vport->fc_rscn_id_cnt);
		lpfc_els_handle_rscn(vport);

		goto out;
	}

	if (ulp_status) {
		/* Check for retry */
		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			if (ulp_status != IOSTAT_LOCAL_REJECT ||
			    (ulp_word4 & IOERR_PARAM_MASK) !=
			    IOERR_NO_RESOURCES)
				vport->fc_ns_retry++;

			type = lpfc_get_gidft_type(vport, cmdiocb);
			if (type == 0)
				goto out;

			/* CT command is being retried */
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, type);
			if (rc == 0)
				goto out;
			else { /* Unable to send NS cmd */
				if (vport->gidft_inp)
					vport->gidft_inp--;
			}
		}
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
			lpfc_els_flush_rscn(vport);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0257 GID_FT Query error: 0x%x 0x%x\n",
				 ulp_status, vport->fc_ns_retry);
	} else {
		/* Good status, continue checking */
		CTreq = (struct lpfc_sli_ct_request *) inp->virt;
		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
		if (CTrsp->CommandResponse.bits.CmdRsp ==
		    cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "0208 NameServer Rsp Data: x%lx x%x "
					 "x%x x%x sz x%x\n",
					 vport->fc_flag,
					 CTreq->un.gid.Fc4Type,
					 vport->num_disc_nodes,
					 vport->gidft_inp,
					 get_job_data_placed(phba, rspiocb));

			lpfc_ns_rsp(vport,
				    outp,
				    CTreq->un.gid.Fc4Type,
				    get_job_data_placed(phba, rspiocb));
		} else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
			   SLI_CT_RESPONSE_FS_RJT) {
			/* NameServer Rsp Error */
			if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
			    && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
				lpfc_printf_vlog(vport, KERN_INFO,
					LOG_DISCOVERY,
					"0269 No NameServer Entries "
					"Data: x%x x%x x%x x%lx\n",
					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					(uint32_t) CTrsp->ReasonCode,
					(uint32_t) CTrsp->Explanation,
					vport->fc_flag);

				lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
					"GID_FT no entry  cmd:x%x rsn:x%x exp:x%x",
					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					(uint32_t) CTrsp->ReasonCode,
					(uint32_t) CTrsp->Explanation);
			} else {
				lpfc_printf_vlog(vport, KERN_INFO,
					LOG_DISCOVERY,
					"0240 NameServer Rsp Error "
					"Data: x%x x%x x%x x%lx\n",
					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					(uint32_t) CTrsp->ReasonCode,
					(uint32_t) CTrsp->Explanation,
					vport->fc_flag);

				lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
					"GID_FT rsp err1  cmd:x%x rsn:x%x exp:x%x",
					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					(uint32_t) CTrsp->ReasonCode,
					(uint32_t) CTrsp->Explanation);
			}
		} else {
			/* NameServer Rsp Error */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0241 NameServer Rsp Error "
					 "Data: x%x x%x x%x x%lx\n",
					 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					 (uint32_t) CTrsp->ReasonCode,
					 (uint32_t) CTrsp->Explanation,
					 vport->fc_flag);

			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
				"GID_FT rsp err2  cmd:x%x rsn:x%x exp:x%x",
				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
				(uint32_t) CTrsp->ReasonCode,
				(uint32_t) CTrsp->Explanation);
		}
		if (vport->gidft_inp)
			vport->gidft_inp--;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "4216 GID_FT cmpl inp %d disc %d\n",
			 vport->gidft_inp, vport->num_disc_nodes);

	/* Link up / RSCN discovery */
	if ((vport->num_disc_nodes == 0) &&
	    (vport->gidft_inp == 0)) {
		/*
		 * The driver has cycled through all Nports in the RSCN payload.
		 * Complete the handling by cleaning up and marking the
		 * current driver state.
		 */
		if (vport->port_state >= LPFC_DISC_AUTH) {
			if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
				lpfc_els_flush_rscn(vport);
				set_bit(FC_RSCN_MODE, &vport->fc_flag);
			} else {
				lpfc_els_flush_rscn(vport);
			}
		}

		lpfc_disc_start(vport);
	}
out:
	lpfc_ct_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

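/* Completion handler for the GID_PT NameServer query.  Mirrors the GID_FT
 * handler: validate, retry when recoverable, parse the accepted payload via
 * lpfc_ns_rsp() and drive RSCN/link-up discovery to completion.
 */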
static void
lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *outp;
	struct lpfc_dmabuf *inp;
	struct lpfc_sli_ct_request *CTrsp;
	struct lpfc_sli_ct_request *CTreq;
	struct lpfc_nodelist *ndlp;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	int rc;

	/* First save ndlp, before we overwrite it */
	ndlp = cmdiocb->ndlp;

	/* we pass cmdiocb to state machine which needs rspiocb as well */
	cmdiocb->rsp_iocb = rspiocb;
	inp = cmdiocb->cmd_dmabuf;
	outp = cmdiocb->rsp_dmabuf;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
			      "GID_PT cmpl:     status:x%x/x%x rtry:%d",
			      ulp_status, ulp_word4,
			      vport->fc_ns_retry);

	/* Ignore response if link flipped after this request was made */
	if (cmdiocb->event_tag != phba->fc_eventTag) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "9044 Event tag mismatch. Ignoring NS rsp\n");
		goto out;
	}

	/* Skip processing response on pport if unloading */
	if (vport == phba->pport && test_bit(FC_UNLOADING, &vport->load_flag)) {
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
			lpfc_els_flush_rscn(vport);
		goto out;
	}

	if (lpfc_els_chk_latt(vport)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "4108 Link event during NS query\n");
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
			lpfc_els_flush_rscn(vport);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto out;
	}

	if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "4166 NS query failed due to link event: "
				 "ulp_status x%x ulp_word4 x%x fc_flag x%lx "
				 "port_state x%x gidft_inp x%x\n",
				 ulp_status, ulp_word4, vport->fc_flag,
				 vport->port_state, vport->gidft_inp);
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
			lpfc_els_flush_rscn(vport);
		if (vport->gidft_inp)
			vport->gidft_inp--;
		goto out;
	}

	if (test_and_clear_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) {
		/* This is a GID_PT completing so the gidft_inp counter was
		 * incremented before the GID_PT was issued to the wire.
		 */
		if (vport->gidft_inp)
			vport->gidft_inp--;

		/*
		 * Skip processing the NS response
		 * Re-issue the NS cmd
		 */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "4167 Process Deferred RSCN Data: x%lx x%x\n",
				 vport->fc_flag, vport->fc_rscn_id_cnt);
		lpfc_els_handle_rscn(vport);

		goto out;
	}

	if (ulp_status) {
		/* Check for retry */
		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			if (ulp_status != IOSTAT_LOCAL_REJECT ||
			    (ulp_word4 & IOERR_PARAM_MASK) !=
			    IOERR_NO_RESOURCES)
				vport->fc_ns_retry++;

			/* CT command is being retried */
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT,
					 vport->fc_ns_retry, GID_PT_N_PORT);
			if (rc == 0)
				goto out;
			else { /* Unable to send NS cmd */
				if (vport->gidft_inp)
					vport->gidft_inp--;
			}
		}
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
			lpfc_els_flush_rscn(vport);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "4103 GID_FT Query error: 0x%x 0x%x\n",
				 ulp_status, vport->fc_ns_retry);
	} else {
		/* Good status, continue checking */
		CTreq = (struct lpfc_sli_ct_request *)inp->virt;
		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
		    SLI_CT_RESPONSE_FS_ACC) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "4105 NameServer Rsp Data: x%lx x%x "
					 "x%x x%x sz x%x\n",
					 vport->fc_flag,
					 CTreq->un.gid.Fc4Type,
					 vport->num_disc_nodes,
					 vport->gidft_inp,
					 get_job_data_placed(phba, rspiocb));

			lpfc_ns_rsp(vport,
				    outp,
				    CTreq->un.gid.Fc4Type,
				    get_job_data_placed(phba, rspiocb));
		} else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
			   SLI_CT_RESPONSE_FS_RJT) {
			/* NameServer Rsp Error */
			if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
			    && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
				lpfc_printf_vlog(
					vport, KERN_INFO, LOG_DISCOVERY,
					"4106 No NameServer Entries "
					"Data: x%x x%x x%x x%lx\n",
					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					(uint32_t)CTrsp->ReasonCode,
					(uint32_t)CTrsp->Explanation,
					vport->fc_flag);

				lpfc_debugfs_disc_trc(
					vport, LPFC_DISC_TRC_CT,
					"GID_PT no entry  cmd:x%x rsn:x%x exp:x%x",
					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					(uint32_t)CTrsp->ReasonCode,
					(uint32_t)CTrsp->Explanation);
			} else {
				lpfc_printf_vlog(
					vport, KERN_INFO, LOG_DISCOVERY,
					"4107 NameServer Rsp Error "
					"Data: x%x x%x x%x x%lx\n",
					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					(uint32_t)CTrsp->ReasonCode,
					(uint32_t)CTrsp->Explanation,
					vport->fc_flag);

				lpfc_debugfs_disc_trc(
					vport, LPFC_DISC_TRC_CT,
					"GID_PT rsp err1  cmd:x%x rsn:x%x exp:x%x",
					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					(uint32_t)CTrsp->ReasonCode,
					(uint32_t)CTrsp->Explanation);
			}
		} else {
			/* NameServer Rsp Error */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "4109 NameServer Rsp Error "
					 "Data: x%x x%x x%x x%lx\n",
					 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					 (uint32_t)CTrsp->ReasonCode,
					 (uint32_t)CTrsp->Explanation,
					 vport->fc_flag);

			lpfc_debugfs_disc_trc(
				vport, LPFC_DISC_TRC_CT,
				"GID_PT rsp err2  cmd:x%x rsn:x%x exp:x%x",
				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
				(uint32_t)CTrsp->ReasonCode,
				(uint32_t)CTrsp->Explanation);
		}
		if (vport->gidft_inp)
			vport->gidft_inp--;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "6450 GID_PT cmpl inp %d disc %d\n",
			 vport->gidft_inp, vport->num_disc_nodes);

	/* Link up / RSCN discovery */
	if ((vport->num_disc_nodes == 0) &&
	    (vport->gidft_inp == 0)) {
		/*
		 * The driver has cycled through all Nports in the RSCN payload.
		 * Complete the handling by cleaning up and marking the
		 * current driver state.
		 */
		if (vport->port_state >= LPFC_DISC_AUTH) {
			if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
				lpfc_els_flush_rscn(vport);
				set_bit(FC_RSCN_MODE, &vport->fc_flag);
			} else {
				lpfc_els_flush_rscn(vport);
			}
		}

		lpfc_disc_start(vport);
	}
out:
	lpfc_ct_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

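/* Completion handler for GFF_ID.  Uses the returned FC4 feature bits to
 * skip initiator-only ports, retries recoverable failures, and otherwise
 * sets up a discovery node for the queried DID.
 */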
static void
lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
	struct lpfc_sli_ct_request *CTrsp;
	int did, rc, retry;
	uint8_t fbits;
	struct lpfc_nodelist *ndlp = NULL, *free_ndlp = NULL;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);

	did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
	did = be32_to_cpu(did);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
			      "GFF_ID cmpl:     status:x%x/x%x did:x%x",
			      ulp_status, ulp_word4, did);

	/* Ignore response if link flipped after this request was made */
	if (cmdiocb->event_tag != phba->fc_eventTag) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "9045 Event tag mismatch. Ignoring NS rsp\n");
		goto iocb_free;
	}

	if (ulp_status == IOSTAT_SUCCESS) {
		/* Good status, continue checking */
		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
		fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];

		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "6431 Process GFF_ID rsp for %08x "
				 "fbits %02x %s %s\n",
				 did, fbits,
				 (fbits & FC4_FEATURE_INIT) ? "Initiator" : " ",
				 (fbits & FC4_FEATURE_TARGET) ? "Target" : " ");

		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
		    SLI_CT_RESPONSE_FS_ACC) {
			if ((fbits & FC4_FEATURE_INIT) &&
			    !(fbits & FC4_FEATURE_TARGET)) {
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_DISCOVERY,
						 "0270 Skip x%x GFF "
						 "NameServer Rsp Data: (init) "
						 "x%x x%x\n", did, fbits,
						 vport->fc_rscn_id_cnt);
				goto out;
			}
		}
	} else {
		/* Check for retry */
		if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
			retry = 1;
			if (ulp_status == IOSTAT_LOCAL_REJECT) {
				switch ((ulp_word4 &
					IOERR_PARAM_MASK)) {

				case IOERR_NO_RESOURCES:
					/* We don't increment the retry
					 * count for this case.
					 */
					break;
				case IOERR_LINK_DOWN:
				case IOERR_SLI_ABORTED:
				case IOERR_SLI_DOWN:
					retry = 0;
					break;
				default:
					cmdiocb->retry++;
				}
			} else
				cmdiocb->retry++;

			if (retry) {
				/* CT command is being retried */
				rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
						 cmdiocb->retry, did);
				if (rc == 0) {
					/* success */
					free_ndlp = cmdiocb->ndlp;
					lpfc_ct_free_iocb(phba, cmdiocb);
					lpfc_nlp_put(free_ndlp);
					return;
				}
			}
		}
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0267 NameServer GFF Rsp "
				 "x%x Error (%d %d) Data: x%lx x%x\n",
				 did, ulp_status, ulp_word4,
				 vport->fc_flag, vport->fc_rscn_id_cnt);
	}

	/* This is a target port, unregistered port, or the GFF_ID failed */
	ndlp = lpfc_setup_disc_node(vport, did);
	if (ndlp)
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0242 Process x%x GFF "
				 "NameServer Rsp Data: x%lx x%lx x%x\n",
				 did, ndlp->nlp_flag, vport->fc_flag,
				 vport->fc_rscn_id_cnt);
	else
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0243 Skip x%x GFF "
				 "NameServer Rsp Data: x%lx x%x\n", did,
				 vport->fc_flag, vport->fc_rscn_id_cnt);
out:
	/* Link up / RSCN discovery */
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "6451 GFF_ID cmpl inp %d disc %d\n",
			 vport->gidft_inp, vport->num_disc_nodes);

	if (vport->num_disc_nodes == 0) {
		/*
		 * The driver has cycled through all Nports in the RSCN payload.
		 * Complete the handling by cleaning up and marking the
		 * current driver state.
		 */
		if (vport->port_state >= LPFC_DISC_AUTH) {
			if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
				lpfc_els_flush_rscn(vport);
				set_bit(FC_RSCN_MODE, &vport->fc_flag);
			} else {
				lpfc_els_flush_rscn(vport);
			}
		}
		lpfc_disc_start(vport);
	}

iocb_free:
	free_ndlp = cmdiocb->ndlp;
	lpfc_ct_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(free_ndlp);
}

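/* Completion handler for GFT_ID.  Records the FCP/NVMe FC4 types on the
 * local node and either continues to PRLI or sends a LOGO when the FC4
 * type is still unknown.
 */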
static void
lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
	struct lpfc_sli_ct_request *CTrsp;
	int did;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_nodelist *ns_ndlp = cmdiocb->ndlp;
	uint32_t fc4_data_0, fc4_data_1;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);

	did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId;
	did = be32_to_cpu(did);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
			      "GFT_ID cmpl: status:x%x/x%x did:x%x",
			      ulp_status, ulp_word4, did);

	/* Ignore response if link flipped after this request was made */
	if ((uint32_t)cmdiocb->event_tag != phba->fc_eventTag) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "9046 Event tag mismatch. Ignoring NS rsp\n");
		goto out;
	}

	if (ulp_status == IOSTAT_SUCCESS) {
		/* Good status, continue checking */
		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
		fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
		fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "6432 Process GFT_ID rsp for %08x "
				 "Data %08x %08x %s %s\n",
				 did, fc4_data_0, fc4_data_1,
				 (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) ?
				  "FCP" : " ",
				 (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ?
				  "NVME" : " ");

		/* Lookup the NPort_ID queried in the GFT_ID and find the
		 * driver's local node.  It's an error if the driver
		 * doesn't have one.
		 */
		ndlp = lpfc_findnode_did(vport, did);
		if (ndlp) {
			/* The bitmask value for FCP and NVME FCP types is
			 * the same because they are 32 bits distant from
			 * each other in word0 and word1.
			 */
			if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK)
				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
			if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_DISCOVERY | LOG_NODE,
					 "3064 Setting ndlp x%px, DID x%06x "
					 "with FC4 x%08x, Data: x%08x x%08x "
					 "%d\n",
					 ndlp, did, ndlp->nlp_fc4_type,
					 FC_TYPE_FCP, FC_TYPE_NVME,
					 ndlp->nlp_state);

			if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
			    ndlp->nlp_fc4_type) {
				ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_PRLI_ISSUE);
				lpfc_issue_els_prli(vport, ndlp, 0);
			} else if (!ndlp->nlp_fc4_type) {
				/* If fc4 type is still unknown, then LOGO */
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_DISCOVERY | LOG_NODE,
						 "6443 Sending LOGO ndlp x%px, "
						 "DID x%06x with fc4_type: "
						 "x%08x, state: %d\n",
						 ndlp, did, ndlp->nlp_fc4_type,
						 ndlp->nlp_state);
				lpfc_issue_els_logo(vport, ndlp, 0);
				ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_NPR_NODE);
			}
		}
	} else
		lpfc_vlog_msg(vport, KERN_WARNING, LOG_DISCOVERY,
			      "3065 GFT_ID status x%08x\n", ulp_status);

out:
	lpfc_ct_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ns_ndlp);
}

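/* Common completion path for the simple NameServer registration CT
 * commands: log the result, retry when appropriate, wake any synchronous
 * DA_ID waiter and release the iocb resources.
 */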
static void
lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
	     struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *inp;
	struct lpfc_dmabuf *outp;
	struct lpfc_sli_ct_request *CTrsp;
	struct lpfc_nodelist *ndlp;
	int cmdcode, rc, latt;
	uint8_t retry;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);

	/* First save ndlp, before we overwrite it */
	ndlp = cmdiocb->ndlp;

	/* we pass cmdiocb to state machine which needs rspiocb as well */
	cmdiocb->rsp_iocb = rspiocb;

	inp = cmdiocb->cmd_dmabuf;
	outp = cmdiocb->rsp_dmabuf;

	cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
					CommandResponse.bits.CmdRsp);
	CTrsp = (struct lpfc_sli_ct_request *) outp->virt;

	latt = lpfc_els_chk_latt(vport);

	/* RFT request completes status <ulp_status> CmdRsp <CmdRsp> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0209 CT Request completes, latt %d, "
			 "ulp_status x%x CmdRsp x%x, Context x%x, Tag x%x\n",
			 latt, ulp_status,
			 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
			 get_job_ulpcontext(phba, cmdiocb), cmdiocb->iotag);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
			      "CT cmd cmpl:     status:x%x/x%x cmd:x%x",
			      ulp_status, ulp_word4, cmdcode);

	if (ulp_status) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0268 NS cmd x%x Error (x%x x%x)\n",
				 cmdcode, ulp_status, ulp_word4);

		if (ulp_status == IOSTAT_LOCAL_REJECT &&
		    (((ulp_word4 & IOERR_PARAM_MASK) ==
		      IOERR_SLI_DOWN) ||
		     ((ulp_word4 & IOERR_PARAM_MASK) ==
		      IOERR_SLI_ABORTED)))
			goto out;

		retry = cmdiocb->retry;
		if (retry >= LPFC_MAX_NS_RETRY)
			goto out;

		retry++;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0250 Retrying NS cmd %x\n", cmdcode);
		rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
		if (rc == 0)
			goto out;
	}

out:
	/* If the caller wanted a synchronous DA_ID completion, signal the
	 * wait obj and clear flag to reset the vport.
	 */
	if (ndlp->save_flags & NLP_WAIT_FOR_DA_ID) {
		if (ndlp->da_id_waitq)
			wake_up(ndlp->da_id_waitq);
	}

	spin_lock_irq(&ndlp->lock);
	ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
	spin_unlock_irq(&ndlp->lock);

	lpfc_ct_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

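/* The following completion handlers differ only in which ct_flags bit is
 * set on a successful accept before handing off to lpfc_cmpl_ct().
 */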
static void
lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);

	if (ulp_status == IOSTAT_SUCCESS) {
		struct lpfc_dmabuf *outp;
		struct lpfc_sli_ct_request *CTrsp;

		outp = cmdiocb->rsp_dmabuf;
		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
		    SLI_CT_RESPONSE_FS_ACC)
			vport->ct_flags |= FC_CT_RFT_ID;
	}
	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
}

static void
lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);

	if (ulp_status == IOSTAT_SUCCESS) {
		struct lpfc_dmabuf *outp;
		struct lpfc_sli_ct_request *CTrsp;

		outp = cmdiocb->rsp_dmabuf;
		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
		    SLI_CT_RESPONSE_FS_ACC)
			vport->ct_flags |= FC_CT_RNN_ID;
	}
	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
}

static void
lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			 struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);

	if (ulp_status == IOSTAT_SUCCESS) {
		struct lpfc_dmabuf *outp;
		struct lpfc_sli_ct_request *CTrsp;

		outp = cmdiocb->rsp_dmabuf;
		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
		    SLI_CT_RESPONSE_FS_ACC)
			vport->ct_flags |= FC_CT_RSPN_ID;
	}
	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
}

static void
lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			 struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);

	if (ulp_status == IOSTAT_SUCCESS) {
		struct lpfc_dmabuf *outp;
		struct lpfc_sli_ct_request *CTrsp;

		outp = cmdiocb->rsp_dmabuf;
		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
		    SLI_CT_RESPONSE_FS_ACC)
			vport->ct_flags |= FC_CT_RSNN_NN;
	}
	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
}

static void
lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		       struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;

	/* even if it fails we will act as though it succeeded. */
	vport->ct_flags = 0;
	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
}

static void
lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);

	if (ulp_status == IOSTAT_SUCCESS) {
		struct lpfc_dmabuf *outp;
		struct lpfc_sli_ct_request *CTrsp;

		outp = cmdiocb->rsp_dmabuf;
		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
		    SLI_CT_RESPONSE_FS_ACC)
			vport->ct_flags |= FC_CT_RFF_ID;
	}
	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
}

/*
 * Although the symbolic port name is thought to be an integer
 * as of January 18, 2016, leave it as a string until more of
 * the record state becomes defined.
 */
int
lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
			      size_t size)
{
	int n;

	/*
	 * Use the lpfc board number as the Symbolic Port
	 * Name object.  NPIV is not in play so this integer
	 * value is sufficient and unique per FC-ID.
	 */
	n = scnprintf(symbol, size, "%d", vport->phba->brd_no);
	return n;
}

int
lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
			      size_t size)
{
	char fwrev[FW_REV_STR_SIZE] = {0};
	char tmp[MAXHOSTNAMELEN] = {0};

	memset(symbol, 0, size);

	scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName);
	if (strlcat(symbol, tmp, size) >= size)
		goto buffer_done;

	lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
	scnprintf(tmp, sizeof(tmp), " FV%s", fwrev);
	if (strlcat(symbol, tmp, size) >= size)
		goto buffer_done;

	scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version);
	if (strlcat(symbol, tmp, size) >= size)
		goto buffer_done;

	scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name);
	if (strlcat(symbol, tmp, size) >= size)
		goto buffer_done;

	/* Note :- OS name is "Linux" */
	scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname);
	strlcat(symbol, tmp, size);

buffer_done:
	return strnlen(symbol, size);
}

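/* Count the non-fabric nodes currently in the MAPPED or UNMAPPED state on
 * this vport, under the fc_nodes list lock.
 */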
static uint32_t
lpfc_find_map_node(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	unsigned long iflags;
	uint32_t cnt = 0;

	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_type & NLP_FABRIC)
			continue;
		if ((ndlp->nlp_state == NLP_STE_MAPPED_NODE) ||
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE))
			cnt++;
	}
	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
	return cnt;
}

/*
 * This routine will return the FC4 Type associated with the CT
 * GID_FT command.
 */
int
lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_sli_ct_request *CtReq;
	struct lpfc_dmabuf *mp;
	uint32_t type;

	mp = cmdiocb->cmd_dmabuf;
	if (mp == NULL)
		return 0;

	CtReq = (struct lpfc_sli_ct_request *)mp->virt;
	type = (uint32_t)CtReq->un.gid.Fc4Type;
	if ((type != SLI_CTPT_FCP) && (type != SLI_CTPT_NVME))
		return 0;
	return type;
}

1879 * Issue Cmd to NameServer
1884 lpfc_ns_cmd(struct lpfc_vport
*vport
, int cmdcode
,
1885 uint8_t retry
, uint32_t context
)
1887 struct lpfc_nodelist
* ndlp
;
1888 struct lpfc_hba
*phba
= vport
->phba
;
1889 struct lpfc_dmabuf
*mp
, *bmp
;
1890 struct lpfc_sli_ct_request
*CtReq
;
1891 struct ulp_bde64
*bpl
;
1892 void (*cmpl
) (struct lpfc_hba
*, struct lpfc_iocbq
*,
1893 struct lpfc_iocbq
*) = NULL
;
1895 uint32_t rsp_size
= 1024;
1899 ndlp
= lpfc_findnode_did(vport
, NameServer_DID
);
1900 if (!ndlp
|| ndlp
->nlp_state
!= NLP_STE_UNMAPPED_NODE
) {
1905 /* fill in BDEs for command */
1906 /* Allocate buffer for command payload */
1907 mp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
1913 INIT_LIST_HEAD(&mp
->list
);
1914 mp
->virt
= lpfc_mbuf_alloc(phba
, MEM_PRI
, &(mp
->phys
));
1917 goto ns_cmd_free_mp
;
1920 /* Allocate buffer for Buffer ptr list */
1921 bmp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
1924 goto ns_cmd_free_mpvirt
;
1927 INIT_LIST_HEAD(&bmp
->list
);
1928 bmp
->virt
= lpfc_mbuf_alloc(phba
, MEM_PRI
, &(bmp
->phys
));
1931 goto ns_cmd_free_bmp
;
1934 /* NameServer Req */
1935 lpfc_printf_vlog(vport
, KERN_INFO
,LOG_DISCOVERY
,
1936 "0236 NameServer Req Data: x%x x%lx x%x x%x\n",
1937 cmdcode
, vport
->fc_flag
, vport
->fc_rscn_id_cnt
,
1940 bpl
= (struct ulp_bde64
*) bmp
->virt
;
1941 memset(bpl
, 0, sizeof(struct ulp_bde64
));
1942 bpl
->addrHigh
= le32_to_cpu(putPaddrHigh(mp
->phys
) );
1943 bpl
->addrLow
= le32_to_cpu(putPaddrLow(mp
->phys
) );
1944 bpl
->tus
.f
.bdeFlags
= 0;
1945 if (cmdcode
== SLI_CTNS_GID_FT
)
1946 bpl
->tus
.f
.bdeSize
= GID_REQUEST_SZ
;
1947 else if (cmdcode
== SLI_CTNS_GID_PT
)
1948 bpl
->tus
.f
.bdeSize
= GID_REQUEST_SZ
;
1949 else if (cmdcode
== SLI_CTNS_GFF_ID
)
1950 bpl
->tus
.f
.bdeSize
= GFF_REQUEST_SZ
;
1951 else if (cmdcode
== SLI_CTNS_GFT_ID
)
1952 bpl
->tus
.f
.bdeSize
= GFT_REQUEST_SZ
;
1953 else if (cmdcode
== SLI_CTNS_RFT_ID
)
1954 bpl
->tus
.f
.bdeSize
= RFT_REQUEST_SZ
;
1955 else if (cmdcode
== SLI_CTNS_RNN_ID
)
1956 bpl
->tus
.f
.bdeSize
= RNN_REQUEST_SZ
;
1957 else if (cmdcode
== SLI_CTNS_RSPN_ID
)
1958 bpl
->tus
.f
.bdeSize
= RSPN_REQUEST_SZ
;
1959 else if (cmdcode
== SLI_CTNS_RSNN_NN
)
1960 bpl
->tus
.f
.bdeSize
= RSNN_REQUEST_SZ
;
1961 else if (cmdcode
== SLI_CTNS_DA_ID
)
1962 bpl
->tus
.f
.bdeSize
= DA_ID_REQUEST_SZ
;
1963 else if (cmdcode
== SLI_CTNS_RFF_ID
)
1964 bpl
->tus
.f
.bdeSize
= RFF_REQUEST_SZ
;
1966 bpl
->tus
.f
.bdeSize
= 0;
1967 bpl
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
1969 CtReq
= (struct lpfc_sli_ct_request
*) mp
->virt
;
1970 memset(CtReq
, 0, sizeof(struct lpfc_sli_ct_request
));
1971 CtReq
->RevisionId
.bits
.Revision
= SLI_CT_REVISION
;
1972 CtReq
->RevisionId
.bits
.InId
= 0;
1973 CtReq
->FsType
= SLI_CT_DIRECTORY_SERVICE
;
1974 CtReq
->FsSubType
= SLI_CT_DIRECTORY_NAME_SERVER
;
1975 CtReq
->CommandResponse
.bits
.Size
= 0;
1977 case SLI_CTNS_GID_FT
:
1978 CtReq
->CommandResponse
.bits
.CmdRsp
=
1979 cpu_to_be16(SLI_CTNS_GID_FT
);
1980 CtReq
->un
.gid
.Fc4Type
= context
;
1982 if (vport
->port_state
< LPFC_NS_QRY
)
1983 vport
->port_state
= LPFC_NS_QRY
;
1984 lpfc_set_disctmo(vport
);
1985 cmpl
= lpfc_cmpl_ct_cmd_gid_ft
;
1986 rsp_size
= FC_MAX_NS_RSP
;
1989 case SLI_CTNS_GID_PT
:
1990 CtReq
->CommandResponse
.bits
.CmdRsp
=
1991 cpu_to_be16(SLI_CTNS_GID_PT
);
1992 CtReq
->un
.gid
.PortType
= context
;
1994 if (vport
->port_state
< LPFC_NS_QRY
)
1995 vport
->port_state
= LPFC_NS_QRY
;
1996 lpfc_set_disctmo(vport
);
1997 cmpl
= lpfc_cmpl_ct_cmd_gid_pt
;
1998 rsp_size
= FC_MAX_NS_RSP
;
2001 case SLI_CTNS_GFF_ID
:
2002 CtReq
->CommandResponse
.bits
.CmdRsp
=
2003 cpu_to_be16(SLI_CTNS_GFF_ID
);
2004 CtReq
->un
.gff
.PortId
= cpu_to_be32(context
);
2005 cmpl
= lpfc_cmpl_ct_cmd_gff_id
;
2008 case SLI_CTNS_GFT_ID
:
2009 CtReq
->CommandResponse
.bits
.CmdRsp
=
2010 cpu_to_be16(SLI_CTNS_GFT_ID
);
2011 CtReq
->un
.gft
.PortId
= cpu_to_be32(context
);
2012 cmpl
= lpfc_cmpl_ct_cmd_gft_id
;
	case SLI_CTNS_RFT_ID:
		vport->ct_flags &= ~FC_CT_RFT_ID;
		CtReq->CommandResponse.bits.CmdRsp =
		    cpu_to_be16(SLI_CTNS_RFT_ID);
		CtReq->un.rft.port_id = cpu_to_be32(vport->fc_myDID);

		/* Register Application Services type if vmid enabled. */
		if (phba->cfg_vmid_app_header)
			CtReq->un.rft.app_serv_reg =
				cpu_to_be32(RFT_APP_SERV_REG);

		/* Register FC4 FCP type if enabled. */
		if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
		    vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
			CtReq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG);

		/* Register NVME type if enabled. */
		if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
		    vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
			CtReq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG);

		ptr = (uint32_t *)CtReq;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "6433 Issue RFT (%s %s %s): %08x %08x %08x "
				 "%08x %08x %08x %08x %08x\n",
				 CtReq->un.rft.fcp_reg ? "FCP" : " ",
				 CtReq->un.rft.nvme_reg ? "NVME" : " ",
				 CtReq->un.rft.app_serv_reg ? "APPS" : " ",
				 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
				 *(ptr + 4), *(ptr + 5),
				 *(ptr + 6), *(ptr + 7));
		cmpl = lpfc_cmpl_ct_cmd_rft_id;
		break;
	case SLI_CTNS_RNN_ID:
		vport->ct_flags &= ~FC_CT_RNN_ID;
		CtReq->CommandResponse.bits.CmdRsp =
		    cpu_to_be16(SLI_CTNS_RNN_ID);
		CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID);
		memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename,
		       sizeof(struct lpfc_name));
		cmpl = lpfc_cmpl_ct_cmd_rnn_id;
		break;
	case SLI_CTNS_RSPN_ID:
		vport->ct_flags &= ~FC_CT_RSPN_ID;
		CtReq->CommandResponse.bits.CmdRsp =
		    cpu_to_be16(SLI_CTNS_RSPN_ID);
		CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID);
		size = sizeof(CtReq->un.rspn.symbname);
		CtReq->un.rspn.len =
			lpfc_vport_symbolic_port_name(vport,
						      CtReq->un.rspn.symbname,
						      size);
		cmpl = lpfc_cmpl_ct_cmd_rspn_id;
		break;
	case SLI_CTNS_RSNN_NN:
		vport->ct_flags &= ~FC_CT_RSNN_NN;
		CtReq->CommandResponse.bits.CmdRsp =
		    cpu_to_be16(SLI_CTNS_RSNN_NN);
		memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
		       sizeof(struct lpfc_name));
		size = sizeof(CtReq->un.rsnn.symbname);
		CtReq->un.rsnn.len =
			lpfc_vport_symbolic_node_name(vport,
						      CtReq->un.rsnn.symbname,
						      size);
		cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
		break;
	case SLI_CTNS_DA_ID:
		/* Implement DA_ID Nameserver request */
		CtReq->CommandResponse.bits.CmdRsp =
			cpu_to_be16(SLI_CTNS_DA_ID);
		CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID);
		cmpl = lpfc_cmpl_ct_cmd_da_id;
		break;
	case SLI_CTNS_RFF_ID:
		vport->ct_flags &= ~FC_CT_RFF_ID;
		CtReq->CommandResponse.bits.CmdRsp =
		    cpu_to_be16(SLI_CTNS_RFF_ID);
		CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
		CtReq->un.rff.fbits = FC4_FEATURE_INIT;

		/* The driver always supports FC_TYPE_FCP.  However, the
		 * caller can specify NVME (type x28) as well, but only
		 * if that FC4 type is supported.
		 */
		if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		     (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
		    (context == FC_TYPE_NVME)) {
			if ((vport == phba->pport) && phba->nvmet_support) {
				CtReq->un.rff.fbits = (FC4_FEATURE_TARGET |
					FC4_FEATURE_NVME_DISC);
				lpfc_nvmet_update_targetport(phba);
			} else {
				lpfc_nvme_update_localport(vport);
			}
			CtReq->un.rff.type_code = context;

		} else if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) &&
			   (context == FC_TYPE_FCP))
			CtReq->un.rff.type_code = context;

		else	/* The requested FC4 type is not enabled */
			goto ns_cmd_free_bmpvirt;

		ptr = (uint32_t *)CtReq;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "6434 Issue RFF (%s): %08x %08x %08x %08x "
				 "%08x %08x %08x %08x\n",
				 (context == FC_TYPE_NVME) ? "NVME" : "FCP",
				 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
				 *(ptr + 4), *(ptr + 5),
				 *(ptr + 6), *(ptr + 7));
		cmpl = lpfc_cmpl_ct_cmd_rff_id;
		break;
	}

	/* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
	 * to hold ndlp reference for the corresponding callback function.
	 */
	if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
		/* On success, the cmpl function will free the buffers */
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
				      "Issue CT cmd: cmd:x%x did:x%x",
				      cmdcode, ndlp->nlp_DID, 0);
		return 0;
	}
	rc = 6;

ns_cmd_free_bmpvirt:
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ns_cmd_free_bmp:
	kfree(bmp);
ns_cmd_free_mpvirt:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
ns_cmd_free_mp:
	kfree(mp);
ns_cmd_exit:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0266 Issue NameServer Req x%x err %d Data: x%lx "
			 "x%x\n",
			 cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt);
	return 1;
}
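
/*
 * Example of driving the name server registrations above: after a system
 * hostname change, lpfc_fdmi_change_check() further down re-registers the
 * symbolic node name with
 *
 *	lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
 *
 * where the trailing two arguments carry the retry count and the FC4-type /
 * port-type context consumed by the GID/GFF/RFF cases above.
 */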

/**
 * lpfc_fdmi_rprt_defer - Check for any deferred FDMI RPRT commands
 * @phba: Pointer to HBA context object.
 * @mask: Initial port attributes mask
 *
 * This function checks to see if any vports have deferred their FDMI RPRT.
 * A vport's RPRT may be deferred if it is issued before the primary port's
 * RHBA completes.
 */
static void
lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int i;

	set_bit(HBA_RHBA_CMPL, &phba->hba_flag);
	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			vport = vports[i];
			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
			if (!ndlp)
				continue;
			if (vport->ct_flags & FC_CT_RPRT_DEFER) {
				vport->ct_flags &= ~FC_CT_RPRT_DEFER;
				vport->fdmi_port_mask = mask;
				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the command IOCBQ.
 * @rspiocb: Pointer to the response IOCBQ.
 *
 * This function handles the completion of a driver-initiated FDMI
 * CT command issued during discovery.
 */
static void
lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		       struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
	struct lpfc_sli_ct_request *CTcmd = inp->virt;
	struct lpfc_sli_ct_request *CTrsp = outp->virt;
	__be16 fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
	__be16 fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
	struct lpfc_nodelist *ndlp, *free_ndlp = NULL;
	uint32_t latt, cmd, err;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);

	latt = lpfc_els_chk_latt(vport);
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
			      "FDMI cmpl: status:x%x/x%x latt:%d",
			      ulp_status, ulp_word4, latt);

	if (latt || ulp_status) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0229 FDMI cmd %04x failed, latt = %d "
				 "ulp_status: (x%x/x%x), sli_flag x%x\n",
				 be16_to_cpu(fdmi_cmd), latt, ulp_status,
				 ulp_word4, phba->sli.sli_flag);

		/* Look for a retryable error */
		if (ulp_status == IOSTAT_LOCAL_REJECT) {
			switch ((ulp_word4 & IOERR_PARAM_MASK)) {
			case IOERR_SLI_ABORTED:
			case IOERR_SLI_DOWN:
				/* Driver aborted this IO.  No retry as error
				 * is likely Offline->Online or some adapter
				 * error.  Recovery will try again, but if port
				 * is not active there's no point to continue
				 * issuing follow up FDMI commands.
				 */
				if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
					free_ndlp = cmdiocb->ndlp;
					lpfc_ct_free_iocb(phba, cmdiocb);
					lpfc_nlp_put(free_ndlp);
					return;
				}
				break;
			case IOERR_ABORT_IN_PROGRESS:
			case IOERR_SEQUENCE_TIMEOUT:
			case IOERR_ILLEGAL_FRAME:
			case IOERR_NO_RESOURCES:
			case IOERR_ILLEGAL_COMMAND:
				cmdiocb->retry++;
				if (cmdiocb->retry >= LPFC_FDMI_MAX_RETRY)
					break;

				/* Retry the same FDMI command */
				err = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING,
							  cmdiocb, 0);
				if (err == IOCB_ERROR)
					break;
				return;
			default:
				break;
			}
		}
	}

	free_ndlp = cmdiocb->ndlp;
	lpfc_ct_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(free_ndlp);

	ndlp = lpfc_findnode_did(vport, FDMI_DID);
	if (!ndlp)
		return;

	/* Check for a CT LS_RJT response */
	cmd = be16_to_cpu(fdmi_cmd);
	if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) {
		/* Log FDMI reject */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS,
				 "0220 FDMI cmd FS_RJT Data: x%x", cmd);

		/* Should we fallback to FDMI-2 / FDMI-1 ? */
		switch (cmd) {
		case SLI_MGMT_RHBA:
			if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) {
				/* Fallback to FDMI-1 for HBA attributes */
				vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;

				/* If HBA attributes are FDMI1, so should
				 * port attributes be for consistency.
				 */
				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
				/* Start over */
				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
			}
			return;

		case SLI_MGMT_RPRT:
			if (vport->port_type != LPFC_PHYSICAL_PORT) {
				ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
				if (!ndlp)
					return;
			}
			if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
				/* Fallback to FDMI-1 */
				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
				/* Start over */
				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
				return;
			}
			if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
				/* Retry the same command */
				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
			}
			return;

		case SLI_MGMT_RPA:
			/* No retry on Vendor, RPA only done on physical port */
			if (phba->link_flag & LS_CT_VEN_RPA) {
				phba->link_flag &= ~LS_CT_VEN_RPA;
				if (phba->cmf_active_mode == LPFC_CFG_OFF)
					return;
				lpfc_printf_log(phba, KERN_WARNING,
						LOG_DISCOVERY | LOG_ELS,
						"6460 VEN FDMI RPA RJT\n");
				return;
			}
			if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
				/* Fallback to FDMI-1 */
				vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
				/* Start over */
				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
				return;
			}
			if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
				/* Retry the same command */
				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
			}
			return;
		}
	}

	/*
	 * On success, need to cycle thru FDMI registration for discovery
	 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
	 * DPRT -> RPRT (vports)
	 */
	switch (cmd) {
	case SLI_MGMT_RHBA:
		/* Check for any RPRTs deferred till after RHBA completes */
		lpfc_fdmi_rprt_defer(phba, vport->fdmi_port_mask);

		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0);
		break;

	case SLI_MGMT_DHBA:
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
		break;

	case SLI_MGMT_DPRT:
		if (vport->port_type == LPFC_PHYSICAL_PORT) {
			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0);
		} else {
			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
			if (!ndlp)
				return;

			/* Only issue a RPRT for the vport if the RHBA
			 * for the physical port completes successfully.
			 * We may have to defer the RPRT accordingly.
			 */
			if (test_bit(HBA_RHBA_CMPL, &phba->hba_flag)) {
				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
			} else {
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_DISCOVERY | LOG_ELS,
						 "6078 RPRT deferred\n");
				vport->ct_flags |= FC_CT_RPRT_DEFER;
			}
		}
		break;

	case SLI_MGMT_RPA:
		if (vport->port_type == LPFC_PHYSICAL_PORT &&
		    phba->sli4_hba.pc_sli4_params.mi_ver) {
			/* mi is only for the physical port, no vports */
			if (phba->link_flag & LS_CT_VEN_RPA) {
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_DISCOVERY | LOG_ELS |
						 LOG_CGN_MGMT,
						 "6449 VEN RPA FDMI Success\n");
				phba->link_flag &= ~LS_CT_VEN_RPA;
				break;
			}

			lpfc_printf_log(phba, KERN_INFO,
					LOG_DISCOVERY | LOG_CGN_MGMT,
					"6210 Issue Vendor MI FDMI %x\n",
					phba->sli4_hba.pc_sli4_params.mi_ver);

			/* CGN is only for the physical port, no vports */
			if (lpfc_fdmi_cmd(vport, ndlp, cmd,
					  LPFC_FDMI_VENDOR_ATTR_mi) == 0)
				phba->link_flag |= LS_CT_VEN_RPA;
			lpfc_printf_log(phba, KERN_INFO,
					LOG_DISCOVERY | LOG_ELS,
					"6458 Send MI FDMI:%x Flag x%x\n",
					phba->sli4_hba.pc_sli4_params.mi_ver,
					phba->link_flag);
		} else {
			lpfc_printf_log(phba, KERN_INFO,
					LOG_DISCOVERY | LOG_ELS,
					"6459 No FDMI VEN MI support - "
					"RPA Success\n");
		}
		break;
	}
}

/**
 * lpfc_fdmi_change_check - Check for changed FDMI parameters
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Check how many mapped NPorts we are connected to and whether the system
 * hostname changed.  Called from the hbeat timeout routine to check if any
 * FDMI parameters changed; if so, re-register those Attributes.
 */
void
lpfc_fdmi_change_check(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;
	uint16_t cnt;

	if (!lpfc_is_link_up(phba))
		return;

	/* Must be connected to a Fabric */
	if (!test_bit(FC_FABRIC, &vport->fc_flag))
		return;

	ndlp = lpfc_findnode_did(vport, FDMI_DID);
	if (!ndlp)
		return;

	/* Check if system hostname changed */
	if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
		memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
		scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
			  init_utsname()->nodename);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);

		/* Since this affects multiple HBA and PORT attributes, we
		 * need to de-register and go thru the whole FDMI registration
		 * cycle.
		 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
		 * DPRT -> RPRT (vports)
		 */
		if (vport->port_type == LPFC_PHYSICAL_PORT) {
			/* For extra Vendor RPA */
			phba->link_flag &= ~LS_CT_VEN_RPA;
			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
		} else {
			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
			if (!ndlp)
				return;
			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
		}

		/* Since this code path registers all the port attributes
		 * we can just return without further checking.
		 */
		return;
	}

	if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
		return;

	/* Check if the number of mapped NPorts changed */
	cnt = lpfc_find_map_node(vport);
	if (cnt == vport->fdmi_num_disc)
		return;

	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
			      LPFC_FDMI_PORT_ATTR_num_disc);
	} else {
		ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
		if (!ndlp)
			return;
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT,
			      LPFC_FDMI_PORT_ATTR_num_disc);
	}
}

static inline int
lpfc_fdmi_set_attr_u32(void *attr, uint16_t attrtype, uint32_t attrval)
{
	struct lpfc_fdmi_attr_u32 *ae = attr;
	int size = sizeof(*ae);

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);
	ae->value_u32 = cpu_to_be32(attrval);

	return size;
}
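
/*
 * The FDMI attribute helpers above and below all emit a big-endian
 * type/length/value triplet and return the number of bytes written.  For the
 * u32 form that is sizeof(struct lpfc_fdmi_attr_u32): 8 bytes in total
 * (2-byte type, 2-byte length that covers the header itself, 4-byte value),
 * which the caller adds to the running request size.
 */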

static inline int
lpfc_fdmi_set_attr_wwn(void *attr, uint16_t attrtype, struct lpfc_name *wwn)
{
	struct lpfc_fdmi_attr_wwn *ae = attr;
	int size = sizeof(*ae);

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);
	/* WWN's assumed to be bytestreams - Big Endian presentation */
	memcpy(ae->name, wwn,
	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));

	return size;
}

static inline int
lpfc_fdmi_set_attr_fullwwn(void *attr, uint16_t attrtype,
			   struct lpfc_name *wwnn, struct lpfc_name *wwpn)
{
	struct lpfc_fdmi_attr_fullwwn *ae = attr;
	u8 *nname = ae->nname;
	u8 *pname = ae->pname;
	int size = sizeof(*ae);

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);
	/* WWN's assumed to be bytestreams - Big Endian presentation */
	memcpy(nname, wwnn,
	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
	memcpy(pname, wwpn,
	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));

	return size;
}

static inline int
lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring)
{
	struct lpfc_fdmi_attr_string *ae = attr;
	int len, size;

	/*
	 * We are trusting the caller that if a fdmi string field
	 * is capped at 64 bytes, the caller passes in a string of
	 * 64 bytes or less.
	 */

	strscpy(ae->value_string, attrstring, sizeof(ae->value_string));
	len = strnlen(ae->value_string, sizeof(ae->value_string));
	/* round string length up to a 32bit boundary */
	len += (len & 3) ? (4 - (len & 3)) : 4;
	/* size is Type/Len (4 bytes) plus string length */
	size = FOURBYTES + len;

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);

	return size;
}
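
/*
 * Sizing example for the string helper above: a 10-character string is
 * padded to the next 32-bit boundary (12 bytes), so the attribute takes
 * FOURBYTES + 12 = 16 bytes; a string whose length is already a multiple of
 * 4 (say 8 bytes) still gets a full 4-byte pad, again 16 bytes, which keeps
 * room for the terminating NUL on the wire.
 */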

/* Bitfields for FC4 Types that can be reported */
#define ATTR_FC4_CT	0x00000001
#define ATTR_FC4_FCP	0x00000002
#define ATTR_FC4_NVME	0x00000004

static inline int
lpfc_fdmi_set_attr_fc4types(void *attr, uint16_t attrtype, uint32_t typemask)
{
	struct lpfc_fdmi_attr_fc4types *ae = attr;
	int size = sizeof(*ae);

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);

	if (typemask & ATTR_FC4_FCP)
		ae->value_types[2] = 0x01; /* Type 0x8 - FCP */

	if (typemask & ATTR_FC4_CT)
		ae->value_types[7] = 0x01; /* Type 0x20 - CT */

	if (typemask & ATTR_FC4_NVME)
		ae->value_types[6] = 0x01; /* Type 0x28 - NVME */

	return size;
}
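
/*
 * The three assignments above follow the FC-GS FC-4 Types bit-mask layout as
 * used here: the field is presented as big-endian 32-bit words, with word
 * (type / 32) carrying bit (type % 32).  Type 0x8 (FCP) is bit 8 of word 0,
 * i.e. bit 0 of byte 2; types 0x20 (CT) and 0x28 (NVME) are bits 0 and 8 of
 * word 1, i.e. bytes 7 and 6 respectively.
 */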

/* Routines for all individual HBA attributes */
static int
lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RHBA_NODENAME,
				      &vport->fc_sparam.nodeName);
}

static int
lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, void *attr)
{
	/* This string MUST be consistent with other FC platforms
	 * supported by Broadcom.
	 */
	return lpfc_fdmi_set_attr_string(attr, RHBA_MANUFACTURER,
					 "Emulex Corporation");
}

static int
lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RHBA_SERIAL_NUMBER,
					 phba->SerialNumber);
}

static int
lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL,
					 phba->ModelName);
}

static int
lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL_DESCRIPTION,
					 phba->ModelDesc);
}

static int
lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	lpfc_vpd_t *vp = &phba->vpd;
	char buf[16] = { 0 };

	snprintf(buf, sizeof(buf), "%08x", vp->rev.biuRev);

	return lpfc_fdmi_set_attr_string(attr, RHBA_HARDWARE_VERSION, buf);
}

static int
lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_string(attr, RHBA_DRIVER_VERSION,
					 lpfc_release_version);
}

static int
lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	char buf[64] = { 0 };

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_decode_firmware_rev(phba, buf, 1);

		return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
						 buf);
	}

	return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
					 phba->OptionROMVersion);
}

static int
lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	char buf[64] = { 0 };

	lpfc_decode_firmware_rev(phba, buf, 1);

	return lpfc_fdmi_set_attr_string(attr, RHBA_FIRMWARE_VERSION, buf);
}

static int
lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, void *attr)
{
	char buf[256] = { 0 };

	snprintf(buf, sizeof(buf), "%s %s %s",
		 init_utsname()->sysname,
		 init_utsname()->release,
		 init_utsname()->version);

	return lpfc_fdmi_set_attr_string(attr, RHBA_OS_NAME_VERSION, buf);
}

static int
lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RHBA_MAX_CT_PAYLOAD_LEN,
				      LPFC_MAX_CT_SIZE);
}

static int
lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
	char buf[256] = { 0 };

	lpfc_vport_symbolic_node_name(vport, buf, sizeof(buf));

	return lpfc_fdmi_set_attr_string(attr, RHBA_SYM_NODENAME, buf);
}

static int
lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RHBA_VENDOR_INFO, 0);
}

static int
lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, void *attr)
{
	/* Each driver instance corresponds to a single port */
	return lpfc_fdmi_set_attr_u32(attr, RHBA_NUM_PORTS, 1);
}

static int
lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RHBA_FABRIC_WWNN,
				      &vport->fabric_nodename);
}

static int
lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RHBA_BIOS_VERSION,
					 phba->BIOSVersion);
}

static int
lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, void *attr)
{
	/* Driver doesn't have access to this information */
	return lpfc_fdmi_set_attr_u32(attr, RHBA_BIOS_STATE, 0);
}

static int
lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_string(attr, RHBA_VENDOR_ID, "EMULEX");
}

/*
 * Routines for all individual PORT attributes
 */

static int
lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	u32 fc4types;

	fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);

	/* Check to see if Firmware supports NVME and on physical port */
	if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) &&
	    phba->sli4_hba.pc_sli4_params.nvme)
		fc4types |= ATTR_FC4_NVME;

	return lpfc_fdmi_set_attr_fc4types(attr, RPRT_SUPPORTED_FC4_TYPES,
					   fc4types);
}

static int
lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	u32 tcfg;
	u8 i, cnt;
	u32 speeds = 0;

	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
		cnt = 0;
		if (phba->sli_rev == LPFC_SLI_REV4) {
			tcfg = phba->sli4_hba.conf_trunk;
			for (i = 0; i < 4; i++, tcfg >>= 1)
				if (tcfg & 1)
					cnt++;
		}

		if (cnt > 2) { /* 4 lane trunk group */
			if (phba->lmt & LMT_64Gb)
				speeds |= HBA_PORTSPEED_256GFC;
			if (phba->lmt & LMT_32Gb)
				speeds |= HBA_PORTSPEED_128GFC;
			if (phba->lmt & LMT_16Gb)
				speeds |= HBA_PORTSPEED_64GFC;
		} else if (cnt) { /* 2 lane trunk group */
			if (phba->lmt & LMT_128Gb)
				speeds |= HBA_PORTSPEED_256GFC;
			if (phba->lmt & LMT_64Gb)
				speeds |= HBA_PORTSPEED_128GFC;
			if (phba->lmt & LMT_32Gb)
				speeds |= HBA_PORTSPEED_64GFC;
			if (phba->lmt & LMT_16Gb)
				speeds |= HBA_PORTSPEED_32GFC;
		} else {
			if (phba->lmt & LMT_256Gb)
				speeds |= HBA_PORTSPEED_256GFC;
			if (phba->lmt & LMT_128Gb)
				speeds |= HBA_PORTSPEED_128GFC;
			if (phba->lmt & LMT_64Gb)
				speeds |= HBA_PORTSPEED_64GFC;
			if (phba->lmt & LMT_32Gb)
				speeds |= HBA_PORTSPEED_32GFC;
			if (phba->lmt & LMT_16Gb)
				speeds |= HBA_PORTSPEED_16GFC;
			if (phba->lmt & LMT_10Gb)
				speeds |= HBA_PORTSPEED_10GFC;
			if (phba->lmt & LMT_8Gb)
				speeds |= HBA_PORTSPEED_8GFC;
			if (phba->lmt & LMT_4Gb)
				speeds |= HBA_PORTSPEED_4GFC;
			if (phba->lmt & LMT_2Gb)
				speeds |= HBA_PORTSPEED_2GFC;
			if (phba->lmt & LMT_1Gb)
				speeds |= HBA_PORTSPEED_1GFC;
		}
	} else {
		/* FCoE links support only one speed */
		switch (phba->fc_linkspeed) {
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			speeds = HBA_PORTSPEED_10GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			speeds = HBA_PORTSPEED_25GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			speeds = HBA_PORTSPEED_40GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_100GBPS:
			speeds = HBA_PORTSPEED_100GE;
			break;
		}
	}

	return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_SPEED, speeds);
}
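
/*
 * Note on the trunking math above: when a 2- or 4-lane trunk is configured,
 * the port advertises the aggregate rate, so each per-lane capability bit in
 * phba->lmt is reported two or four times its base rate (for example,
 * LMT_16Gb lanes in a 4-lane trunk group are advertised as
 * HBA_PORTSPEED_64GFC).
 */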

static int
lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	u32 speeds = 0;

	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			speeds = HBA_PORTSPEED_1GFC;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			speeds = HBA_PORTSPEED_2GFC;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			speeds = HBA_PORTSPEED_4GFC;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			speeds = HBA_PORTSPEED_8GFC;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			speeds = HBA_PORTSPEED_10GFC;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			speeds = HBA_PORTSPEED_16GFC;
			break;
		case LPFC_LINK_SPEED_32GHZ:
			speeds = HBA_PORTSPEED_32GFC;
			break;
		case LPFC_LINK_SPEED_64GHZ:
			speeds = HBA_PORTSPEED_64GFC;
			break;
		case LPFC_LINK_SPEED_128GHZ:
			speeds = HBA_PORTSPEED_128GFC;
			break;
		case LPFC_LINK_SPEED_256GHZ:
			speeds = HBA_PORTSPEED_256GFC;
			break;
		default:
			speeds = HBA_PORTSPEED_UNKNOWN;
			break;
		}
	} else {
		switch (phba->fc_linkspeed) {
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			speeds = HBA_PORTSPEED_10GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			speeds = HBA_PORTSPEED_25GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			speeds = HBA_PORTSPEED_40GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_100GBPS:
			speeds = HBA_PORTSPEED_100GE;
			break;
		default:
			speeds = HBA_PORTSPEED_UNKNOWN;
			break;
		}
	}

	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_SPEED, speeds);
}

static int
lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, void *attr)
{
	struct serv_parm *hsp = (struct serv_parm *)&vport->fc_sparam;

	return lpfc_fdmi_set_attr_u32(attr, RPRT_MAX_FRAME_SIZE,
				      (((uint32_t)hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
				      (uint32_t)hsp->cmn.bbRcvSizeLsb);
}
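
/*
 * Example of the frame-size math above: the usual FC receive data field of
 * 2112 bytes is carried in the service parameters as bbRcvSizeMsb = 0x08 and
 * bbRcvSizeLsb = 0x40, so ((0x08 & 0x0F) << 8) | 0x40 = 0x840 = 2112.
 */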

static int
lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, void *attr)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	char buf[64] = { 0 };

	snprintf(buf, sizeof(buf), "/sys/class/scsi_host/host%d",
		 shost->host_no);

	return lpfc_fdmi_set_attr_string(attr, RPRT_OS_DEVICE_NAME, buf);
}

static int
lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, void *attr)
{
	char buf[64] = { 0 };

	scnprintf(buf, sizeof(buf), "%s", vport->phba->os_host_name);

	return lpfc_fdmi_set_attr_string(attr, RPRT_HOST_NAME, buf);
}

static int
lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RPRT_NODENAME,
				      &vport->fc_sparam.nodeName);
}

static int
lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RPRT_PORTNAME,
				      &vport->fc_sparam.portName);
}

static int
lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
	char buf[256] = { 0 };

	lpfc_vport_symbolic_port_name(vport, buf, sizeof(buf));

	return lpfc_fdmi_set_attr_string(attr, RPRT_SYM_PORTNAME, buf);
}

static int
lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_TYPE,
				      (phba->fc_topology == LPFC_TOPOLOGY_LOOP) ?
				      LPFC_FDMI_PORTTYPE_NLPORT :
				      LPFC_FDMI_PORTTYPE_NPORT);
}

static int
lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_CLASS,
				      FC_COS_CLASS2 | FC_COS_CLASS3);
}

static int
lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RPRT_FABRICNAME,
				      &vport->fabric_portname);
}

static int
lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	u32 fc4types;

	fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);

	/* Check to see if NVME is configured or not */
	if (vport == phba->pport &&
	    phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		fc4types |= ATTR_FC4_NVME;

	return lpfc_fdmi_set_attr_fc4types(attr, RPRT_ACTIVE_FC4_TYPES,
					   fc4types);
}

static int
lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_STATE,
				      LPFC_FDMI_PORTSTATE_ONLINE);
}

static int
lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, void *attr)
{
	vport->fdmi_num_disc = lpfc_find_map_node(vport);

	return lpfc_fdmi_set_attr_u32(attr, RPRT_DISC_PORT,
				      vport->fdmi_num_disc);
}

static int
lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_ID, vport->fc_myDID);
}

static int
lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_SERVICE,
					 "Smart SAN Initiator");
}

static int
lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_fullwwn(attr, RPRT_SMART_GUID,
					  &vport->fc_sparam.nodeName,
					  &vport->fc_sparam.portName);
}

static int
lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_VERSION,
					 "Smart SAN Version 2.0");
}

static int
lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_MODEL,
					 phba->ModelName);
}

static int
lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, void *attr)
{
	/* SRIOV (type 3) is not supported */

	return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_PORT_INFO,
				      (vport->vpi) ? 2 /* NPIV */ : 1 /* Physical */);
}

static int
lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_QOS, 0);
}

static int
lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_SECURITY, 1);
}

static int
lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	char buf[32] = { 0 };

	sprintf(buf, "ELXE2EM:%04d", phba->sli4_hba.pc_sli4_params.mi_ver);

	return lpfc_fdmi_set_attr_string(attr, RPRT_VENDOR_MI, buf);
}

/* RHBA attribute jump table */
static int (*lpfc_fdmi_hba_action[])
	(struct lpfc_vport *vport, void *attrbuf) = {
	/* Action routine			Mask bit	Attribute type */
	lpfc_fdmi_hba_attr_wwnn,		/* bit0		RHBA_NODENAME */
	lpfc_fdmi_hba_attr_manufacturer,	/* bit1		RHBA_MANUFACTURER */
	lpfc_fdmi_hba_attr_sn,			/* bit2		RHBA_SERIAL_NUMBER */
	lpfc_fdmi_hba_attr_model,		/* bit3		RHBA_MODEL */
	lpfc_fdmi_hba_attr_description,		/* bit4		RHBA_MODEL_DESCRIPTION */
	lpfc_fdmi_hba_attr_hdw_ver,		/* bit5		RHBA_HARDWARE_VERSION */
	lpfc_fdmi_hba_attr_drvr_ver,		/* bit6		RHBA_DRIVER_VERSION */
	lpfc_fdmi_hba_attr_rom_ver,		/* bit7		RHBA_OPTION_ROM_VERSION */
	lpfc_fdmi_hba_attr_fmw_ver,		/* bit8		RHBA_FIRMWARE_VERSION */
	lpfc_fdmi_hba_attr_os_ver,		/* bit9		RHBA_OS_NAME_VERSION */
	lpfc_fdmi_hba_attr_ct_len,		/* bit10	RHBA_MAX_CT_PAYLOAD_LEN */
	lpfc_fdmi_hba_attr_symbolic_name,	/* bit11	RHBA_SYM_NODENAME */
	lpfc_fdmi_hba_attr_vendor_info,		/* bit12	RHBA_VENDOR_INFO */
	lpfc_fdmi_hba_attr_num_ports,		/* bit13	RHBA_NUM_PORTS */
	lpfc_fdmi_hba_attr_fabric_wwnn,		/* bit14	RHBA_FABRIC_WWNN */
	lpfc_fdmi_hba_attr_bios_ver,		/* bit15	RHBA_BIOS_VERSION */
	lpfc_fdmi_hba_attr_bios_state,		/* bit16	RHBA_BIOS_STATE */
	lpfc_fdmi_hba_attr_vendor_id,		/* bit17	RHBA_VENDOR_ID */
};

/* RPA / RPRT attribute jump table */
static int (*lpfc_fdmi_port_action[])
	(struct lpfc_vport *vport, void *attrbuf) = {
	/* Action routine			Mask bit	Attribute type */
	lpfc_fdmi_port_attr_fc4type,		/* bit0		RPRT_SUPPORT_FC4_TYPES */
	lpfc_fdmi_port_attr_support_speed,	/* bit1		RPRT_SUPPORTED_SPEED */
	lpfc_fdmi_port_attr_speed,		/* bit2		RPRT_PORT_SPEED */
	lpfc_fdmi_port_attr_max_frame,		/* bit3		RPRT_MAX_FRAME_SIZE */
	lpfc_fdmi_port_attr_os_devname,		/* bit4		RPRT_OS_DEVICE_NAME */
	lpfc_fdmi_port_attr_host_name,		/* bit5		RPRT_HOST_NAME */
	lpfc_fdmi_port_attr_wwnn,		/* bit6		RPRT_NODENAME */
	lpfc_fdmi_port_attr_wwpn,		/* bit7		RPRT_PORTNAME */
	lpfc_fdmi_port_attr_symbolic_name,	/* bit8		RPRT_SYM_PORTNAME */
	lpfc_fdmi_port_attr_port_type,		/* bit9		RPRT_PORT_TYPE */
	lpfc_fdmi_port_attr_class,		/* bit10	RPRT_SUPPORTED_CLASS */
	lpfc_fdmi_port_attr_fabric_wwpn,	/* bit11	RPRT_FABRICNAME */
	lpfc_fdmi_port_attr_active_fc4type,	/* bit12	RPRT_ACTIVE_FC4_TYPES */
	lpfc_fdmi_port_attr_port_state,		/* bit13	RPRT_PORT_STATE */
	lpfc_fdmi_port_attr_num_disc,		/* bit14	RPRT_DISC_PORT */
	lpfc_fdmi_port_attr_nportid,		/* bit15	RPRT_PORT_ID */
	lpfc_fdmi_smart_attr_service,		/* bit16	RPRT_SMART_SERVICE */
	lpfc_fdmi_smart_attr_guid,		/* bit17	RPRT_SMART_GUID */
	lpfc_fdmi_smart_attr_version,		/* bit18	RPRT_SMART_VERSION */
	lpfc_fdmi_smart_attr_model,		/* bit19	RPRT_SMART_MODEL */
	lpfc_fdmi_smart_attr_port_info,		/* bit20	RPRT_SMART_PORT_INFO */
	lpfc_fdmi_smart_attr_qos,		/* bit21	RPRT_SMART_QOS */
	lpfc_fdmi_smart_attr_security,		/* bit22	RPRT_SMART_SECURITY */
	lpfc_fdmi_vendor_attr_mi,		/* bit23	RPRT_VENDOR_MI */
};
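
/*
 * The jump tables above are indexed by bit position: lpfc_fdmi_cmd() walks
 * vport->fdmi_hba_mask or vport->fdmi_port_mask one bit at a time and, for
 * each bit that is set, calls the action routine in the matching slot to
 * append that attribute to the request being built.
 */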

/**
 * lpfc_fdmi_cmd - Build and send a FDMI cmd to the specified NPort
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID)
 * @cmdcode: FDMI command to send
 * @new_mask: Mask of HBA or PORT Attributes to send
 *
 * Builds and sends a FDMI command using the CT subsystem.
 */
int
lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      int cmdcode, uint32_t new_mask)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_dmabuf *rq, *rsp;
	struct lpfc_sli_ct_request *CtReq;
	struct ulp_bde64_le *bde;
	uint32_t bit_pos;
	uint32_t size, addsz;
	uint32_t rsp_size;
	uint32_t mask;
	struct lpfc_fdmi_reg_hba *rh;
	struct lpfc_fdmi_port_entry *pe;
	struct lpfc_fdmi_reg_portattr *pab = NULL, *base = NULL;
	struct lpfc_fdmi_attr_block *ab = NULL;
	int (*func)(struct lpfc_vport *vport, void *attrbuf);
	void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb);

	if (!ndlp)
		return 0;

	cmpl = lpfc_cmpl_ct_disc_fdmi;	/* called from discovery */

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	rq = kmalloc(sizeof(*rq), GFP_KERNEL);
	if (!rq)
		return 1;

	rq->virt = lpfc_mbuf_alloc(phba, 0, &rq->phys);
	if (!rq->virt)
		goto fdmi_cmd_free_rq;

	/* Allocate buffer for Buffer ptr list */
	rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
	if (!rsp)
		goto fdmi_cmd_free_rqvirt;

	rsp->virt = lpfc_mbuf_alloc(phba, 0, &rsp->phys);
	if (!rsp->virt)
		goto fdmi_cmd_free_rsp;

	INIT_LIST_HEAD(&rq->list);
	INIT_LIST_HEAD(&rsp->list);

	/* mbuf buffers are 1K in length - aka LPFC_BPL_SIZE */
	memset(rq->virt, 0, LPFC_BPL_SIZE);
	rsp_size = LPFC_BPL_SIZE;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0218 FDMI Request x%x mask x%x Data: x%x x%lx x%x\n",
			 cmdcode, new_mask, vport->fdmi_port_mask,
			 vport->fc_flag, vport->port_state);

	CtReq = (struct lpfc_sli_ct_request *)rq->virt;

	/* First populate the CT_IU preamble */
	CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
	CtReq->RevisionId.bits.InId = 0;

	CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
	CtReq->FsSubType = SLI_CT_FDMI_Subtypes;

	CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);

	/* Next fill in the specific FDMI cmd information */
	size = 0;
	switch (cmdcode) {
	case SLI_MGMT_RHAT:
	case SLI_MGMT_RHBA:
		rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un;
		/* HBA Identifier */
		memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		size += sizeof(struct lpfc_fdmi_hba_ident);

		if (cmdcode == SLI_MGMT_RHBA) {
			/* Registered Port List */
			/* One entry (port) per adapter */
			rh->rpl.EntryCnt = cpu_to_be32(1);
			memcpy(&rh->rpl.pe.PortName,
			       &phba->pport->fc_sparam.portName,
			       sizeof(struct lpfc_name));
			size += sizeof(struct lpfc_fdmi_reg_port_list);
		}

		ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size);
		ab->EntryCnt = 0;
		size += FOURBYTES;	/* add length of EntryCnt field */

		bit_pos = 0;
		if (new_mask)
			mask = new_mask;
		else
			mask = vport->fdmi_hba_mask;

		/* Mask will dictate what attributes to build in the request */
		while (mask) {
			if (mask & 0x1) {
				func = lpfc_fdmi_hba_action[bit_pos];
				addsz = func(vport, ((uint8_t *)rh + size));
				if (addsz) {
					ab->EntryCnt++;
					size += addsz;
				}
				/* check if another attribute fits */
				if ((size + FDMI_MAX_ATTRLEN) >
				    (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
					goto hba_out;
			}
			mask = mask >> 1;
			bit_pos++;
		}
hba_out:
		ab->EntryCnt = cpu_to_be32(ab->EntryCnt);
		/* Total size */
		size += GID_REQUEST_SZ - 4;
		break;
->port_type
!= LPFC_PHYSICAL_PORT
) {
3299 ndlp
= lpfc_findnode_did(phba
->pport
, FDMI_DID
);
3305 /* Store base ptr right after preamble */
3306 base
= (struct lpfc_fdmi_reg_portattr
*)&CtReq
->un
;
3308 if (cmdcode
== SLI_MGMT_RPRT
) {
3309 rh
= (struct lpfc_fdmi_reg_hba
*)base
;
3310 /* HBA Identifier */
3311 memcpy(&rh
->hi
.PortName
,
3312 &phba
->pport
->fc_sparam
.portName
,
3313 sizeof(struct lpfc_name
));
3314 pab
= (struct lpfc_fdmi_reg_portattr
*)
3315 ((uint8_t *)base
+ sizeof(struct lpfc_name
));
3316 size
+= sizeof(struct lpfc_name
);
3321 memcpy((uint8_t *)&pab
->PortName
,
3322 (uint8_t *)&vport
->fc_sparam
.portName
,
3323 sizeof(struct lpfc_name
));
3324 pab
->ab
.EntryCnt
= 0;
3325 /* add length of name and EntryCnt field */
3326 size
+= sizeof(struct lpfc_name
) + FOURBYTES
;
3332 mask
= vport
->fdmi_port_mask
;
3334 /* Mask will dictate what attributes to build in the request */
3337 func
= lpfc_fdmi_port_action
[bit_pos
];
3338 addsz
= func(vport
, ((uint8_t *)base
+ size
));
3343 /* check if another attribute fits */
3344 if ((size
+ FDMI_MAX_ATTRLEN
) >
3345 (LPFC_BPL_SIZE
- LPFC_CT_PREAMBLE
))
3352 pab
->ab
.EntryCnt
= cpu_to_be32(pab
->ab
.EntryCnt
);
3353 size
+= GID_REQUEST_SZ
- 4;

	case SLI_MGMT_GHAT:
	case SLI_MGMT_GRPL:
		rsp_size = FC_MAX_NS_RSP;
		fallthrough;
	case SLI_MGMT_DHBA:
	case SLI_MGMT_DHAT:
		pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
		memcpy((uint8_t *)&pe->PortName,
		       (uint8_t *)&vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
		break;

	case SLI_MGMT_GPAT:
	case SLI_MGMT_GPAS:
		rsp_size = FC_MAX_NS_RSP;
		fallthrough;
	case SLI_MGMT_DPRT:
		if (vport->port_type != LPFC_PHYSICAL_PORT) {
			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
			if (!ndlp)
				return 0;
		}
		fallthrough;
	case SLI_MGMT_DPA:
		pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
		memcpy((uint8_t *)&pe->PortName,
		       (uint8_t *)&vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
		break;
	case SLI_MGMT_GRHL:
		size = GID_REQUEST_SZ - 4;
		break;
	default:
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0298 FDMI cmdcode x%x not supported\n",
				 cmdcode);
		goto fdmi_cmd_free_rspvirt;
	}
	CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);

	bde = (struct ulp_bde64_le *)rsp->virt;
	bde->addr_high = cpu_to_le32(putPaddrHigh(rq->phys));
	bde->addr_low = cpu_to_le32(putPaddrLow(rq->phys));
	bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 <<
				     ULP_BDE64_TYPE_SHIFT);
	bde->type_size |= cpu_to_le32(size);

	/*
	 * The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
	 * to hold ndlp reference for the corresponding callback function.
	 */
	if (!lpfc_ct_cmd(vport, rq, rsp, ndlp, cmpl, rsp_size, 0))
		return 0;

fdmi_cmd_free_rspvirt:
	lpfc_mbuf_free(phba, rsp->virt, rsp->phys);
fdmi_cmd_free_rsp:
	kfree(rsp);
fdmi_cmd_free_rqvirt:
	lpfc_mbuf_free(phba, rq->virt, rq->phys);
fdmi_cmd_free_rq:
	kfree(rq);
	/* Issue FDMI request failed */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0244 Issue FDMI request failed Data: x%x\n",
			 cmdcode);
	return 1;
}

/**
 * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
 * @t: Context object of the timer.
 *
 * This function sets the WORKER_DELAYED_DISC_TMO flag and wakes up
 * the worker thread.
 */
void
lpfc_delayed_disc_tmo(struct timer_list *t)
{
	struct lpfc_vport *vport = from_timer(vport, t, delayed_disc_tmo);
	struct lpfc_hba *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	spin_lock_irqsave(&vport->work_port_lock, iflag);
	tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_delayed_disc_timeout_handler - Function called by worker thread to
 *	handle delayed discovery.
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This function starts nport discovery of the vport.
 */
void
lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
{
	if (!test_and_clear_bit(FC_DISC_DELAYED, &vport->fc_flag))
		return;

	lpfc_do_scr_ns_plogi(vport->phba, vport);
}

void
lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
{
	struct lpfc_sli *psli = &phba->sli;
	lpfc_vpd_t *vp = &phba->vpd;
	uint32_t b1, b2, b3, b4, i, rev;
	char c;
	uint32_t *ptr, str[4];
	uint8_t *fwname;

	if (phba->sli_rev == LPFC_SLI_REV4)
		snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName);
	else if (vp->rev.rBit) {
		if (psli->sli_flag & LPFC_SLI_ACTIVE)
			rev = vp->rev.sli2FwRev;
		else
			rev = vp->rev.sli1FwRev;

		b1 = (rev & 0x0000f000) >> 12;
		b2 = (rev & 0x00000f00) >> 8;
		b3 = (rev & 0x000000c0) >> 6;
		b4 = (rev & 0x00000030) >> 4;

		switch (b4) {
		case 0:
			c = 'N';
			break;
		case 1:
			c = 'A';
			break;
		case 2:
			c = 'B';
			break;
		case 3:
			c = 'X';
			break;
		default:
			c = 0;
			break;
		}
		b4 = (rev & 0x0000000f);

		if (psli->sli_flag & LPFC_SLI_ACTIVE)
			fwname = vp->rev.sli2FwName;
		else
			fwname = vp->rev.sli1FwName;

		for (i = 0; i < 16; i++)
			if (fwname[i] == 0x20)
				fwname[i] = 0;

		ptr = (uint32_t *)fwname;

		for (i = 0; i < 3; i++)
			str[i] = be32_to_cpu(*ptr++);

		if (c == 0) {
			if (flag)
				sprintf(fwrevision, "%d.%d%d (%s)",
					b1, b2, b3, (char *)str);
			else
				sprintf(fwrevision, "%d.%d%d", b1,
					b2, b3);
		} else {
			if (flag)
				sprintf(fwrevision, "%d.%d%d%c%d (%s)",
					b1, b2, b3, c, b4, (char *)str);
			else
				sprintf(fwrevision, "%d.%d%d%c%d",
					b1, b2, b3, c, b4);
		}
	} else {
		rev = vp->rev.smFwRev;

		b1 = (rev & 0xff000000) >> 24;
		b2 = (rev & 0x00f00000) >> 20;
		b3 = (rev & 0x000f0000) >> 16;
		c  = (rev & 0x0000ff00) >> 8;
		b4 = (rev & 0x000000ff);

		sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
	}
}

static void
lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		      struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
	struct lpfc_sli_ct_request *ctcmd = inp->virt;
	struct lpfc_sli_ct_request *ctrsp = outp->virt;
	__be16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
	struct app_id_object *app;
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
	u32 cmd, hash, bucket;
	struct lpfc_vmid *vmp, *cur;
	u8 *data = outp->virt;
	int i;
= be16_to_cpu(ctcmd
->CommandResponse
.bits
.CmdRsp
);
3574 if (cmd
== SLI_CTAS_DALLAPP_ID
)
3575 lpfc_ct_free_iocb(phba
, cmdiocb
);
3577 if (lpfc_els_chk_latt(vport
) || get_job_ulpstatus(phba
, rspiocb
)) {
3578 if (cmd
!= SLI_CTAS_DALLAPP_ID
)
3581 /* Check for a CT LS_RJT response */
3582 if (be16_to_cpu(rsp
) == SLI_CT_RESPONSE_FS_RJT
) {
3583 if (cmd
!= SLI_CTAS_DALLAPP_ID
)
3584 lpfc_printf_vlog(vport
, KERN_DEBUG
, LOG_DISCOVERY
,
3585 "3306 VMID FS_RJT Data: x%x x%x x%x\n",
3586 cmd
, ctrsp
->ReasonCode
,
3587 ctrsp
->Explanation
);
3588 if ((cmd
!= SLI_CTAS_DALLAPP_ID
) ||
3589 (ctrsp
->ReasonCode
!= SLI_CT_UNABLE_TO_PERFORM_REQ
) ||
3590 (ctrsp
->Explanation
!= SLI_CT_APP_ID_NOT_AVAILABLE
)) {
3591 /* If DALLAPP_ID failed retry later */
3592 if (cmd
== SLI_CTAS_DALLAPP_ID
)
3593 set_bit(FC_DEREGISTER_ALL_APP_ID
,
3600 case SLI_CTAS_RAPP_IDENT
:
3601 app
= (struct app_id_object
*)(RAPP_IDENT_OFFSET
+ data
);
3602 lpfc_printf_vlog(vport
, KERN_DEBUG
, LOG_DISCOVERY
,
3603 "6712 RAPP_IDENT app id %d port id x%x id "
3604 "len %d\n", be32_to_cpu(app
->app_id
),
3605 be32_to_cpu(app
->port_id
),
3606 app
->obj
.entity_id_len
);
3608 if (app
->obj
.entity_id_len
== 0 || app
->port_id
== 0)
3611 hash
= lpfc_vmid_hash_fn(app
->obj
.entity_id
,
3612 app
->obj
.entity_id_len
);
3613 vmp
= lpfc_get_vmid_from_hashtable(vport
, hash
,
3614 app
->obj
.entity_id
);
3616 write_lock(&vport
->vmid_lock
);
3617 vmp
->un
.app_id
= be32_to_cpu(app
->app_id
);
3618 vmp
->flag
|= LPFC_VMID_REGISTERED
;
3619 vmp
->flag
&= ~LPFC_VMID_REQ_REGISTER
;
3620 write_unlock(&vport
->vmid_lock
);
3621 /* Set IN USE flag */
3622 vport
->vmid_flag
|= LPFC_VMID_IN_USE
;
3624 lpfc_printf_vlog(vport
, KERN_DEBUG
, LOG_DISCOVERY
,
3625 "6901 No entry found %s hash %d\n",
3626 app
->obj
.entity_id
, hash
);
3629 case SLI_CTAS_DAPP_IDENT
:
3630 app
= (struct app_id_object
*)(DAPP_IDENT_OFFSET
+ data
);
3631 lpfc_printf_vlog(vport
, KERN_DEBUG
, LOG_DISCOVERY
,
3632 "6713 DAPP_IDENT app id %d port id x%x\n",
3633 be32_to_cpu(app
->app_id
),
3634 be32_to_cpu(app
->port_id
));
3636 case SLI_CTAS_DALLAPP_ID
:
3637 lpfc_printf_vlog(vport
, KERN_DEBUG
, LOG_DISCOVERY
,
3638 "8856 Deregistered all app ids\n");
3639 read_lock(&vport
->vmid_lock
);
3640 for (i
= 0; i
< phba
->cfg_max_vmid
; i
++) {
3641 vmp
= &vport
->vmid
[i
];
3642 if (vmp
->flag
!= LPFC_VMID_SLOT_FREE
)
3643 memset(vmp
, 0, sizeof(struct lpfc_vmid
));
3645 read_unlock(&vport
->vmid_lock
);
3646 /* for all elements in the hash table */
3647 if (!hash_empty(vport
->hash_table
))
3648 hash_for_each(vport
->hash_table
, bucket
, cur
, hnode
)
3649 hash_del(&cur
->hnode
);
3650 set_bit(FC_ALLOW_VMID
, &vport
->load_flag
);
3653 lpfc_printf_vlog(vport
, KERN_DEBUG
, LOG_DISCOVERY
,
3654 "8857 Invalid command code\n");
3657 lpfc_ct_free_iocb(phba
, cmdiocb
);

/**
 * lpfc_vmid_cmd - Build and send a VMID CT command to the management server
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdcode: application server command code to send
 * @vmid: pointer to vmid info structure
 *
 * Builds and sends an application server (VMID) CT command using the CT
 * subsystem.
 */
int
lpfc_vmid_cmd(struct lpfc_vport *vport,
	      int cmdcode, struct lpfc_vmid *vmid)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_dmabuf *mp, *bmp;
	struct lpfc_sli_ct_request *ctreq;
	struct ulp_bde64 *bpl;
	u32 size;
	u32 rsp_size;
	u8 *data;
	struct lpfc_vmid_rapp_ident_list *rap;
	struct lpfc_vmid_dapp_ident_list *dap;
	u8 retry = 0;
	struct lpfc_nodelist *ndlp;

	void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb);

	ndlp = lpfc_findnode_did(vport, FDMI_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return 0;

	cmpl = lpfc_cmpl_ct_cmd_vmid;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	mp = kmalloc(sizeof(*mp), GFP_KERNEL);
	if (!mp)
		goto vmid_free_mp_exit;

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt)
		goto vmid_free_mp_virt_exit;

	/* Allocate buffer for Buffer ptr list */
	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp)
		goto vmid_free_bmp_exit;

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt)
		goto vmid_free_bmp_virt_exit;

	INIT_LIST_HEAD(&mp->list);
	INIT_LIST_HEAD(&bmp->list);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "3275 VMID Request Data: x%lx x%x x%x\n",
			 vport->fc_flag, vport->port_state, cmdcode);
	ctreq = (struct lpfc_sli_ct_request *)mp->virt;
	data = mp->virt;

	/* First populate the CT_IU preamble */
	memset(data, 0, LPFC_BPL_SIZE);
	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;

	ctreq->FsType = SLI_CT_MANAGEMENT_SERVICE;
	ctreq->FsSubType = SLI_CT_APP_SEV_Subtypes;

	ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
	rsp_size = LPFC_BPL_SIZE;
	size = 0;

	switch (cmdcode) {
	case SLI_CTAS_RAPP_IDENT:
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "1329 RAPP_IDENT for %s\n", vmid->host_vmid);
		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
		rap = (struct lpfc_vmid_rapp_ident_list *)
			(DAPP_IDENT_OFFSET + data);
		rap->no_of_objects = cpu_to_be32(1);
		rap->obj[0].entity_id_len = vmid->vmid_len;
		memcpy(rap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
		size = RAPP_IDENT_OFFSET +
		       struct_size(rap, obj, be32_to_cpu(rap->no_of_objects));
		retry = 1;
		break;

	case SLI_CTAS_GALLAPPIA_ID:
		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
		size = GALLAPPIA_ID_SIZE;
		break;

	case SLI_CTAS_DAPP_IDENT:
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "1469 DAPP_IDENT for %s\n", vmid->host_vmid);
		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
		dap = (struct lpfc_vmid_dapp_ident_list *)
			(DAPP_IDENT_OFFSET + data);
		dap->no_of_objects = cpu_to_be32(1);
		dap->obj[0].entity_id_len = vmid->vmid_len;
		memcpy(dap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
		size = DAPP_IDENT_OFFSET +
		       struct_size(dap, obj, be32_to_cpu(dap->no_of_objects));
		write_lock(&vport->vmid_lock);
		vmid->flag &= ~LPFC_VMID_REGISTERED;
		write_unlock(&vport->vmid_lock);
		retry = 1;
		break;

	case SLI_CTAS_DALLAPP_ID:
		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
		size = DALLAPP_ID_SIZE;
		break;

	default:
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "7062 VMID cmdcode x%x not supported\n",
				 cmdcode);
		goto vmid_free_all_mem;
	}
;
3782 ctreq
->CommandResponse
.bits
.Size
= cpu_to_be16(rsp_size
);
3784 bpl
= (struct ulp_bde64
*)bmp
->virt
;
3785 bpl
->addrHigh
= putPaddrHigh(mp
->phys
);
3786 bpl
->addrLow
= putPaddrLow(mp
->phys
);
3787 bpl
->tus
.f
.bdeFlags
= 0;
3788 bpl
->tus
.f
.bdeSize
= size
;
3790 /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
3791 * to hold ndlp reference for the corresponding callback function.
3793 if (!lpfc_ct_cmd(vport
, mp
, bmp
, ndlp
, cmpl
, rsp_size
, retry
))
3797 lpfc_mbuf_free(phba
, bmp
->virt
, bmp
->phys
);
3798 vmid_free_bmp_virt_exit
:
3801 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3802 vmid_free_mp_virt_exit
:
3806 /* Issue CT request failed */
3807 lpfc_printf_vlog(vport
, KERN_DEBUG
, LOG_DISCOVERY
,
3808 "3276 VMID CT request failed Data: x%x\n", cmdcode
);