1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
6 #include "qla_def.h"
7 #include "qla_target.h"
8 #include <linux/utsname.h>
10 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
11 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
12 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
15 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
16 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
17 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
18 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
19 static int qla_async_rsnn_nn(scsi_qla_host_t *);
23 /**
24 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
25 * @vha: HA context
26 * @arg: CT arguments
28 * Returns a pointer to the @vha's ms_iocb.
30 void *
31 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
33 struct qla_hw_data *ha = vha->hw;
34 ms_iocb_entry_t *ms_pkt;
36 ms_pkt = (ms_iocb_entry_t *)arg->iocb;
37 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
39 ms_pkt->entry_type = MS_IOCB_TYPE;
40 ms_pkt->entry_count = 1;
41 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
42 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
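/*
 * Nominal CT timeout: Fibre Channel uses 2 * R_A_TOV for CT requests.
 * r_a_tov appears to be kept in 100 ms units here, so dividing by 10
 * yields seconds before doubling.
 */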
43 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
44 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
45 ms_pkt->total_dsd_count = cpu_to_le16(2);
46 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
47 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
49 put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
50 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
52 put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
53 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
55 vha->qla_stats.control_requests++;
57 return (ms_pkt);
60 /**
61 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
62 * @vha: HA context
63 * @arg: CT arguments
65 * Returns a pointer to the @ha's ms_iocb.
67 void *
68 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
70 struct qla_hw_data *ha = vha->hw;
71 struct ct_entry_24xx *ct_pkt;
73 ct_pkt = (struct ct_entry_24xx *)arg->iocb;
74 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
76 ct_pkt->entry_type = CT_IOCB_TYPE;
77 ct_pkt->entry_count = 1;
78 ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
79 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
80 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
81 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
82 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
83 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
85 put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
86 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
88 put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
89 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
90 ct_pkt->vp_index = vha->vp_idx;
92 vha->qla_stats.control_requests++;
94 return (ct_pkt);
97 /**
98 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
99 * @p: CT request buffer
100 * @cmd: GS command
101 * @rsp_size: response size in bytes
103 * Returns a pointer to the initialized @ct_req.
105 static inline struct ct_sns_req *
106 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
108 memset(p, 0, sizeof(struct ct_sns_pkt));
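/*
 * CT_IU preamble for Name Server queries: per FC-GS, GS type 0xFC is
 * the Directory Service and subtype 0x02 the Name Server.  The
 * maximum/residual size field is expressed in 4-byte words and
 * excludes the 16-byte CT header, hence (rsp_size - 16) / 4 below.
 */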
110 p->p.req.header.revision = 0x01;
111 p->p.req.header.gs_type = 0xFC;
112 p->p.req.header.gs_subtype = 0x02;
113 p->p.req.command = cpu_to_be16(cmd);
114 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
116 return &p->p.req;
120 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
121 struct ct_sns_rsp *ct_rsp, const char *routine)
123 int rval;
124 uint16_t comp_status;
125 struct qla_hw_data *ha = vha->hw;
126 bool lid_is_sns = false;
128 rval = QLA_FUNCTION_FAILED;
129 if (ms_pkt->entry_status != 0) {
130 ql_dbg(ql_dbg_disc, vha, 0x2031,
131 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
132 routine, ms_pkt->entry_status, vha->d_id.b.domain,
133 vha->d_id.b.area, vha->d_id.b.al_pa);
134 } else {
135 if (IS_FWI2_CAPABLE(ha))
136 comp_status = le16_to_cpu(
137 ((struct ct_entry_24xx *)ms_pkt)->comp_status);
138 else
139 comp_status = le16_to_cpu(ms_pkt->status);
140 switch (comp_status) {
141 case CS_COMPLETE:
142 case CS_DATA_UNDERRUN:
143 case CS_DATA_OVERRUN: /* Overrun? */
144 if (ct_rsp->header.response !=
145 cpu_to_be16(CT_ACCEPT_RESPONSE)) {
146 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
147 "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
148 routine, vha->d_id.b.domain,
149 vha->d_id.b.area, vha->d_id.b.al_pa,
150 comp_status, ct_rsp->header.response);
151 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
152 0x2078, ct_rsp,
153 offsetof(typeof(*ct_rsp), rsp));
154 rval = QLA_INVALID_COMMAND;
155 } else
156 rval = QLA_SUCCESS;
157 break;
158 case CS_PORT_LOGGED_OUT:
159 if (IS_FWI2_CAPABLE(ha)) {
160 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
161 NPH_SNS)
162 lid_is_sns = true;
163 } else {
164 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
165 SIMPLE_NAME_SERVER)
166 lid_is_sns = true;
168 if (lid_is_sns) {
169 ql_dbg(ql_dbg_async, vha, 0x502b,
170 "%s failed, Name server has logged out",
171 routine);
172 rval = QLA_NOT_LOGGED_IN;
173 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
174 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
176 break;
177 case CS_TIMEOUT:
178 rval = QLA_FUNCTION_TIMEOUT;
179 fallthrough;
180 default:
181 ql_dbg(ql_dbg_disc, vha, 0x2033,
182 "%s failed, completion status (%x) on port_id: "
183 "%02x%02x%02x.\n", routine, comp_status,
184 vha->d_id.b.domain, vha->d_id.b.area,
185 vha->d_id.b.al_pa);
186 break;
189 return rval;
193 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
194 * @vha: HA context
195 * @fcport: fcport entry to be updated
197 * Returns 0 on success.
200 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
202 int rval;
204 ms_iocb_entry_t *ms_pkt;
205 struct ct_sns_req *ct_req;
206 struct ct_sns_rsp *ct_rsp;
207 struct qla_hw_data *ha = vha->hw;
208 struct ct_arg arg;
210 if (IS_QLA2100(ha) || IS_QLA2200(ha))
211 return qla2x00_sns_ga_nxt(vha, fcport);
213 arg.iocb = ha->ms_iocb;
214 arg.req_dma = ha->ct_sns_dma;
215 arg.rsp_dma = ha->ct_sns_dma;
216 arg.req_size = GA_NXT_REQ_SIZE;
217 arg.rsp_size = GA_NXT_RSP_SIZE;
218 arg.nport_handle = NPH_SNS;
220 /* Issue GA_NXT */
221 /* Prepare common MS IOCB */
222 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
224 /* Prepare CT request */
225 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
226 GA_NXT_RSP_SIZE);
227 ct_rsp = &ha->ct_sns->p.rsp;
229 /* Prepare CT arguments -- port_id */
230 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
232 /* Execute MS IOCB */
233 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
234 sizeof(ms_iocb_entry_t));
235 if (rval != QLA_SUCCESS) {
236 /*EMPTY*/
237 ql_dbg(ql_dbg_disc, vha, 0x2062,
238 "GA_NXT issue IOCB failed (%d).\n", rval);
239 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
240 QLA_SUCCESS) {
241 rval = QLA_FUNCTION_FAILED;
242 } else {
243 /* Populate fc_port_t entry. */
244 fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id);
246 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
247 WWN_SIZE);
248 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
249 WWN_SIZE);
251 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
252 FS_FC4TYPE_FCP : FC4_TYPE_OTHER;
254 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
255 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
256 fcport->d_id.b.domain = 0xf0;
258 ql_dbg(ql_dbg_disc, vha, 0x2063,
259 "GA_NXT entry - nn %8phN pn %8phN "
260 "port_id=%02x%02x%02x.\n",
261 fcport->node_name, fcport->port_name,
262 fcport->d_id.b.domain, fcport->d_id.b.area,
263 fcport->d_id.b.al_pa);
266 return (rval);
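/*
 * GID_PT response sizing: each accepted port entry is 4 bytes (a
 * control byte plus a 3-byte port ID), preceded by the 16-byte CT
 * response header, hence max_fibre_devices * 4 + 16 below.
 */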
269 static inline int
270 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
272 return vha->hw->max_fibre_devices * 4 + 16;
276 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
277 * @vha: HA context
278 * @list: switch info entries to populate
280 * NOTE: Non-Nx_Ports are not requested.
282 * Returns 0 on success.
285 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
287 int rval;
288 uint16_t i;
290 ms_iocb_entry_t *ms_pkt;
291 struct ct_sns_req *ct_req;
292 struct ct_sns_rsp *ct_rsp;
294 struct ct_sns_gid_pt_data *gid_data;
295 struct qla_hw_data *ha = vha->hw;
296 uint16_t gid_pt_rsp_size;
297 struct ct_arg arg;
299 if (IS_QLA2100(ha) || IS_QLA2200(ha))
300 return qla2x00_sns_gid_pt(vha, list);
302 gid_data = NULL;
303 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
305 arg.iocb = ha->ms_iocb;
306 arg.req_dma = ha->ct_sns_dma;
307 arg.rsp_dma = ha->ct_sns_dma;
308 arg.req_size = GID_PT_REQ_SIZE;
309 arg.rsp_size = gid_pt_rsp_size;
310 arg.nport_handle = NPH_SNS;
312 /* Issue GID_PT */
313 /* Prepare common MS IOCB */
314 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
316 /* Prepare CT request */
317 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
318 ct_rsp = &ha->ct_sns->p.rsp;
320 /* Prepare CT arguments -- port_type */
321 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
323 /* Execute MS IOCB */
324 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
325 sizeof(ms_iocb_entry_t));
326 if (rval != QLA_SUCCESS) {
327 /*EMPTY*/
328 ql_dbg(ql_dbg_disc, vha, 0x2055,
329 "GID_PT issue IOCB failed (%d).\n", rval);
330 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
331 QLA_SUCCESS) {
332 rval = QLA_FUNCTION_FAILED;
333 } else {
334 /* Set port IDs in switch info list. */
335 for (i = 0; i < ha->max_fibre_devices; i++) {
336 gid_data = &ct_rsp->rsp.gid_pt.entries[i];
337 list[i].d_id = be_to_port_id(gid_data->port_id);
338 memset(list[i].fabric_port_name, 0, WWN_SIZE);
339 list[i].fp_speed = PORT_SPEED_UNKNOWN;
341 /* Last one exit. */
342 if (gid_data->control_byte & BIT_7) {
343 list[i].d_id.b.rsvd_1 = gid_data->control_byte;
344 break;
349 * If we've used all available slots, then the switch is
350 * reporting back more devices than we can handle with this
351 * single call. Return a failed status, and let GA_NXT handle
352 * the overload.
354 if (i == ha->max_fibre_devices)
355 rval = QLA_FUNCTION_FAILED;
358 return (rval);
362 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
363 * @vha: HA context
364 * @list: switch info entries to populate
366 * Returns 0 on success.
369 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
371 int rval = QLA_SUCCESS;
372 uint16_t i;
374 ms_iocb_entry_t *ms_pkt;
375 struct ct_sns_req *ct_req;
376 struct ct_sns_rsp *ct_rsp;
377 struct qla_hw_data *ha = vha->hw;
378 struct ct_arg arg;
380 if (IS_QLA2100(ha) || IS_QLA2200(ha))
381 return qla2x00_sns_gpn_id(vha, list);
383 arg.iocb = ha->ms_iocb;
384 arg.req_dma = ha->ct_sns_dma;
385 arg.rsp_dma = ha->ct_sns_dma;
386 arg.req_size = GPN_ID_REQ_SIZE;
387 arg.rsp_size = GPN_ID_RSP_SIZE;
388 arg.nport_handle = NPH_SNS;
390 for (i = 0; i < ha->max_fibre_devices; i++) {
391 /* Issue GPN_ID */
392 /* Prepare common MS IOCB */
393 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
395 /* Prepare CT request */
396 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
397 GPN_ID_RSP_SIZE);
398 ct_rsp = &ha->ct_sns->p.rsp;
400 /* Prepare CT arguments -- port_id */
401 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
403 /* Execute MS IOCB */
404 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
405 sizeof(ms_iocb_entry_t));
406 if (rval != QLA_SUCCESS) {
407 /*EMPTY*/
408 ql_dbg(ql_dbg_disc, vha, 0x2056,
409 "GPN_ID issue IOCB failed (%d).\n", rval);
410 break;
411 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
412 "GPN_ID") != QLA_SUCCESS) {
413 rval = QLA_FUNCTION_FAILED;
414 break;
415 } else {
416 /* Save portname */
417 memcpy(list[i].port_name,
418 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
421 /* Last device exit. */
422 if (list[i].d_id.b.rsvd_1 != 0)
423 break;
426 return (rval);
430 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
431 * @vha: HA context
432 * @list: switch info entries to populate
434 * Returns 0 on success.
437 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
439 int rval = QLA_SUCCESS;
440 uint16_t i;
441 struct qla_hw_data *ha = vha->hw;
442 ms_iocb_entry_t *ms_pkt;
443 struct ct_sns_req *ct_req;
444 struct ct_sns_rsp *ct_rsp;
445 struct ct_arg arg;
447 if (IS_QLA2100(ha) || IS_QLA2200(ha))
448 return qla2x00_sns_gnn_id(vha, list);
450 arg.iocb = ha->ms_iocb;
451 arg.req_dma = ha->ct_sns_dma;
452 arg.rsp_dma = ha->ct_sns_dma;
453 arg.req_size = GNN_ID_REQ_SIZE;
454 arg.rsp_size = GNN_ID_RSP_SIZE;
455 arg.nport_handle = NPH_SNS;
457 for (i = 0; i < ha->max_fibre_devices; i++) {
458 /* Issue GNN_ID */
459 /* Prepare common MS IOCB */
460 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
462 /* Prepare CT request */
463 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
464 GNN_ID_RSP_SIZE);
465 ct_rsp = &ha->ct_sns->p.rsp;
467 /* Prepare CT arguments -- port_id */
468 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
470 /* Execute MS IOCB */
471 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
472 sizeof(ms_iocb_entry_t));
473 if (rval != QLA_SUCCESS) {
474 /*EMPTY*/
475 ql_dbg(ql_dbg_disc, vha, 0x2057,
476 "GNN_ID issue IOCB failed (%d).\n", rval);
477 break;
478 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
479 "GNN_ID") != QLA_SUCCESS) {
480 rval = QLA_FUNCTION_FAILED;
481 break;
482 } else {
483 /* Save nodename */
484 memcpy(list[i].node_name,
485 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
487 ql_dbg(ql_dbg_disc, vha, 0x2058,
488 "GID_PT entry - nn %8phN pn %8phN "
489 "portid=%02x%02x%02x.\n",
490 list[i].node_name, list[i].port_name,
491 list[i].d_id.b.domain, list[i].d_id.b.area,
492 list[i].d_id.b.al_pa);
495 /* Last device exit. */
496 if (list[i].d_id.b.rsvd_1 != 0)
497 break;
500 return (rval);
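/*
 * Completion handler shared by the async name-server registrations
 * below (RFT_ID, RFF_ID, RNN_ID, RSNN_NN).  Success and timeout fall
 * through to a QLA_EVT_UNMAP work item that tears down the CT DMA
 * buffers; other failures clear the response buffer and requeue the
 * SRB via QLA_EVT_SP_RETRY, giving up after three retries.  If no
 * work element can be allocated, the buffers are freed directly here.
 */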
503 static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
505 struct scsi_qla_host *vha = sp->vha;
506 struct ct_sns_pkt *ct_sns;
507 struct qla_work_evt *e;
509 sp->rc = rc;
510 if (rc == QLA_SUCCESS) {
511 ql_dbg(ql_dbg_disc, vha, 0x204f,
512 "Async done-%s exiting normally.\n",
513 sp->name);
514 } else if (rc == QLA_FUNCTION_TIMEOUT) {
515 ql_dbg(ql_dbg_disc, vha, 0x204f,
516 "Async done-%s timeout\n", sp->name);
517 } else {
518 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
519 memset(ct_sns, 0, sizeof(*ct_sns));
520 sp->retry_count++;
521 if (sp->retry_count > 3)
522 goto err;
524 ql_dbg(ql_dbg_disc, vha, 0x204f,
525 "Async done-%s fail rc %x. Retry count %d\n",
526 sp->name, rc, sp->retry_count);
528 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
529 if (!e)
530 goto err2;
532 e->u.iosb.sp = sp;
533 qla2x00_post_work(vha, e);
534 return;
537 err:
538 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
539 err2:
540 if (!e) {
541 /* Please ignore the kernel warning here; otherwise we would leak this memory. */
542 if (sp->u.iocb_cmd.u.ctarg.req) {
543 dma_free_coherent(&vha->hw->pdev->dev,
544 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
545 sp->u.iocb_cmd.u.ctarg.req,
546 sp->u.iocb_cmd.u.ctarg.req_dma);
547 sp->u.iocb_cmd.u.ctarg.req = NULL;
550 if (sp->u.iocb_cmd.u.ctarg.rsp) {
551 dma_free_coherent(&vha->hw->pdev->dev,
552 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
553 sp->u.iocb_cmd.u.ctarg.rsp,
554 sp->u.iocb_cmd.u.ctarg.rsp_dma);
555 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
558 /* ref: INIT */
559 kref_put(&sp->cmd_kref, qla2x00_sp_release);
560 return;
563 e->u.iosb.sp = sp;
564 qla2x00_post_work(vha, e);
568 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
569 * @vha: HA context
571 * Returns 0 on success.
574 qla2x00_rft_id(scsi_qla_host_t *vha)
576 struct qla_hw_data *ha = vha->hw;
578 if (IS_QLA2100(ha) || IS_QLA2200(ha))
579 return qla2x00_sns_rft_id(vha);
581 return qla_async_rftid(vha, &vha->d_id);
584 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
586 int rval = QLA_MEMORY_ALLOC_FAILED;
587 struct ct_sns_req *ct_req;
588 srb_t *sp;
589 struct ct_sns_pkt *ct_sns;
591 if (!vha->flags.online)
592 goto done;
594 /* ref: INIT */
595 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
596 if (!sp)
597 goto done;
599 sp->type = SRB_CT_PTHRU_CMD;
600 sp->name = "rft_id";
601 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
602 qla2x00_async_sns_sp_done);
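/*
 * The CT request and response each get their own DMA-coherent
 * ct_sns_pkt buffer; qla2x00_async_sns_sp_done() above is responsible
 * for unmapping and freeing them once the command completes or its
 * retries are exhausted.
 */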
604 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
605 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
606 GFP_KERNEL);
607 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
608 if (!sp->u.iocb_cmd.u.ctarg.req) {
609 ql_log(ql_log_warn, vha, 0xd041,
610 "%s: Failed to allocate ct_sns request.\n",
611 __func__);
612 goto done_free_sp;
615 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
616 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
617 GFP_KERNEL);
618 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
619 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
620 ql_log(ql_log_warn, vha, 0xd042,
621 "%s: Failed to allocate ct_sns response.\n",
622 __func__);
623 goto done_free_sp;
625 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
626 memset(ct_sns, 0, sizeof(*ct_sns));
627 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
629 /* Prepare CT request */
630 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
632 /* Prepare CT arguments -- port_id, FC-4 types */
633 ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
634 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
636 if (vha->flags.nvme_enabled && qla_ini_mode_enabled(vha))
637 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
639 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
640 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
641 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
643 ql_dbg(ql_dbg_disc, vha, 0xffff,
644 "Async-%s - hdl=%x portid %06x.\n",
645 sp->name, sp->handle, d_id->b24);
647 rval = qla2x00_start_sp(sp);
648 if (rval != QLA_SUCCESS) {
649 ql_dbg(ql_dbg_disc, vha, 0x2043,
650 "RFT_ID issue IOCB failed (%d).\n", rval);
651 goto done_free_sp;
653 return rval;
654 done_free_sp:
655 /* ref: INIT */
656 kref_put(&sp->cmd_kref, qla2x00_sp_release);
657 done:
658 return rval;
662 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
663 * @vha: HA context
664 * @type: FC-4 type to register
666 * Returns 0 on success.
669 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
671 struct qla_hw_data *ha = vha->hw;
673 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
674 ql_dbg(ql_dbg_disc, vha, 0x2046,
675 "RFF_ID call not supported on ISP2100/ISP2200.\n");
676 return (QLA_SUCCESS);
679 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type);
682 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
683 u8 fc4feature, u8 fc4type)
685 int rval = QLA_MEMORY_ALLOC_FAILED;
686 struct ct_sns_req *ct_req;
687 srb_t *sp;
688 struct ct_sns_pkt *ct_sns;
690 /* ref: INIT */
691 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
692 if (!sp)
693 goto done;
695 sp->type = SRB_CT_PTHRU_CMD;
696 sp->name = "rff_id";
697 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
698 qla2x00_async_sns_sp_done);
700 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
701 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
702 GFP_KERNEL);
703 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
704 if (!sp->u.iocb_cmd.u.ctarg.req) {
705 ql_log(ql_log_warn, vha, 0xd041,
706 "%s: Failed to allocate ct_sns request.\n",
707 __func__);
708 goto done_free_sp;
711 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
712 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
713 GFP_KERNEL);
714 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
715 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
716 ql_log(ql_log_warn, vha, 0xd042,
717 "%s: Failed to allocate ct_sns response.\n",
718 __func__);
719 goto done_free_sp;
721 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
722 memset(ct_sns, 0, sizeof(*ct_sns));
723 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
725 /* Prepare CT request */
726 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
728 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
729 ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
730 ct_req->req.rff_id.fc4_feature = fc4feature;
731 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */
733 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
734 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
735 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
737 ql_dbg(ql_dbg_disc, vha, 0xffff,
738 "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
739 sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
741 rval = qla2x00_start_sp(sp);
742 if (rval != QLA_SUCCESS) {
743 ql_dbg(ql_dbg_disc, vha, 0x2047,
744 "RFF_ID issue IOCB failed (%d).\n", rval);
745 goto done_free_sp;
748 return rval;
750 done_free_sp:
751 /* ref: INIT */
752 kref_put(&sp->cmd_kref, qla2x00_sp_release);
753 done:
754 return rval;
758 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
759 * @vha: HA context
761 * Returns 0 on success.
764 qla2x00_rnn_id(scsi_qla_host_t *vha)
766 struct qla_hw_data *ha = vha->hw;
768 if (IS_QLA2100(ha) || IS_QLA2200(ha))
769 return qla2x00_sns_rnn_id(vha);
771 return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
774 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
775 u8 *node_name)
777 int rval = QLA_MEMORY_ALLOC_FAILED;
778 struct ct_sns_req *ct_req;
779 srb_t *sp;
780 struct ct_sns_pkt *ct_sns;
782 /* ref: INIT */
783 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
784 if (!sp)
785 goto done;
787 sp->type = SRB_CT_PTHRU_CMD;
788 sp->name = "rnid";
789 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
790 qla2x00_async_sns_sp_done);
792 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
793 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
794 GFP_KERNEL);
795 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
796 if (!sp->u.iocb_cmd.u.ctarg.req) {
797 ql_log(ql_log_warn, vha, 0xd041,
798 "%s: Failed to allocate ct_sns request.\n",
799 __func__);
800 goto done_free_sp;
803 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
804 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
805 GFP_KERNEL);
806 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
807 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
808 ql_log(ql_log_warn, vha, 0xd042,
809 "%s: Failed to allocate ct_sns response.\n",
810 __func__);
811 goto done_free_sp;
813 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
814 memset(ct_sns, 0, sizeof(*ct_sns));
815 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
817 /* Prepare CT request */
818 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
820 /* Prepare CT arguments -- port_id, node_name */
821 ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id);
822 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
824 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
825 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
826 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
828 ql_dbg(ql_dbg_disc, vha, 0xffff,
829 "Async-%s - hdl=%x portid %06x\n",
830 sp->name, sp->handle, d_id->b24);
832 rval = qla2x00_start_sp(sp);
833 if (rval != QLA_SUCCESS) {
834 ql_dbg(ql_dbg_disc, vha, 0x204d,
835 "RNN_ID issue IOCB failed (%d).\n", rval);
836 goto done_free_sp;
839 return rval;
841 done_free_sp:
842 /* ref: INIT */
843 kref_put(&sp->cmd_kref, qla2x00_sp_release);
844 done:
845 return rval;
848 size_t
849 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
851 struct qla_hw_data *ha = vha->hw;
853 if (IS_QLAFX00(ha))
854 return scnprintf(snn, size, "%s FW:v%s DVR:v%s",
855 ha->model_number, ha->mr.fw_version, qla2x00_version_str);
857 return scnprintf(snn, size, "%s FW:v%d.%02d.%02d DVR:v%s",
858 ha->model_number, ha->fw_major_version, ha->fw_minor_version,
859 ha->fw_subminor_version, qla2x00_version_str);
863 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
864 * @vha: HA context
866 * Returns 0 on success.
869 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
871 struct qla_hw_data *ha = vha->hw;
873 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
874 ql_dbg(ql_dbg_disc, vha, 0x2050,
875 "RSNN_NN call unsupported on ISP2100/ISP2200.\n");
876 return (QLA_SUCCESS);
879 return qla_async_rsnn_nn(vha);
882 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
884 int rval = QLA_MEMORY_ALLOC_FAILED;
885 struct ct_sns_req *ct_req;
886 srb_t *sp;
887 struct ct_sns_pkt *ct_sns;
889 /* ref: INIT */
890 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
891 if (!sp)
892 goto done;
894 sp->type = SRB_CT_PTHRU_CMD;
895 sp->name = "rsnn_nn";
896 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
897 qla2x00_async_sns_sp_done);
899 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
900 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
901 GFP_KERNEL);
902 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
903 if (!sp->u.iocb_cmd.u.ctarg.req) {
904 ql_log(ql_log_warn, vha, 0xd041,
905 "%s: Failed to allocate ct_sns request.\n",
906 __func__);
907 goto done_free_sp;
910 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
911 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
912 GFP_KERNEL);
913 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
914 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
915 ql_log(ql_log_warn, vha, 0xd042,
916 "%s: Failed to allocate ct_sns response.\n",
917 __func__);
918 goto done_free_sp;
920 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
921 memset(ct_sns, 0, sizeof(*ct_sns));
922 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
924 /* Prepare CT request */
925 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
927 /* Prepare CT arguments -- node_name, symbolic node_name, size */
928 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
930 /* Prepare the Symbolic Node Name */
931 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
932 sizeof(ct_req->req.rsnn_nn.sym_node_name));
933 ct_req->req.rsnn_nn.name_len =
934 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
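/*
 * RSNN_NN request length: 16-byte CT header + 8-byte node name
 * (= 24), plus the one-byte symbolic-name length field, plus the
 * symbolic name itself.
 */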
937 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
938 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
939 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
941 ql_dbg(ql_dbg_disc, vha, 0xffff,
942 "Async-%s - hdl=%x.\n",
943 sp->name, sp->handle);
945 rval = qla2x00_start_sp(sp);
946 if (rval != QLA_SUCCESS) {
947 ql_dbg(ql_dbg_disc, vha, 0x2043,
948 "RSNN_NN issue IOCB failed (%d).\n", rval);
949 goto done_free_sp;
952 return rval;
954 done_free_sp:
955 /* ref: INIT */
956 kref_put(&sp->cmd_kref, qla2x00_sp_release);
957 done:
958 return rval;
962 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
963 * @vha: HA context
964 * @cmd: GS command
965 * @scmd_len: Subcommand length
966 * @data_size: response size in bytes
968 * Returns a pointer to the @ha's sns_cmd.
970 static inline struct sns_cmd_pkt *
971 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
972 uint16_t data_size)
974 uint16_t wc;
975 struct sns_cmd_pkt *sns_cmd;
976 struct qla_hw_data *ha = vha->hw;
978 sns_cmd = ha->sns_cmd;
979 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
980 wc = data_size / 2; /* Size in 16bit words. */
981 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
982 put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
983 sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
984 sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
985 wc = (data_size - 16) / 4; /* Size in 32bit words. */
986 sns_cmd->p.cmd.size = cpu_to_le16(wc);
988 vha->qla_stats.control_requests++;
990 return (sns_cmd);
994 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
995 * @vha: HA context
996 * @fcport: fcport entry to be updated
998 * This command uses the old Execute SNS Command mailbox routine.
1000 * Returns 0 on success.
1002 static int
1003 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1005 int rval = QLA_SUCCESS;
1006 struct qla_hw_data *ha = vha->hw;
1007 struct sns_cmd_pkt *sns_cmd;
1009 /* Issue GA_NXT. */
1010 /* Prepare SNS command request. */
1011 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1012 GA_NXT_SNS_DATA_SIZE);
1014 /* Prepare SNS command arguments -- port_id. */
1015 sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1016 sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1017 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1019 /* Execute SNS command. */
1020 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1021 sizeof(struct sns_cmd_pkt));
1022 if (rval != QLA_SUCCESS) {
1023 /*EMPTY*/
1024 ql_dbg(ql_dbg_disc, vha, 0x205f,
1025 "GA_NXT Send SNS failed (%d).\n", rval);
1026 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
1027 sns_cmd->p.gan_data[9] != 0x02) {
1028 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1029 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1030 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1031 sns_cmd->p.gan_data, 16);
1032 rval = QLA_FUNCTION_FAILED;
1033 } else {
1034 /* Populate fc_port_t entry. */
1035 fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1036 fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1037 fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1039 memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1040 memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
1042 if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1043 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1044 fcport->d_id.b.domain = 0xf0;
1046 ql_dbg(ql_dbg_disc, vha, 0x2061,
1047 "GA_NXT entry - nn %8phN pn %8phN "
1048 "port_id=%02x%02x%02x.\n",
1049 fcport->node_name, fcport->port_name,
1050 fcport->d_id.b.domain, fcport->d_id.b.area,
1051 fcport->d_id.b.al_pa);
1054 return (rval);
1058 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1059 * @vha: HA context
1060 * @list: switch info entries to populate
1062 * This command uses the old Execute SNS Command mailbox routine.
1064 * NOTE: Non-Nx_Ports are not requested.
1066 * Returns 0 on success.
1068 static int
1069 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
1071 int rval;
1072 struct qla_hw_data *ha = vha->hw;
1073 uint16_t i;
1074 uint8_t *entry;
1075 struct sns_cmd_pkt *sns_cmd;
1076 uint16_t gid_pt_sns_data_size;
1078 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1080 /* Issue GID_PT. */
1081 /* Prepare SNS command request. */
1082 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1083 gid_pt_sns_data_size);
1085 /* Prepare SNS command arguments -- port_type. */
1086 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1088 /* Execute SNS command. */
1089 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1090 sizeof(struct sns_cmd_pkt));
1091 if (rval != QLA_SUCCESS) {
1092 /*EMPTY*/
1093 ql_dbg(ql_dbg_disc, vha, 0x206d,
1094 "GID_PT Send SNS failed (%d).\n", rval);
1095 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
1096 sns_cmd->p.gid_data[9] != 0x02) {
1097 ql_dbg(ql_dbg_disc, vha, 0x202f,
1098 "GID_PT failed, rejected request, gid_rsp:\n");
1099 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1100 sns_cmd->p.gid_data, 16);
1101 rval = QLA_FUNCTION_FAILED;
1102 } else {
1103 /* Set port IDs in switch info list. */
1104 for (i = 0; i < ha->max_fibre_devices; i++) {
1105 entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1106 list[i].d_id.b.domain = entry[1];
1107 list[i].d_id.b.area = entry[2];
1108 list[i].d_id.b.al_pa = entry[3];
1110 /* Last one exit. */
1111 if (entry[0] & BIT_7) {
1112 list[i].d_id.b.rsvd_1 = entry[0];
1113 break;
1118 * If we've used all available slots, then the switch is
1119 * reporting back more devices than we can handle with this
1120 * single call. Return a failed status, and let GA_NXT handle
1121 * the overload.
1123 if (i == ha->max_fibre_devices)
1124 rval = QLA_FUNCTION_FAILED;
1127 return (rval);
1131 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1132 * @vha: HA context
1133 * @list: switch info entries to populate
1135 * This command uses the old Execute SNS Command mailbox routine.
1137 * Returns 0 on success.
1139 static int
1140 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1142 int rval = QLA_SUCCESS;
1143 struct qla_hw_data *ha = vha->hw;
1144 uint16_t i;
1145 struct sns_cmd_pkt *sns_cmd;
1147 for (i = 0; i < ha->max_fibre_devices; i++) {
1148 /* Issue GPN_ID */
1149 /* Prepare SNS command request. */
1150 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1151 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
1153 /* Prepare SNS command arguments -- port_id. */
1154 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1155 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1156 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1158 /* Execute SNS command. */
1159 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1160 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1161 if (rval != QLA_SUCCESS) {
1162 /*EMPTY*/
1163 ql_dbg(ql_dbg_disc, vha, 0x2032,
1164 "GPN_ID Send SNS failed (%d).\n", rval);
1165 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1166 sns_cmd->p.gpn_data[9] != 0x02) {
1167 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1168 "GPN_ID failed, rejected request, gpn_rsp:\n");
1169 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1170 sns_cmd->p.gpn_data, 16);
1171 rval = QLA_FUNCTION_FAILED;
1172 } else {
1173 /* Save portname */
1174 memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
1175 WWN_SIZE);
1178 /* Last device exit. */
1179 if (list[i].d_id.b.rsvd_1 != 0)
1180 break;
1183 return (rval);
1187 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
1188 * @vha: HA context
1189 * @list: switch info entries to populate
1191 * This command uses the old Execute SNS Command mailbox routine.
1193 * Returns 0 on success.
1195 static int
1196 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
1198 int rval = QLA_SUCCESS;
1199 struct qla_hw_data *ha = vha->hw;
1200 uint16_t i;
1201 struct sns_cmd_pkt *sns_cmd;
1203 for (i = 0; i < ha->max_fibre_devices; i++) {
1204 /* Issue GNN_ID */
1205 /* Prepare SNS command request. */
1206 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1207 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
1209 /* Prepare SNS command arguments -- port_id. */
1210 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1211 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1212 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1214 /* Execute SNS command. */
1215 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1216 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1217 if (rval != QLA_SUCCESS) {
1218 /*EMPTY*/
1219 ql_dbg(ql_dbg_disc, vha, 0x203f,
1220 "GNN_ID Send SNS failed (%d).\n", rval);
1221 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1222 sns_cmd->p.gnn_data[9] != 0x02) {
1223 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1224 "GNN_ID failed, rejected request, gnn_rsp:\n");
1225 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1226 sns_cmd->p.gnn_data, 16);
1227 rval = QLA_FUNCTION_FAILED;
1228 } else {
1229 /* Save nodename */
1230 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
1231 WWN_SIZE);
1233 ql_dbg(ql_dbg_disc, vha, 0x206e,
1234 "GID_PT entry - nn %8phN pn %8phN "
1235 "port_id=%02x%02x%02x.\n",
1236 list[i].node_name, list[i].port_name,
1237 list[i].d_id.b.domain, list[i].d_id.b.area,
1238 list[i].d_id.b.al_pa);
1241 /* Last device exit. */
1242 if (list[i].d_id.b.rsvd_1 != 0)
1243 break;
1246 return (rval);
1250 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
1251 * @vha: HA context
1253 * This command uses the old Execute SNS Command mailbox routine.
1255 * Returns 0 on success.
1257 static int
1258 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1260 int rval;
1261 struct qla_hw_data *ha = vha->hw;
1262 struct sns_cmd_pkt *sns_cmd;
1264 /* Issue RFT_ID. */
1265 /* Prepare SNS command request. */
1266 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1267 RFT_ID_SNS_DATA_SIZE);
1269 /* Prepare SNS command arguments -- port_id, FC-4 types */
1270 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1271 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1272 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1274 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1276 /* Execute SNS command. */
1277 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1278 sizeof(struct sns_cmd_pkt));
1279 if (rval != QLA_SUCCESS) {
1280 /*EMPTY*/
1281 ql_dbg(ql_dbg_disc, vha, 0x2060,
1282 "RFT_ID Send SNS failed (%d).\n", rval);
1283 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1284 sns_cmd->p.rft_data[9] != 0x02) {
1285 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1286 "RFT_ID failed, rejected request rft_rsp:\n");
1287 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1288 sns_cmd->p.rft_data, 16);
1289 rval = QLA_FUNCTION_FAILED;
1290 } else {
1291 ql_dbg(ql_dbg_disc, vha, 0x2073,
1292 "RFT_ID exiting normally.\n");
1295 return (rval);
1299 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
1300 * @vha: HA context
1302 * This command uses the old Execute SNS Command mailbox routine.
1304 * Returns 0 on success.
1306 static int
1307 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1309 int rval;
1310 struct qla_hw_data *ha = vha->hw;
1311 struct sns_cmd_pkt *sns_cmd;
1313 /* Issue RNN_ID. */
1314 /* Prepare SNS command request. */
1315 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1316 RNN_ID_SNS_DATA_SIZE);
1318 /* Prepare SNS command arguments -- port_id, nodename. */
1319 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1320 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1321 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1323 sns_cmd->p.cmd.param[4] = vha->node_name[7];
1324 sns_cmd->p.cmd.param[5] = vha->node_name[6];
1325 sns_cmd->p.cmd.param[6] = vha->node_name[5];
1326 sns_cmd->p.cmd.param[7] = vha->node_name[4];
1327 sns_cmd->p.cmd.param[8] = vha->node_name[3];
1328 sns_cmd->p.cmd.param[9] = vha->node_name[2];
1329 sns_cmd->p.cmd.param[10] = vha->node_name[1];
1330 sns_cmd->p.cmd.param[11] = vha->node_name[0];
1332 /* Execute SNS command. */
1333 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1334 sizeof(struct sns_cmd_pkt));
1335 if (rval != QLA_SUCCESS) {
1336 /*EMPTY*/
1337 ql_dbg(ql_dbg_disc, vha, 0x204a,
1338 "RNN_ID Send SNS failed (%d).\n", rval);
1339 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1340 sns_cmd->p.rnn_data[9] != 0x02) {
1341 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1342 "RNN_ID failed, rejected request, rnn_rsp:\n");
1343 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1344 sns_cmd->p.rnn_data, 16);
1345 rval = QLA_FUNCTION_FAILED;
1346 } else {
1347 ql_dbg(ql_dbg_disc, vha, 0x204c,
1348 "RNN_ID exiting normally.\n");
1351 return (rval);
1355 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1356 * @vha: HA context
1358 * Returns 0 on success.
1361 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1363 int ret, rval;
1364 uint16_t mb[MAILBOX_REGISTER_COUNT];
1365 struct qla_hw_data *ha = vha->hw;
1367 ret = QLA_SUCCESS;
1368 if (vha->flags.management_server_logged_in)
1369 return ret;
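/*
 * Log in to the management server at well-known address FF FF FAh;
 * mgmt_svr_loop_id is the loop ID/N_Port handle the driver reserves
 * for that session.
 */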
1371 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1372 0xfa, mb, BIT_1);
1373 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1374 if (rval == QLA_MEMORY_ALLOC_FAILED)
1375 ql_dbg(ql_dbg_disc, vha, 0x2085,
1376 "Failed management_server login: loopid=%x "
1377 "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1378 else
1379 ql_dbg(ql_dbg_disc, vha, 0x2024,
1380 "Failed management_server login: loopid=%x "
1381 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1382 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
1383 mb[7]);
1384 ret = QLA_FUNCTION_FAILED;
1385 } else
1386 vha->flags.management_server_logged_in = 1;
1388 return ret;
1392 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1393 * @vha: HA context
1394 * @req_size: request size in bytes
1395 * @rsp_size: response size in bytes
1397 * Returns a pointer to the @ha's ms_iocb.
1399 void *
1400 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1401 uint32_t rsp_size)
1403 ms_iocb_entry_t *ms_pkt;
1404 struct qla_hw_data *ha = vha->hw;
1406 ms_pkt = ha->ms_iocb;
1407 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1409 ms_pkt->entry_type = MS_IOCB_TYPE;
1410 ms_pkt->entry_count = 1;
1411 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1412 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
1413 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1414 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1415 ms_pkt->total_dsd_count = cpu_to_le16(2);
1416 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1417 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1419 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
1420 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1422 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
1423 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
1425 return ms_pkt;
1429 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1430 * @vha: HA context
1431 * @req_size: request size in bytes
1432 * @rsp_size: response size in bytes
1434 * Returns a pointer to the @ha's ms_iocb.
1436 void *
1437 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1438 uint32_t rsp_size)
1440 struct ct_entry_24xx *ct_pkt;
1441 struct qla_hw_data *ha = vha->hw;
1443 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1444 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1446 ct_pkt->entry_type = CT_IOCB_TYPE;
1447 ct_pkt->entry_count = 1;
1448 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1449 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1450 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1451 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1452 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1453 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1455 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
1456 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1458 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
1459 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
1460 ct_pkt->vp_index = vha->vp_idx;
1462 return ct_pkt;
1465 static void
1466 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1468 struct qla_hw_data *ha = vha->hw;
1469 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1470 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1472 if (IS_FWI2_CAPABLE(ha)) {
1473 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1474 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1475 } else {
1476 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1477 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1482 * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for SNS query.
1483 * @p: CT request buffer
1484 * @cmd: GS command
1485 * @rsp_size: response size in bytes
1487 * Returns a pointer to the initialized @ct_req.
1489 static inline struct ct_sns_req *
1490 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1491 uint16_t rsp_size)
1493 memset(p, 0, sizeof(struct ct_sns_pkt));
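/*
 * CT_IU preamble for FDMI: per FC-GS, GS type 0xFA is the Management
 * Service and subtype 0x10 addresses the Fabric Device Management
 * Interface; the response size is again in 4-byte words, excluding
 * the 16-byte CT header.
 */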
1495 p->p.req.header.revision = 0x01;
1496 p->p.req.header.gs_type = 0xFA;
1497 p->p.req.header.gs_subtype = 0x10;
1498 p->p.req.command = cpu_to_be16(cmd);
1499 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
1501 return &p->p.req;
1504 uint
1505 qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
1507 uint speeds = 0;
1509 if (IS_CNA_CAPABLE(ha))
1510 return FDMI_PORT_SPEED_10GB;
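/*
 * For ISP27xx/28xx the supported range is derived from firmware-
 * reported codes: max_supported_speed appears to encode the top rate
 * (0 = 16Gb, 1 = 32Gb, 2 = 64Gb) and min_supported_speed the lowest
 * supported rate (2 = 4Gb up through 6 = 64Gb), as used below.
 */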
1511 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
1512 if (ha->max_supported_speed == 2) {
1513 if (ha->min_supported_speed <= 6)
1514 speeds |= FDMI_PORT_SPEED_64GB;
1516 if (ha->max_supported_speed == 2 ||
1517 ha->max_supported_speed == 1) {
1518 if (ha->min_supported_speed <= 5)
1519 speeds |= FDMI_PORT_SPEED_32GB;
1521 if (ha->max_supported_speed == 2 ||
1522 ha->max_supported_speed == 1 ||
1523 ha->max_supported_speed == 0) {
1524 if (ha->min_supported_speed <= 4)
1525 speeds |= FDMI_PORT_SPEED_16GB;
1527 if (ha->max_supported_speed == 1 ||
1528 ha->max_supported_speed == 0) {
1529 if (ha->min_supported_speed <= 3)
1530 speeds |= FDMI_PORT_SPEED_8GB;
1532 if (ha->max_supported_speed == 0) {
1533 if (ha->min_supported_speed <= 2)
1534 speeds |= FDMI_PORT_SPEED_4GB;
1536 return speeds;
1538 if (IS_QLA2031(ha)) {
1539 if ((ha->pdev->subsystem_vendor == 0x103C) &&
1540 ((ha->pdev->subsystem_device == 0x8002) ||
1541 (ha->pdev->subsystem_device == 0x8086))) {
1542 speeds = FDMI_PORT_SPEED_16GB;
1543 } else {
1544 speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
1545 FDMI_PORT_SPEED_4GB;
1547 return speeds;
1549 if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
1550 return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
1551 FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
1552 if (IS_QLA24XX_TYPE(ha))
1553 return FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_2GB|
1554 FDMI_PORT_SPEED_1GB;
1555 if (IS_QLA23XX(ha))
1556 return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
1557 return FDMI_PORT_SPEED_1GB;
1560 uint
1561 qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
1563 switch (ha->link_data_rate) {
1564 case PORT_SPEED_1GB:
1565 return FDMI_PORT_SPEED_1GB;
1566 case PORT_SPEED_2GB:
1567 return FDMI_PORT_SPEED_2GB;
1568 case PORT_SPEED_4GB:
1569 return FDMI_PORT_SPEED_4GB;
1570 case PORT_SPEED_8GB:
1571 return FDMI_PORT_SPEED_8GB;
1572 case PORT_SPEED_10GB:
1573 return FDMI_PORT_SPEED_10GB;
1574 case PORT_SPEED_16GB:
1575 return FDMI_PORT_SPEED_16GB;
1576 case PORT_SPEED_32GB:
1577 return FDMI_PORT_SPEED_32GB;
1578 case PORT_SPEED_64GB:
1579 return FDMI_PORT_SPEED_64GB;
1580 default:
1581 return FDMI_PORT_SPEED_UNKNOWN;
1586 * qla2x00_hba_attributes() - perform HBA attributes registration
1587 * @vha: HA context
1588 * @entries: buffer in which the attribute entries are built
1589 * @callopt: Option to issue extended or standard FDMI
1590 * command parameter
1592 * Returns the size in bytes of the attribute entries built in @entries.
1594 static unsigned long
1595 qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
1596 unsigned int callopt)
1598 struct qla_hw_data *ha = vha->hw;
1599 struct new_utsname *p_sysid = utsname();
1600 struct ct_fdmi_hba_attr *eiter;
1601 uint16_t alen;
1602 unsigned long size = 0;
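/*
 * Each FDMI attribute below is built as a type/length header followed
 * by its value: alen accumulates the value size, any padding needed
 * for 4-byte alignment and the attribute header itself
 * (FDMI_ATTR_TYPELEN), while size advances to the next entry slot.
 */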
1604 /* Nodename. */
1605 eiter = entries + size;
1606 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1607 memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
1608 alen = sizeof(eiter->a.node_name);
1609 alen += FDMI_ATTR_TYPELEN(eiter);
1610 eiter->len = cpu_to_be16(alen);
1611 size += alen;
1612 ql_dbg(ql_dbg_disc, vha, 0x20a0,
1613 "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
1614 /* Manufacturer. */
1615 eiter = entries + size;
1616 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1617 alen = scnprintf(
1618 eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1619 "%s", QLA2XXX_MANUFACTURER);
1620 alen += FDMI_ATTR_ALIGNMENT(alen);
1621 alen += FDMI_ATTR_TYPELEN(eiter);
1622 eiter->len = cpu_to_be16(alen);
1623 size += alen;
1624 ql_dbg(ql_dbg_disc, vha, 0x20a1,
1625 "MANUFACTURER = %s.\n", eiter->a.manufacturer);
1626 /* Serial number. */
1627 eiter = entries + size;
1628 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1629 alen = 0;
1630 if (IS_FWI2_CAPABLE(ha)) {
1631 alen = qla2xxx_get_vpd_field(vha, "SN",
1632 eiter->a.serial_num, sizeof(eiter->a.serial_num));
1634 if (!alen) {
1635 uint32_t sn = ((ha->serial0 & 0x1f) << 16) |
1636 (ha->serial2 << 8) | ha->serial1;
1637 alen = scnprintf(
1638 eiter->a.serial_num, sizeof(eiter->a.serial_num),
1639 "%c%05d", 'A' + sn / 100000, sn % 100000);
1641 alen += FDMI_ATTR_ALIGNMENT(alen);
1642 alen += FDMI_ATTR_TYPELEN(eiter);
1643 eiter->len = cpu_to_be16(alen);
1644 size += alen;
1645 ql_dbg(ql_dbg_disc, vha, 0x20a2,
1646 "SERIAL NUMBER = %s.\n", eiter->a.serial_num);
1647 /* Model name. */
1648 eiter = entries + size;
1649 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1650 alen = scnprintf(
1651 eiter->a.model, sizeof(eiter->a.model),
1652 "%s", ha->model_number);
1653 alen += FDMI_ATTR_ALIGNMENT(alen);
1654 alen += FDMI_ATTR_TYPELEN(eiter);
1655 eiter->len = cpu_to_be16(alen);
1656 size += alen;
1657 ql_dbg(ql_dbg_disc, vha, 0x20a3,
1658 "MODEL NAME = %s.\n", eiter->a.model);
1659 /* Model description. */
1660 eiter = entries + size;
1661 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1662 alen = scnprintf(
1663 eiter->a.model_desc, sizeof(eiter->a.model_desc),
1664 "%s", ha->model_desc);
1665 alen += FDMI_ATTR_ALIGNMENT(alen);
1666 alen += FDMI_ATTR_TYPELEN(eiter);
1667 eiter->len = cpu_to_be16(alen);
1668 size += alen;
1669 ql_dbg(ql_dbg_disc, vha, 0x20a4,
1670 "MODEL DESCRIPTION = %s.\n", eiter->a.model_desc);
1671 /* Hardware version. */
1672 eiter = entries + size;
1673 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1674 alen = 0;
1675 if (IS_FWI2_CAPABLE(ha)) {
1676 if (!alen) {
1677 alen = qla2xxx_get_vpd_field(vha, "MN",
1678 eiter->a.hw_version, sizeof(eiter->a.hw_version));
1680 if (!alen) {
1681 alen = qla2xxx_get_vpd_field(vha, "EC",
1682 eiter->a.hw_version, sizeof(eiter->a.hw_version));
1685 if (!alen) {
1686 alen = scnprintf(
1687 eiter->a.hw_version, sizeof(eiter->a.hw_version),
1688 "HW:%s", ha->adapter_id);
1690 alen += FDMI_ATTR_ALIGNMENT(alen);
1691 alen += FDMI_ATTR_TYPELEN(eiter);
1692 eiter->len = cpu_to_be16(alen);
1693 size += alen;
1694 ql_dbg(ql_dbg_disc, vha, 0x20a5,
1695 "HARDWARE VERSION = %s.\n", eiter->a.hw_version);
1696 /* Driver version. */
1697 eiter = entries + size;
1698 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1699 alen = scnprintf(
1700 eiter->a.driver_version, sizeof(eiter->a.driver_version),
1701 "%s", qla2x00_version_str);
1702 alen += FDMI_ATTR_ALIGNMENT(alen);
1703 alen += FDMI_ATTR_TYPELEN(eiter);
1704 eiter->len = cpu_to_be16(alen);
1705 size += alen;
1706 ql_dbg(ql_dbg_disc, vha, 0x20a6,
1707 "DRIVER VERSION = %s.\n", eiter->a.driver_version);
1708 /* Option ROM version. */
1709 eiter = entries + size;
1710 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1711 alen = scnprintf(
1712 eiter->a.orom_version, sizeof(eiter->a.orom_version),
1713 "%d.%02d", ha->efi_revision[1], ha->efi_revision[0]);
1714 alen += FDMI_ATTR_ALIGNMENT(alen);
1715 alen += FDMI_ATTR_TYPELEN(eiter);
1716 eiter->len = cpu_to_be16(alen);
1717 size += alen;
1719 ql_dbg(ql_dbg_disc, vha, 0x20a7,
1720 "OPTROM VERSION = %d.%02d.\n",
1721 eiter->a.orom_version[1], eiter->a.orom_version[0]);
1722 /* Firmware version */
1723 eiter = entries + size;
1724 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1725 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1726 sizeof(eiter->a.fw_version));
alen = strlen(eiter->a.fw_version);
1727 alen += FDMI_ATTR_ALIGNMENT(alen);
1728 alen += FDMI_ATTR_TYPELEN(eiter);
1729 eiter->len = cpu_to_be16(alen);
1730 size += alen;
1731 ql_dbg(ql_dbg_disc, vha, 0x20a8,
1732 "FIRMWARE VERSION = %s.\n", eiter->a.fw_version);
1733 /* OS Name and Version */
1734 eiter = entries + size;
1735 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
1736 alen = 0;
1737 if (p_sysid) {
1738 alen = scnprintf(
1739 eiter->a.os_version, sizeof(eiter->a.os_version),
1740 "%s %s %s",
1741 p_sysid->sysname, p_sysid->release, p_sysid->machine);
1743 if (!alen) {
1744 alen = scnprintf(
1745 eiter->a.os_version, sizeof(eiter->a.os_version),
1746 "%s %s",
1747 "Linux", fc_host_system_hostname(vha->host));
1749 alen += FDMI_ATTR_ALIGNMENT(alen);
1750 alen += FDMI_ATTR_TYPELEN(eiter);
1751 eiter->len = cpu_to_be16(alen);
1752 size += alen;
1753 ql_dbg(ql_dbg_disc, vha, 0x20a9,
1754 "OS VERSION = %s.\n", eiter->a.os_version);
1755 if (callopt == CALLOPT_FDMI1)
1756 goto done;
1757 /* MAX CT Payload Length */
1758 eiter = entries + size;
1759 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
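/* The maximum CT payload is reported in 4-byte words, hence the >> 2. */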
1760 eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size >> 2);
1762 alen = sizeof(eiter->a.max_ct_len);
1763 alen += FDMI_ATTR_TYPELEN(eiter);
1764 eiter->len = cpu_to_be16(alen);
1765 size += alen;
1766 ql_dbg(ql_dbg_disc, vha, 0x20aa,
1767 "CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len));
1768 /* Node Symbolic Name */
1769 eiter = entries + size;
1770 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
1771 alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
1772 sizeof(eiter->a.sym_name));
1773 alen += FDMI_ATTR_ALIGNMENT(alen);
1774 alen += FDMI_ATTR_TYPELEN(eiter);
1775 eiter->len = cpu_to_be16(alen);
1776 size += alen;
1777 ql_dbg(ql_dbg_disc, vha, 0x20ab,
1778 "SYMBOLIC NAME = %s.\n", eiter->a.sym_name);
1779 /* Vendor Specific information */
1780 eiter = entries + size;
1781 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_SPECIFIC_INFO);
1782 eiter->a.vendor_specific_info = cpu_to_be32(PCI_VENDOR_ID_QLOGIC);
1783 alen = sizeof(eiter->a.vendor_specific_info);
1784 alen += FDMI_ATTR_TYPELEN(eiter);
1785 eiter->len = cpu_to_be16(alen);
1786 size += alen;
1787 ql_dbg(ql_dbg_disc, vha, 0x20ac,
1788 "VENDOR SPECIFIC INFO = 0x%x.\n",
1789 be32_to_cpu(eiter->a.vendor_specific_info));
1790 /* Num Ports */
1791 eiter = entries + size;
1792 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
1793 eiter->a.num_ports = cpu_to_be32(1);
1794 alen = sizeof(eiter->a.num_ports);
1795 alen += FDMI_ATTR_TYPELEN(eiter);
1796 eiter->len = cpu_to_be16(alen);
1797 size += alen;
1798 ql_dbg(ql_dbg_disc, vha, 0x20ad,
1799 "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
1800 /* Fabric Name */
1801 eiter = entries + size;
1802 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
1803 memcpy(eiter->a.fabric_name, vha->fabric_node_name,
1804 sizeof(eiter->a.fabric_name));
1805 alen = sizeof(eiter->a.fabric_name);
1806 alen += FDMI_ATTR_TYPELEN(eiter);
1807 eiter->len = cpu_to_be16(alen);
1808 size += alen;
1809 ql_dbg(ql_dbg_disc, vha, 0x20ae,
1810 "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
1811 /* BIOS Version */
1812 eiter = entries + size;
1813 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
1814 alen = scnprintf(
1815 eiter->a.bios_name, sizeof(eiter->a.bios_name),
1816 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1817 alen += FDMI_ATTR_ALIGNMENT(alen);
1818 alen += FDMI_ATTR_TYPELEN(eiter);
1819 eiter->len = cpu_to_be16(alen);
1820 size += alen;
1821 ql_dbg(ql_dbg_disc, vha, 0x20af,
1822 "BIOS NAME = %s\n", eiter->a.bios_name);
1823 /* Vendor Identifier */
1824 eiter = entries + size;
1825 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_IDENTIFIER);
1826 alen = scnprintf(
1827 eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
1828 "%s", "QLGC");
1829 alen += FDMI_ATTR_ALIGNMENT(alen);
1830 alen += FDMI_ATTR_TYPELEN(eiter);
1831 eiter->len = cpu_to_be16(alen);
1832 size += alen;
1833 ql_dbg(ql_dbg_disc, vha, 0x20b0,
1834 "VENDOR IDENTIFIER = %s.\n", eiter->a.vendor_identifier);
1835 done:
1836 return size;
1840 * qla2x00_port_attributes() - perform Port attributes registration
1841 * @vha: HA context
1842 * @entries: buffer in which the attribute entries are built
1843 * @callopt: Option to issue extended or standard FDMI
1844 * command parameter
1846 * Returns the size in bytes of the attribute entries built in @entries.
1848 static unsigned long
1849 qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
1850 unsigned int callopt)
1852 struct qla_hw_data *ha = vha->hw;
1853 struct new_utsname *p_sysid = utsname();
1854 char *hostname = p_sysid ?
1855 p_sysid->nodename : fc_host_system_hostname(vha->host);
1856 struct ct_fdmi_port_attr *eiter;
1857 uint16_t alen;
1858 unsigned long size = 0;
1860 /* FC4 types. */
1861 eiter = entries + size;
1862 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1863 eiter->a.fc4_types[0] = 0x00;
1864 eiter->a.fc4_types[1] = 0x00;
1865 eiter->a.fc4_types[2] = 0x01;
1866 eiter->a.fc4_types[3] = 0x00;
1867 alen = sizeof(eiter->a.fc4_types);
1868 alen += FDMI_ATTR_TYPELEN(eiter);
1869 eiter->len = cpu_to_be16(alen);
1870 size += alen;
1871 ql_dbg(ql_dbg_disc, vha, 0x20c0,
1872 "FC4 TYPES = %016llx.\n", *(uint64_t *)eiter->a.fc4_types);
1873 if (vha->flags.nvme_enabled) {
1874 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
1875 ql_dbg(ql_dbg_disc, vha, 0x211f,
1876 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
1877 eiter->a.fc4_types[6]);
1879 /* Supported speed. */
1880 eiter = entries + size;
1881 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1882 eiter->a.sup_speed = cpu_to_be32(
1883 qla25xx_fdmi_port_speed_capability(ha));
1884 alen = sizeof(eiter->a.sup_speed);
1885 alen += FDMI_ATTR_TYPELEN(eiter);
1886 eiter->len = cpu_to_be16(alen);
1887 size += alen;
1888 ql_dbg(ql_dbg_disc, vha, 0x20c1,
1889 "SUPPORTED SPEED = %x.\n", be32_to_cpu(eiter->a.sup_speed));
1890 /* Current speed. */
1891 eiter = entries + size;
1892 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1893 eiter->a.cur_speed = cpu_to_be32(
1894 qla25xx_fdmi_port_speed_currently(ha));
1895 alen = sizeof(eiter->a.cur_speed);
1896 alen += FDMI_ATTR_TYPELEN(eiter);
1897 eiter->len = cpu_to_be16(alen);
1898 size += alen;
1899 ql_dbg(ql_dbg_disc, vha, 0x20c2,
1900 "CURRENT SPEED = %x.\n", be32_to_cpu(eiter->a.cur_speed));
1901 /* Max frame size. */
1902 eiter = entries + size;
1903 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1904 eiter->a.max_frame_size = cpu_to_be32(ha->frame_payload_size);
1905 alen = sizeof(eiter->a.max_frame_size);
1906 alen += FDMI_ATTR_TYPELEN(eiter);
1907 eiter->len = cpu_to_be16(alen);
1908 size += alen;
1909 ql_dbg(ql_dbg_disc, vha, 0x20c3,
1910 "MAX FRAME SIZE = %x.\n", be32_to_cpu(eiter->a.max_frame_size));
1911 /* OS device name. */
1912 eiter = entries + size;
1913 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1914 alen = scnprintf(
1915 eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1916 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1917 alen += FDMI_ATTR_ALIGNMENT(alen);
1918 alen += FDMI_ATTR_TYPELEN(eiter);
1919 eiter->len = cpu_to_be16(alen);
1920 size += alen;
1921 ql_dbg(ql_dbg_disc, vha, 0x20c4,
1922 "OS DEVICE NAME = %s.\n", eiter->a.os_dev_name);
1923 /* Hostname. */
1924 eiter = entries + size;
1925 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1926 if (!*hostname || !strncmp(hostname, "(none)", 6))
1927 hostname = "Linux-default";
1928 alen = scnprintf(
1929 eiter->a.host_name, sizeof(eiter->a.host_name),
1930 "%s", hostname);
1931 alen += FDMI_ATTR_ALIGNMENT(alen);
1932 alen += FDMI_ATTR_TYPELEN(eiter);
1933 eiter->len = cpu_to_be16(alen);
1934 size += alen;
1935 ql_dbg(ql_dbg_disc, vha, 0x20c5,
1936 "HOSTNAME = %s.\n", eiter->a.host_name);
1938 if (callopt == CALLOPT_FDMI1)
1939 goto done;
1941 /* Node Name */
1942 eiter = entries + size;
1943 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
1944 memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
1945 alen = sizeof(eiter->a.node_name);
1946 alen += FDMI_ATTR_TYPELEN(eiter);
1947 eiter->len = cpu_to_be16(alen);
1948 size += alen;
1949 ql_dbg(ql_dbg_disc, vha, 0x20c6,
1950 "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
1952 /* Port Name */
1953 eiter = entries + size;
1954 eiter->type = cpu_to_be16(FDMI_PORT_NAME);
1955 memcpy(eiter->a.port_name, vha->port_name, sizeof(eiter->a.port_name));
1956 alen = sizeof(eiter->a.port_name);
1957 alen += FDMI_ATTR_TYPELEN(eiter);
1958 eiter->len = cpu_to_be16(alen);
1959 size += alen;
1960 ql_dbg(ql_dbg_disc, vha, 0x20c7,
1961 "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name));
1963 /* Port Symbolic Name */
1964 eiter = entries + size;
1965 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
1966 alen = qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
1967 sizeof(eiter->a.port_sym_name));
1968 alen += FDMI_ATTR_ALIGNMENT(alen);
1969 alen += FDMI_ATTR_TYPELEN(eiter);
1970 eiter->len = cpu_to_be16(alen);
1971 size += alen;
1972 ql_dbg(ql_dbg_disc, vha, 0x20c8,
1973 "PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name);
1975 /* Port Type */
1976 eiter = entries + size;
1977 eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
1978 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
1979 alen = sizeof(eiter->a.port_type);
1980 alen += FDMI_ATTR_TYPELEN(eiter);
1981 eiter->len = cpu_to_be16(alen);
1982 size += alen;
1983 ql_dbg(ql_dbg_disc, vha, 0x20c9,
1984 "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type));
1986 /* Supported Class of Service */
1987 eiter = entries + size;
1988 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
1989 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
1990 alen = sizeof(eiter->a.port_supported_cos);
1991 alen += FDMI_ATTR_TYPELEN(eiter);
1992 eiter->len = cpu_to_be16(alen);
1993 size += alen;
1994 ql_dbg(ql_dbg_disc, vha, 0x20ca,
1995 "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos));
1997 /* Port Fabric Name */
1998 eiter = entries + size;
1999 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2000 memcpy(eiter->a.fabric_name, vha->fabric_node_name,
2001 sizeof(eiter->a.fabric_name));
2002 alen = sizeof(eiter->a.fabric_name);
2003 alen += FDMI_ATTR_TYPELEN(eiter);
2004 eiter->len = cpu_to_be16(alen);
2005 size += alen;
2006 ql_dbg(ql_dbg_disc, vha, 0x20cb,
2007 "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2009 /* FC4_type */
2010 eiter = entries + size;
2011 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
2012 eiter->a.port_fc4_type[0] = 0x00;
2013 eiter->a.port_fc4_type[1] = 0x00;
2014 eiter->a.port_fc4_type[2] = 0x01;
2015 eiter->a.port_fc4_type[3] = 0x00;
2016 alen = sizeof(eiter->a.port_fc4_type);
2017 alen += FDMI_ATTR_TYPELEN(eiter);
2018 eiter->len = cpu_to_be16(alen);
2019 size += alen;
2020 ql_dbg(ql_dbg_disc, vha, 0x20cc,
2021 "PORT ACTIVE FC4 TYPE = %016llx.\n",
2022 *(uint64_t *)eiter->a.port_fc4_type);
2024 /* Port State */
2025 eiter = entries + size;
2026 eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2027 eiter->a.port_state = cpu_to_be32(2);
2028 alen = sizeof(eiter->a.port_state);
2029 alen += FDMI_ATTR_TYPELEN(eiter);
2030 eiter->len = cpu_to_be16(alen);
2031 size += alen;
2032 ql_dbg(ql_dbg_disc, vha, 0x20cd,
2033 "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state));
2035 /* Number of Ports */
2036 eiter = entries + size;
2037 eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2038 eiter->a.num_ports = cpu_to_be32(1);
2039 alen = sizeof(eiter->a.num_ports);
2040 alen += FDMI_ATTR_TYPELEN(eiter);
2041 eiter->len = cpu_to_be16(alen);
2042 size += alen;
2043 ql_dbg(ql_dbg_disc, vha, 0x20ce,
2044 "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
2046 /* Port Identifier */
2047 eiter = entries + size;
2048 eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER);
2049 eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2050 alen = sizeof(eiter->a.port_id);
2051 alen += FDMI_ATTR_TYPELEN(eiter);
2052 eiter->len = cpu_to_be16(alen);
2053 size += alen;
2054 ql_dbg(ql_dbg_disc, vha, 0x20cf,
2055 "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id));
2057 if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan)
2058 goto done;
2060 /* Smart SAN Service Category (Populate Smart SAN Initiator)*/
2061 eiter = entries + size;
2062 eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE);
2063 alen = scnprintf(
2064 eiter->a.smartsan_service, sizeof(eiter->a.smartsan_service),
2065 "%s", "Smart SAN Initiator");
2066 alen += FDMI_ATTR_ALIGNMENT(alen);
2067 alen += FDMI_ATTR_TYPELEN(eiter);
2068 eiter->len = cpu_to_be16(alen);
2069 size += alen;
2070 ql_dbg(ql_dbg_disc, vha, 0x20d0,
2071 "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service);
2073 /* Smart SAN GUID (NWWN+PWWN) */
2074 eiter = entries + size;
2075 eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID);
2076 memcpy(eiter->a.smartsan_guid, vha->node_name, WWN_SIZE);
2077 memcpy(eiter->a.smartsan_guid + WWN_SIZE, vha->port_name, WWN_SIZE);
2078 alen = sizeof(eiter->a.smartsan_guid);
2079 alen += FDMI_ATTR_TYPELEN(eiter);
2080 eiter->len = cpu_to_be16(alen);
2081 size += alen;
2082 ql_dbg(ql_dbg_disc, vha, 0x20d1,
2083 "Smart SAN GUID = %016llx-%016llx\n",
2084 wwn_to_u64(eiter->a.smartsan_guid),
2085 wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE));
2087 /* Smart SAN Version (populate "Smart SAN Version 2.0") */
2088 eiter = entries + size;
2089 eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION);
2090 alen = scnprintf(
2091 eiter->a.smartsan_version, sizeof(eiter->a.smartsan_version),
2092 "%s", "Smart SAN Version 2.0");
2093 alen += FDMI_ATTR_ALIGNMENT(alen);
2094 alen += FDMI_ATTR_TYPELEN(eiter);
2095 eiter->len = cpu_to_be16(alen);
2096 size += alen;
2097 ql_dbg(ql_dbg_disc, vha, 0x20d2,
2098 "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version);
2100 /* Smart SAN Product Name (Specify Adapter Model No) */
2101 eiter = entries + size;
2102 eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME);
2103 alen = scnprintf(eiter->a.smartsan_prod_name,
2104 sizeof(eiter->a.smartsan_prod_name),
2105 "ISP%04x", ha->pdev->device);
2106 alen += FDMI_ATTR_ALIGNMENT(alen);
2107 alen += FDMI_ATTR_TYPELEN(eiter);
2108 eiter->len = cpu_to_be16(alen);
2109 size += alen;
2110 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2111 "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name);
2113 /* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */
2114 eiter = entries + size;
2115 eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO);
2116 eiter->a.smartsan_port_info = cpu_to_be32(vha->vp_idx ? 2 : 1);
2117 alen = sizeof(eiter->a.smartsan_port_info);
2118 alen += FDMI_ATTR_TYPELEN(eiter);
2119 eiter->len = cpu_to_be16(alen);
2120 size += alen;
2121 ql_dbg(ql_dbg_disc, vha, 0x20d4,
2122 "SMARTSAN PORT INFO = %x\n", eiter->a.smartsan_port_info);
2124 /* Smart SAN Security Support */
2125 eiter = entries + size;
2126 eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT);
2127 eiter->a.smartsan_security_support = cpu_to_be32(1);
2128 alen = sizeof(eiter->a.smartsan_security_support);
2129 alen += FDMI_ATTR_TYPELEN(eiter);
2130 eiter->len = cpu_to_be16(alen);
2131 size += alen;
2132 ql_dbg(ql_dbg_disc, vha, 0x20d6,
2133 "SMARTSAN SECURITY SUPPORT = %d\n",
2134 be32_to_cpu(eiter->a.smartsan_security_support));
2136 done:
2137 return size;
2138 }
2140 /**
2141 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
2142 * @vha: HA context
2143 * @callopt: Option to issue FDMI registration
2145 * Returns 0 on success.
2146 */
2147 static int
2148 qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt)
2149 {
2150 struct qla_hw_data *ha = vha->hw;
2151 unsigned long size = 0;
2152 unsigned int rval, count;
2153 ms_iocb_entry_t *ms_pkt;
2154 struct ct_sns_req *ct_req;
2155 struct ct_sns_rsp *ct_rsp;
2156 void *entries;
2158 count = callopt != CALLOPT_FDMI1 ?
2159 FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT;
2161 size = RHBA_RSP_SIZE;
2163 ql_dbg(ql_dbg_disc, vha, 0x20e0,
2164 "RHBA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2166 /* Request size adjusted after CT preparation */
2167 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2169 /* Prepare CT request */
2170 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, size);
2171 ct_rsp = &ha->ct_sns->p.rsp;
2173 /* Prepare FDMI command entries */
2174 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name,
2175 sizeof(ct_req->req.rhba.hba_identifier));
2176 size += sizeof(ct_req->req.rhba.hba_identifier);
2178 ct_req->req.rhba.entry_count = cpu_to_be32(1);
2179 size += sizeof(ct_req->req.rhba.entry_count);
2181 memcpy(ct_req->req.rhba.port_name, vha->port_name,
2182 sizeof(ct_req->req.rhba.port_name));
2183 size += sizeof(ct_req->req.rhba.port_name);
2185 /* Attribute count */
2186 ct_req->req.rhba.attrs.count = cpu_to_be32(count);
2187 size += sizeof(ct_req->req.rhba.attrs.count);
2189 /* Attribute block */
2190 entries = &ct_req->req.rhba.attrs.entry;
2192 size += qla2x00_hba_attributes(vha, entries, callopt);
2194 /* Update MS request size. */
2195 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
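/*
 * The extra 16 bytes account for the basic CT_IU preamble that
 * qla2x00_prep_ct_fdmi_req() places ahead of the RHBA payload;
 * "size" above only tracks the payload fields themselves.
 */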
2197 ql_dbg(ql_dbg_disc, vha, 0x20e1,
2198 "RHBA %016llx %016llx.\n",
2199 wwn_to_u64(ct_req->req.rhba.hba_identifier),
2200 wwn_to_u64(ct_req->req.rhba.port_name));
2202 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20e2,
2203 entries, size);
2205 /* Execute MS IOCB */
2206 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2207 sizeof(*ha->ms_iocb));
2208 if (rval) {
2209 ql_dbg(ql_dbg_disc, vha, 0x20e3,
2210 "RHBA iocb failed (%d).\n", rval);
2211 return rval;
2214 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA");
2215 if (rval) {
2216 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2217 ct_rsp->header.explanation_code ==
2218 CT_EXPL_ALREADY_REGISTERED) {
2219 ql_dbg(ql_dbg_disc, vha, 0x20e4,
2220 "RHBA already registered.\n");
2221 return QLA_ALREADY_REGISTERED;
2224 ql_dbg(ql_dbg_disc, vha, 0x20e5,
2225 "RHBA failed, CT Reason %#x, CT Explanation %#x\n",
2226 ct_rsp->header.reason_code,
2227 ct_rsp->header.explanation_code);
2228 return rval;
2231 ql_dbg(ql_dbg_disc, vha, 0x20e6, "RHBA exiting normally.\n");
2232 return rval;
2233 }
2236 static int
2237 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2238 {
2239 int rval;
2240 struct qla_hw_data *ha = vha->hw;
2241 ms_iocb_entry_t *ms_pkt;
2242 struct ct_sns_req *ct_req;
2243 struct ct_sns_rsp *ct_rsp;
2244 /* Issue RPA */
2245 /* Prepare common MS IOCB */
2246 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2247 DHBA_RSP_SIZE);
2248 /* Prepare CT request */
2249 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2250 ct_rsp = &ha->ct_sns->p.rsp;
2251 /* Prepare FDMI command arguments -- portname. */
2252 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2253 ql_dbg(ql_dbg_disc, vha, 0x2036,
2254 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2255 /* Execute MS IOCB */
2256 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2257 sizeof(ms_iocb_entry_t));
2258 if (rval != QLA_SUCCESS) {
2259 /*EMPTY*/
2260 ql_dbg(ql_dbg_disc, vha, 0x2037,
2261 "DHBA issue IOCB failed (%d).\n", rval);
2262 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2263 QLA_SUCCESS) {
2264 rval = QLA_FUNCTION_FAILED;
2265 } else {
2266 ql_dbg(ql_dbg_disc, vha, 0x2038,
2267 "DHBA exiting normally.\n");
2269 return rval;
2273 * qla2x00_fdmi_rprt() - perform RPRT registration
2274 * @vha: HA context
2275 * @callopt: Option to issue extended or standard FDMI
2276 * command parameter
2278 * Returns 0 on success.
2280 static int
2281 qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt)
2283 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2284 struct qla_hw_data *ha = vha->hw;
2285 ulong size = 0;
2286 uint rval, count;
2287 ms_iocb_entry_t *ms_pkt;
2288 struct ct_sns_req *ct_req;
2289 struct ct_sns_rsp *ct_rsp;
2290 void *entries;
2291 count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
2292 FDMI2_SMARTSAN_PORT_ATTR_COUNT :
2293 callopt != CALLOPT_FDMI1 ?
2294 FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
2296 size = RPRT_RSP_SIZE;
2297 ql_dbg(ql_dbg_disc, vha, 0x20e8,
2298 "RPRT (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2299 /* Request size adjusted after CT preparation */
2300 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2301 /* Prepare CT request */
2302 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPRT_CMD, size);
2303 ct_rsp = &ha->ct_sns->p.rsp;
2304 /* Prepare FDMI command entries */
2305 memcpy(ct_req->req.rprt.hba_identifier, base_vha->port_name,
2306 sizeof(ct_req->req.rprt.hba_identifier));
2307 size += sizeof(ct_req->req.rprt.hba_identifier);
2308 memcpy(ct_req->req.rprt.port_name, vha->port_name,
2309 sizeof(ct_req->req.rprt.port_name));
2310 size += sizeof(ct_req->req.rprt.port_name);
2311 /* Attribute count */
2312 ct_req->req.rprt.attrs.count = cpu_to_be32(count);
2313 size += sizeof(ct_req->req.rprt.attrs.count);
2314 /* Attribute block */
2315 entries = ct_req->req.rprt.attrs.entry;
2316 size += qla2x00_port_attributes(vha, entries, callopt);
2317 /* Update MS request size. */
2318 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2319 ql_dbg(ql_dbg_disc, vha, 0x20e9,
2320 "RPRT %016llx %016llx.\n",
2321 wwn_to_u64(ct_req->req.rprt.hba_identifier),
2322 wwn_to_u64(ct_req->req.rprt.port_name));
2323 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ea,
2324 entries, size);
2325 /* Execute MS IOCB */
2326 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2327 sizeof(*ha->ms_iocb));
2328 if (rval) {
2329 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2330 "RPRT iocb failed (%d).\n", rval);
2331 return rval;
2333 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT");
2334 if (rval) {
2335 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2336 ct_rsp->header.explanation_code ==
2337 CT_EXPL_ALREADY_REGISTERED) {
2338 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2339 "RPRT already registered.\n");
2340 return QLA_ALREADY_REGISTERED;
2343 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2344 "RPRT failed, CT Reason code: %#x, CT Explanation %#x\n",
2345 ct_rsp->header.reason_code,
2346 ct_rsp->header.explanation_code);
2347 return rval;
2349 ql_dbg(ql_dbg_disc, vha, 0x20ee, "RPRT exiting normally.\n");
2350 return rval;
2351 }
2353 /**
2354 * qla2x00_fdmi_rpa() - perform RPA registration
2355 * @vha: HA context
2356 * @callopt: Option to issue FDMI registration
2358 * Returns 0 on success.
2359 */
2360 static int
2361 qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt)
2362 {
2363 struct qla_hw_data *ha = vha->hw;
2364 ulong size = 0;
2365 uint rval, count;
2366 ms_iocb_entry_t *ms_pkt;
2367 struct ct_sns_req *ct_req;
2368 struct ct_sns_rsp *ct_rsp;
2369 void *entries;
2371 count =
2372 callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
2373 FDMI2_SMARTSAN_PORT_ATTR_COUNT :
2374 callopt != CALLOPT_FDMI1 ?
2375 FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
2377 size =
2378 callopt != CALLOPT_FDMI1 ?
2379 SMARTSAN_RPA_RSP_SIZE : RPA_RSP_SIZE;
2381 ql_dbg(ql_dbg_disc, vha, 0x20f0,
2382 "RPA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2384 /* Request size adjusted after CT preparation */
2385 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2387 /* Prepare CT request */
2388 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, size);
2389 ct_rsp = &ha->ct_sns->p.rsp;
2391 /* Prepare FDMI command entries. */
2392 memcpy(ct_req->req.rpa.port_name, vha->port_name,
2393 sizeof(ct_req->req.rpa.port_name));
2394 size += sizeof(ct_req->req.rpa.port_name);
2396 /* Attribute count */
2397 ct_req->req.rpa.attrs.count = cpu_to_be32(count);
2398 size += sizeof(ct_req->req.rpa.attrs.count);
2400 /* Attribute block */
2401 entries = ct_req->req.rpa.attrs.entry;
2403 size += qla2x00_port_attributes(vha, entries, callopt);
2405 /* Update MS request size. */
2406 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2408 ql_dbg(ql_dbg_disc, vha, 0x20f1,
2409 "RPA %016llx.\n", wwn_to_u64(ct_req->req.rpa.port_name));
2411 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f2,
2412 entries, size);
2414 /* Execute MS IOCB */
2415 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2416 sizeof(*ha->ms_iocb));
2417 if (rval) {
2418 ql_dbg(ql_dbg_disc, vha, 0x20f3,
2419 "RPA iocb failed (%d).\n", rval);
2420 return rval;
2423 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA");
2424 if (rval) {
2425 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2426 ct_rsp->header.explanation_code ==
2427 CT_EXPL_ALREADY_REGISTERED) {
2428 ql_dbg(ql_dbg_disc, vha, 0x20f4,
2429 "RPA already registered.\n");
2430 return QLA_ALREADY_REGISTERED;
2433 ql_dbg(ql_dbg_disc, vha, 0x20f5,
2434 "RPA failed, CT Reason code: %#x, CT Explanation %#x\n",
2435 ct_rsp->header.reason_code,
2436 ct_rsp->header.explanation_code);
2437 return rval;
2440 ql_dbg(ql_dbg_disc, vha, 0x20f6, "RPA exiting normally.\n");
2441 return rval;
2442 }
2444 /**
2445 * qla2x00_fdmi_register() - perform FDMI registration for the HBA and port
2446 * @vha: HA context
2448 * Returns 0 on success.
2449 */
2450 int
2451 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2452 {
2453 int rval = QLA_SUCCESS;
2454 struct qla_hw_data *ha = vha->hw;
2456 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2457 IS_QLAFX00(ha))
2458 return rval;
2460 rval = qla2x00_mgmt_svr_login(vha);
2461 if (rval)
2462 return rval;
2464 /* For npiv/vport send rprt only */
2465 if (vha->vp_idx) {
2466 if (ql2xsmartsan)
2467 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2_SMARTSAN);
2468 if (rval || !ql2xsmartsan)
2469 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2);
2470 if (rval)
2471 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI1);
2473 return rval;
2476 /* Try fdmi2 first, if fails then try fdmi1 */
2477 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
2478 if (rval) {
2479 if (rval != QLA_ALREADY_REGISTERED)
2480 goto try_fdmi;
2482 rval = qla2x00_fdmi_dhba(vha);
2483 if (rval)
2484 goto try_fdmi;
2486 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
2487 if (rval)
2488 goto try_fdmi;
2491 if (ql2xsmartsan)
2492 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2_SMARTSAN);
2493 if (rval || !ql2xsmartsan)
2494 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2);
2495 if (rval)
2496 goto try_fdmi;
2498 return rval;
2500 try_fdmi:
2501 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
2502 if (rval) {
2503 if (rval != QLA_ALREADY_REGISTERED)
2504 return rval;
2506 rval = qla2x00_fdmi_dhba(vha);
2507 if (rval)
2508 return rval;
2510 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
2511 if (rval)
2512 return rval;
2515 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI1);
2517 return rval;
2518 }
2520 /**
2521 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2522 * @vha: HA context
2523 * @list: switch info entries to populate
2525 * Returns 0 on success.
2526 */
2527 int
2528 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2529 {
2530 int rval = QLA_SUCCESS;
2531 uint16_t i;
2532 struct qla_hw_data *ha = vha->hw;
2533 ms_iocb_entry_t *ms_pkt;
2534 struct ct_sns_req *ct_req;
2535 struct ct_sns_rsp *ct_rsp;
2536 struct ct_arg arg;
2538 if (!IS_IIDMA_CAPABLE(ha))
2539 return QLA_FUNCTION_FAILED;
2541 arg.iocb = ha->ms_iocb;
2542 arg.req_dma = ha->ct_sns_dma;
2543 arg.rsp_dma = ha->ct_sns_dma;
2544 arg.req_size = GFPN_ID_REQ_SIZE;
2545 arg.rsp_size = GFPN_ID_RSP_SIZE;
2546 arg.nport_handle = NPH_SNS;
2548 for (i = 0; i < ha->max_fibre_devices; i++) {
2549 /* Issue GFPN_ID */
2550 /* Prepare common MS IOCB */
2551 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2553 /* Prepare CT request */
2554 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2555 GFPN_ID_RSP_SIZE);
2556 ct_rsp = &ha->ct_sns->p.rsp;
2558 /* Prepare CT arguments -- port_id */
2559 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2561 /* Execute MS IOCB */
2562 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2563 sizeof(ms_iocb_entry_t));
2564 if (rval != QLA_SUCCESS) {
2565 /*EMPTY*/
2566 ql_dbg(ql_dbg_disc, vha, 0x2023,
2567 "GFPN_ID issue IOCB failed (%d).\n", rval);
2568 break;
2569 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2570 "GFPN_ID") != QLA_SUCCESS) {
2571 rval = QLA_FUNCTION_FAILED;
2572 break;
2573 } else {
2574 /* Save fabric portname */
2575 memcpy(list[i].fabric_port_name,
2576 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
2579 /* Last device exit. */
2580 if (list[i].d_id.b.rsvd_1 != 0)
2581 break;
2582 }
2584 return (rval);
2585 }
2588 static inline struct ct_sns_req *
2589 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2590 uint16_t rsp_size)
2591 {
2592 memset(p, 0, sizeof(struct ct_sns_pkt));
2594 p->p.req.header.revision = 0x01;
2595 p->p.req.header.gs_type = 0xFA;
2596 p->p.req.header.gs_subtype = 0x01;
2597 p->p.req.command = cpu_to_be16(cmd);
2598 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
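/*
 * Per the CT protocol, the maximum/residual response size is expressed
 * in 4-byte words and excludes the 16-byte CT_IU preamble, hence the
 * (rsp_size - 16) / 4 conversion.
 */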
2600 return &p->p.req;
2603 static uint16_t
2604 qla2x00_port_speed_capability(uint16_t speed)
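/*
 * Translate a single speed-capability bit from a GPSC response into the
 * driver's PORT_SPEED_* constant; unrecognized bits map to
 * PORT_SPEED_UNKNOWN.
 */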
2606 switch (speed) {
2607 case BIT_15:
2608 return PORT_SPEED_1GB;
2609 case BIT_14:
2610 return PORT_SPEED_2GB;
2611 case BIT_13:
2612 return PORT_SPEED_4GB;
2613 case BIT_12:
2614 return PORT_SPEED_10GB;
2615 case BIT_11:
2616 return PORT_SPEED_8GB;
2617 case BIT_10:
2618 return PORT_SPEED_16GB;
2619 case BIT_8:
2620 return PORT_SPEED_32GB;
2621 case BIT_7:
2622 return PORT_SPEED_64GB;
2623 default:
2624 return PORT_SPEED_UNKNOWN;
2625 }
2626 }
2628 /**
2629 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2630 * @vha: HA context
2631 * @list: switch info entries to populate
2633 * Returns 0 on success.
2634 */
2635 int
2636 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2637 {
2638 int rval;
2639 uint16_t i;
2640 struct qla_hw_data *ha = vha->hw;
2641 ms_iocb_entry_t *ms_pkt;
2642 struct ct_sns_req *ct_req;
2643 struct ct_sns_rsp *ct_rsp;
2644 struct ct_arg arg;
2646 if (!IS_IIDMA_CAPABLE(ha))
2647 return QLA_FUNCTION_FAILED;
2648 if (!ha->flags.gpsc_supported)
2649 return QLA_FUNCTION_FAILED;
2651 rval = qla2x00_mgmt_svr_login(vha);
2652 if (rval)
2653 return rval;
2655 arg.iocb = ha->ms_iocb;
2656 arg.req_dma = ha->ct_sns_dma;
2657 arg.rsp_dma = ha->ct_sns_dma;
2658 arg.req_size = GPSC_REQ_SIZE;
2659 arg.rsp_size = GPSC_RSP_SIZE;
2660 arg.nport_handle = vha->mgmt_svr_loop_id;
2662 for (i = 0; i < ha->max_fibre_devices; i++) {
2663 /* Issue GPSC */
2664 /* Prepare common MS IOCB */
2665 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2667 /* Prepare CT request */
2668 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2669 GPSC_RSP_SIZE);
2670 ct_rsp = &ha->ct_sns->p.rsp;
2672 /* Prepare CT arguments -- port_name */
2673 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2674 WWN_SIZE);
2676 /* Execute MS IOCB */
2677 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2678 sizeof(ms_iocb_entry_t));
2679 if (rval != QLA_SUCCESS) {
2680 /*EMPTY*/
2681 ql_dbg(ql_dbg_disc, vha, 0x2059,
2682 "GPSC issue IOCB failed (%d).\n", rval);
2683 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2684 "GPSC")) != QLA_SUCCESS) {
2685 /* FM command unsupported? */
2686 if (rval == QLA_INVALID_COMMAND &&
2687 (ct_rsp->header.reason_code ==
2688 CT_REASON_INVALID_COMMAND_CODE ||
2689 ct_rsp->header.reason_code ==
2690 CT_REASON_COMMAND_UNSUPPORTED)) {
2691 ql_dbg(ql_dbg_disc, vha, 0x205a,
2692 "GPSC command unsupported, disabling "
2693 "query.\n");
2694 ha->flags.gpsc_supported = 0;
2695 rval = QLA_FUNCTION_FAILED;
2696 break;
2698 rval = QLA_FUNCTION_FAILED;
2699 } else {
2700 list[i].fp_speed = qla2x00_port_speed_capability(
2701 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2702 ql_dbg(ql_dbg_disc, vha, 0x205b,
2703 "GPSC ext entry - fpn "
2704 "%8phN speeds=%04x speed=%04x.\n",
2705 list[i].fabric_port_name,
2706 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2707 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2710 /* Last device exit. */
2711 if (list[i].d_id.b.rsvd_1 != 0)
2712 break;
2713 }
2715 return (rval);
2716 }
2718 /**
2719 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2721 * @vha: HA context
2722 * @list: switch info entries to populate
2724 */
2725 void
2726 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2727 {
2728 int rval;
2729 uint16_t i;
2731 ms_iocb_entry_t *ms_pkt;
2732 struct ct_sns_req *ct_req;
2733 struct ct_sns_rsp *ct_rsp;
2734 struct qla_hw_data *ha = vha->hw;
2735 uint8_t fcp_scsi_features = 0, nvme_features = 0;
2736 struct ct_arg arg;
2738 for (i = 0; i < ha->max_fibre_devices; i++) {
2739 /* Set default FC4 type as UNKNOWN so that, by default,
2740 * this port is still processed. */
2741 list[i].fc4_type = 0;
2743 /* Do not attempt GFF_ID if we are not FWI_2 capable */
2744 if (!IS_FWI2_CAPABLE(ha))
2745 continue;
2747 arg.iocb = ha->ms_iocb;
2748 arg.req_dma = ha->ct_sns_dma;
2749 arg.rsp_dma = ha->ct_sns_dma;
2750 arg.req_size = GFF_ID_REQ_SIZE;
2751 arg.rsp_size = GFF_ID_RSP_SIZE;
2752 arg.nport_handle = NPH_SNS;
2754 /* Prepare common MS IOCB */
2755 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2757 /* Prepare CT request */
2758 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2759 GFF_ID_RSP_SIZE);
2760 ct_rsp = &ha->ct_sns->p.rsp;
2762 /* Prepare CT arguments -- port_id */
2763 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2765 /* Execute MS IOCB */
2766 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2767 sizeof(ms_iocb_entry_t));
2769 if (rval != QLA_SUCCESS) {
2770 ql_dbg(ql_dbg_disc, vha, 0x205c,
2771 "GFF_ID issue IOCB failed (%d).\n", rval);
2772 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2773 "GFF_ID") != QLA_SUCCESS) {
2774 ql_dbg(ql_dbg_disc, vha, 0x205d,
2775 "GFF_ID IOCB status had a failure status code.\n");
2776 } else {
2777 fcp_scsi_features =
2778 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2779 fcp_scsi_features &= 0x0f;
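/*
 * GFF_ID reports one 4-bit feature field per FC-4 type; only the low
 * nibble of the FCP-SCSI entry is meaningful here.
 */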
2781 if (fcp_scsi_features) {
2782 list[i].fc4_type = FS_FC4TYPE_FCP;
2783 list[i].fc4_features = fcp_scsi_features;
2786 nvme_features =
2787 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2788 nvme_features &= 0xf;
2790 if (nvme_features) {
2791 list[i].fc4_type |= FS_FC4TYPE_NVME;
2792 list[i].fc4_features = nvme_features;
2796 /* Last device exit. */
2797 if (list[i].d_id.b.rsvd_1 != 0)
2798 break;
2802 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
2804 struct qla_work_evt *e;
2806 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
2807 if (!e)
2808 return QLA_FUNCTION_FAILED;
2810 e->u.fcport.fcport = fcport;
2811 return qla2x00_post_work(vha, e);
2814 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
2816 struct fc_port *fcport = ea->fcport;
2818 ql_dbg(ql_dbg_disc, vha, 0x20d8,
2819 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2820 __func__, fcport->port_name, fcport->disc_state,
2821 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
2822 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
2824 if (fcport->disc_state == DSC_DELETE_PEND)
2825 return;
2827 /* We will figure out what happened after AUTH completes */
2828 if (fcport->disc_state == DSC_LOGIN_AUTH_PEND)
2829 return;
2831 if (ea->sp->gen2 != fcport->login_gen) {
2832 /* target side must have changed it. */
2833 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2834 "%s %8phC generation changed\n",
2835 __func__, fcport->port_name);
2836 return;
2837 } else if (ea->sp->gen1 != fcport->rscn_gen) {
2838 return;
2841 qla_post_iidma_work(vha, fcport);
2844 static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
2846 struct scsi_qla_host *vha = sp->vha;
2847 struct qla_hw_data *ha = vha->hw;
2848 fc_port_t *fcport = sp->fcport;
2849 struct ct_sns_rsp *ct_rsp;
2850 struct event_arg ea;
2852 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
2854 ql_dbg(ql_dbg_disc, vha, 0x2053,
2855 "Async done-%s res %x, WWPN %8phC \n",
2856 sp->name, res, fcport->port_name);
2858 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2860 if (res == QLA_FUNCTION_TIMEOUT)
2861 goto done;
2863 if (res == (DID_ERROR << 16)) {
2864 /* entry status error */
2865 goto done;
2866 } else if (res) {
2867 if ((ct_rsp->header.reason_code ==
2868 CT_REASON_INVALID_COMMAND_CODE) ||
2869 (ct_rsp->header.reason_code ==
2870 CT_REASON_COMMAND_UNSUPPORTED)) {
2871 ql_dbg(ql_dbg_disc, vha, 0x2019,
2872 "GPSC command unsupported, disabling query.\n");
2873 ha->flags.gpsc_supported = 0;
2874 goto done;
2876 } else {
2877 fcport->fp_speed = qla2x00_port_speed_capability(
2878 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2880 ql_dbg(ql_dbg_disc, vha, 0x2054,
2881 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
2882 sp->name, fcport->fabric_port_name,
2883 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2884 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2886 memset(&ea, 0, sizeof(ea));
2887 ea.rc = res;
2888 ea.fcport = fcport;
2889 ea.sp = sp;
2890 qla24xx_handle_gpsc_event(vha, &ea);
2892 done:
2893 /* ref: INIT */
2894 kref_put(&sp->cmd_kref, qla2x00_sp_release);
2897 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
2899 int rval = QLA_FUNCTION_FAILED;
2900 struct ct_sns_req *ct_req;
2901 srb_t *sp;
2903 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
2904 return rval;
2906 /* ref: INIT */
2907 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2908 if (!sp)
2909 goto done;
2911 sp->type = SRB_CT_PTHRU_CMD;
2912 sp->name = "gpsc";
2913 sp->gen1 = fcport->rscn_gen;
2914 sp->gen2 = fcport->login_gen;
2915 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
2916 qla24xx_async_gpsc_sp_done);
2918 /* CT_IU preamble */
2919 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
2920 GPSC_RSP_SIZE);
2922 /* GPSC req */
2923 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
2924 WWN_SIZE);
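/* The request and response share the fcport's pre-allocated ct_sns buffer. */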
2926 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
2927 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
2928 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
2929 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
2930 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
2931 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
2932 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
2934 ql_dbg(ql_dbg_disc, vha, 0x205e,
2935 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
2936 sp->name, fcport->port_name, sp->handle,
2937 fcport->loop_id, fcport->d_id.b.domain,
2938 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2940 rval = qla2x00_start_sp(sp);
2941 if (rval != QLA_SUCCESS)
2942 goto done_free_sp;
2943 return rval;
2945 done_free_sp:
2946 /* ref: INIT */
2947 kref_put(&sp->cmd_kref, qla2x00_sp_release);
2948 done:
2949 return rval;
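/*
 * Free any CT pass-through request/response buffers still attached to
 * the srb and drop its initial reference.
 */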
2952 void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
2954 struct srb_iocb *c = &sp->u.iocb_cmd;
2956 switch (sp->type) {
2957 case SRB_ELS_DCMD:
2958 qla2x00_els_dcmd2_free(vha, &c->u.els_plogi);
2959 break;
2960 case SRB_CT_PTHRU_CMD:
2961 default:
2962 if (sp->u.iocb_cmd.u.ctarg.req) {
2963 dma_free_coherent(&vha->hw->pdev->dev,
2964 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
2965 sp->u.iocb_cmd.u.ctarg.req,
2966 sp->u.iocb_cmd.u.ctarg.req_dma);
2967 sp->u.iocb_cmd.u.ctarg.req = NULL;
2970 if (sp->u.iocb_cmd.u.ctarg.rsp) {
2971 dma_free_coherent(&vha->hw->pdev->dev,
2972 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
2973 sp->u.iocb_cmd.u.ctarg.rsp,
2974 sp->u.iocb_cmd.u.ctarg.rsp_dma);
2975 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
2977 break;
2980 /* ref: INIT */
2981 kref_put(&sp->cmd_kref, qla2x00_sp_release);
2984 void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
2986 struct scsi_qla_host *vha = sp->vha;
2987 fc_port_t *fcport = sp->fcport;
2988 struct ct_sns_rsp *ct_rsp;
2989 uint8_t fc4_scsi_feat;
2990 uint8_t fc4_nvme_feat;
2992 ql_dbg(ql_dbg_disc, vha, 0x2133,
2993 "Async done-%s res %x ID %x. %8phC\n",
2994 sp->name, res, fcport->d_id.b24, fcport->port_name);
2996 ct_rsp = sp->u.iocb_cmd.u.ctarg.rsp;
2997 fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2998 fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2999 sp->rc = res;
3001 /*
3002 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3003 * The format of the FC-4 Features object, as defined by the FC-4,
3004 * shall be an array of 4-bit values, one for each type code value.
3005 */
3006 if (!res) {
3007 if (fc4_scsi_feat & 0xf) {
3008 /* w1 b00:03 */
3009 fcport->fc4_type = FS_FC4TYPE_FCP;
3010 fcport->fc4_features = fc4_scsi_feat & 0xf;
3013 if (fc4_nvme_feat & 0xf) {
3014 /* w5 [00:03]/28h */
3015 fcport->fc4_type |= FS_FC4TYPE_NVME;
3016 fcport->fc4_features = fc4_nvme_feat & 0xf;
3020 if (sp->flags & SRB_WAKEUP_ON_COMP) {
3021 complete(sp->comp);
3022 } else {
3023 if (sp->u.iocb_cmd.u.ctarg.req) {
3024 dma_free_coherent(&vha->hw->pdev->dev,
3025 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3026 sp->u.iocb_cmd.u.ctarg.req,
3027 sp->u.iocb_cmd.u.ctarg.req_dma);
3028 sp->u.iocb_cmd.u.ctarg.req = NULL;
3031 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3032 dma_free_coherent(&vha->hw->pdev->dev,
3033 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3034 sp->u.iocb_cmd.u.ctarg.rsp,
3035 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3036 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3039 /* ref: INIT */
3040 kref_put(&sp->cmd_kref, qla2x00_sp_release);
3041 /* we should not be here */
3042 dump_stack();
3046 /* Get FC4 Feature with Nport ID. */
3047 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool wait)
3049 int rval = QLA_FUNCTION_FAILED;
3050 struct ct_sns_req *ct_req;
3051 srb_t *sp;
3052 DECLARE_COMPLETION_ONSTACK(comp);
3054 /* this routine does not have handling for no wait */
3055 if (!vha->flags.online || !wait)
3056 return rval;
3058 /* ref: INIT */
3059 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3060 if (!sp)
3061 return rval;
3063 sp->type = SRB_CT_PTHRU_CMD;
3064 sp->name = "gffid";
3065 sp->gen1 = fcport->rscn_gen;
3066 sp->gen2 = fcport->login_gen;
3067 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
3068 qla24xx_async_gffid_sp_done);
3069 sp->comp = &comp;
3070 sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
3072 if (wait)
3073 sp->flags = SRB_WAKEUP_ON_COMP;
3075 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3076 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3077 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3078 &sp->u.iocb_cmd.u.ctarg.req_dma,
3079 GFP_KERNEL);
3080 if (!sp->u.iocb_cmd.u.ctarg.req) {
3081 ql_log(ql_log_warn, vha, 0xd041,
3082 "%s: Failed to allocate ct_sns request.\n",
3083 __func__);
3084 goto done_free_sp;
3087 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
3088 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3089 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3090 &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3091 GFP_KERNEL);
3092 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3093 ql_log(ql_log_warn, vha, 0xd041,
3094 "%s: Failed to allocate ct_sns response.\n",
3095 __func__);
3096 goto done_free_sp;
3099 /* CT_IU preamble */
3100 ct_req = qla2x00_prep_ct_req(sp->u.iocb_cmd.u.ctarg.req, GFF_ID_CMD, GFF_ID_RSP_SIZE);
3102 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3103 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3104 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
3106 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3107 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3108 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3110 rval = qla2x00_start_sp(sp);
3112 if (rval != QLA_SUCCESS) {
3113 rval = QLA_FUNCTION_FAILED;
3114 goto done_free_sp;
3115 } else {
3116 ql_dbg(ql_dbg_disc, vha, 0x3074,
3117 "Async-%s hdl=%x portid %06x\n",
3118 sp->name, sp->handle, fcport->d_id.b24);
3121 wait_for_completion(sp->comp);
3122 rval = sp->rc;
3124 done_free_sp:
3125 if (sp->u.iocb_cmd.u.ctarg.req) {
3126 dma_free_coherent(&vha->hw->pdev->dev,
3127 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3128 sp->u.iocb_cmd.u.ctarg.req,
3129 sp->u.iocb_cmd.u.ctarg.req_dma);
3130 sp->u.iocb_cmd.u.ctarg.req = NULL;
3133 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3134 dma_free_coherent(&vha->hw->pdev->dev,
3135 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3136 sp->u.iocb_cmd.u.ctarg.rsp,
3137 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3138 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3141 /* ref: INIT */
3142 kref_put(&sp->cmd_kref, qla2x00_sp_release);
3143 return rval;
3146 /* GPN_FT + GNN_FT*/
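/*
 * Return 1 if @wwn matches the port name of one of this host's own
 * virtual ports; such entries are skipped during fabric scan processing.
 */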
3147 static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3149 struct qla_hw_data *ha = vha->hw;
3150 scsi_qla_host_t *vp;
3151 unsigned long flags;
3152 u64 twwn;
3153 int rc = 0;
3155 if (!ha->num_vhosts)
3156 return 0;
3158 spin_lock_irqsave(&ha->vport_slock, flags);
3159 list_for_each_entry(vp, &ha->vp_list, list) {
3160 twwn = wwn_to_u64(vp->port_name);
3161 if (wwn == twwn) {
3162 rc = 1;
3163 break;
3166 spin_unlock_irqrestore(&ha->vport_slock, flags);
3168 return rc;
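/*
 * Decide whether the fcport's scan_needed flag may be cleared: an RSCN
 * that arrived before or during the fabric scan is covered by this scan,
 * while one that arrived after it must trigger another scan.
 */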
3171 static bool qla_ok_to_clear_rscn(scsi_qla_host_t *vha, fc_port_t *fcport)
3173 u32 rscn_gen;
3175 rscn_gen = atomic_read(&vha->rscn_gen);
3176 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2017,
3177 "%s %d %8phC rscn_gen %x start %x end %x current %x\n",
3178 __func__, __LINE__, fcport->port_name, fcport->rscn_gen,
3179 vha->scan.rscn_gen_start, vha->scan.rscn_gen_end, rscn_gen);
3181 if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_start,
3182 vha->scan.rscn_gen_end))
3183 /* rscn came in before fabric scan */
3184 return true;
3186 if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_end, rscn_gen))
3187 /* rscn came in after fabric scan */
3188 return false;
3190 /* rare: fcport's scan_needed + rscn_gen must be stale */
3191 return true;
3194 void qla_fab_scan_finish(scsi_qla_host_t *vha, srb_t *sp)
3196 fc_port_t *fcport;
3197 u32 i, rc;
3198 bool found;
3199 struct fab_scan_rp *rp, *trp;
3200 unsigned long flags;
3201 u8 recheck = 0;
3202 u16 dup = 0, dup_cnt = 0;
3204 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3205 "%s enter\n", __func__);
3207 if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3208 ql_dbg(ql_dbg_disc, vha, 0xffff,
3209 "%s scan stop due to chip reset %x/%x\n",
3210 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
3211 goto out;
3214 rc = sp->rc;
3215 if (rc) {
3216 vha->scan.scan_retry++;
3217 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3218 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3219 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3220 goto out;
3221 } else {
3222 ql_dbg(ql_dbg_disc, vha, 0xffff,
3223 "%s: Fabric scan failed for %d retries.\n",
3224 __func__, vha->scan.scan_retry);
3225 /*
3226 * Unable to scan any rports. logout loop below
3227 * will unregister all sessions.
3228 */
3229 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3230 if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
3231 fcport->scan_state = QLA_FCPORT_SCAN;
3232 if (fcport->loop_id == FC_NO_LOOP_ID)
3233 fcport->logout_on_delete = 0;
3234 else
3235 fcport->logout_on_delete = 1;
3238 goto login_logout;
3241 vha->scan.scan_retry = 0;
3243 list_for_each_entry(fcport, &vha->vp_fcports, list)
3244 fcport->scan_state = QLA_FCPORT_SCAN;
3246 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3247 u64 wwn;
3248 int k;
3250 rp = &vha->scan.l[i];
3251 found = false;
3253 wwn = wwn_to_u64(rp->port_name);
3254 if (wwn == 0)
3255 continue;
3257 /* Remove duplicate NPORT ID entries from switch data base */
3258 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
3259 trp = &vha->scan.l[k];
3260 if (rp->id.b24 == trp->id.b24) {
3261 dup = 1;
3262 dup_cnt++;
3263 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
3264 vha, 0xffff,
3265 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
3266 rp->id.b24, rp->port_name, trp->port_name);
3267 memset(trp, 0, sizeof(*trp));
3271 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3272 continue;
3274 /* Bypass reserved domain fields. */
3275 if ((rp->id.b.domain & 0xf0) == 0xf0)
3276 continue;
3278 /* Bypass virtual ports of the same host. */
3279 if (qla2x00_is_a_vp(vha, wwn))
3280 continue;
3282 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3283 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3284 continue;
3285 fcport->scan_state = QLA_FCPORT_FOUND;
3286 fcport->last_rscn_gen = fcport->rscn_gen;
3287 fcport->fc4_type = rp->fc4type;
3288 found = true;
3290 if (fcport->scan_needed) {
3291 if (NVME_PRIORITY(vha->hw, fcport))
3292 fcport->do_prli_nvme = 1;
3293 else
3294 fcport->do_prli_nvme = 0;
3295 }
3297 /*
3298 * If device was not a fabric device before.
3299 */
3300 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3301 qla2x00_clear_loop_id(fcport);
3302 fcport->flags |= FCF_FABRIC_DEVICE;
3303 } else if (fcport->d_id.b24 != rp->id.b24 ||
3304 (fcport->scan_needed &&
3305 fcport->port_type != FCT_INITIATOR &&
3306 fcport->port_type != FCT_NVME_INITIATOR)) {
3307 fcport->scan_needed = 0;
3308 qlt_schedule_sess_for_deletion(fcport);
3310 fcport->d_id.b24 = rp->id.b24;
3311 break;
3314 if (!found) {
3315 ql_dbg(ql_dbg_disc, vha, 0xffff,
3316 "%s %d %8phC post new sess\n",
3317 __func__, __LINE__, rp->port_name);
3318 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3319 rp->node_name, NULL, rp->fc4type);
3323 if (dup) {
3324 ql_log(ql_log_warn, vha, 0xffff,
3325 "Detected %d duplicate NPORT ID(s) from switch data base\n",
3326 dup_cnt);
3329 login_logout:
3330 /*
3331 * Logout all previous fabric dev marked lost, except FCP2 devices.
3332 */
3333 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3334 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3335 fcport->scan_needed = 0;
3336 continue;
3339 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3340 bool do_delete = false;
3342 if (fcport->scan_needed &&
3343 fcport->disc_state == DSC_LOGIN_PEND) {
3344 /* Cable got disconnected after we sent
3345 * a login. Do delete to prevent timeout.
3346 */
3347 fcport->logout_on_delete = 1;
3348 do_delete = true;
3351 if (qla_ok_to_clear_rscn(vha, fcport))
3352 fcport->scan_needed = 0;
3354 if (((qla_dual_mode_enabled(vha) ||
3355 qla_ini_mode_enabled(vha)) &&
3356 atomic_read(&fcport->state) == FCS_ONLINE) ||
3357 do_delete) {
3358 if (fcport->loop_id != FC_NO_LOOP_ID) {
3359 if (fcport->flags & FCF_FCP2_DEVICE)
3360 continue;
3362 ql_log(ql_log_warn, vha, 0x20f0,
3363 "%s %d %8phC post del sess\n",
3364 __func__, __LINE__,
3365 fcport->port_name);
3367 fcport->tgt_link_down_time = 0;
3368 qlt_schedule_sess_for_deletion(fcport);
3369 continue;
3372 } else {
3373 if (fcport->scan_needed ||
3374 fcport->disc_state != DSC_LOGIN_COMPLETE) {
3375 if (fcport->login_retry == 0) {
3376 fcport->login_retry =
3377 vha->hw->login_retry_count;
3378 ql_dbg(ql_dbg_disc, vha, 0x20a3,
3379 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
3380 fcport->port_name, fcport->loop_id,
3381 fcport->login_retry);
3384 if (qla_ok_to_clear_rscn(vha, fcport))
3385 fcport->scan_needed = 0;
3386 qla24xx_fcport_handle_login(vha, fcport);
3391 recheck = 1;
3392 out:
3393 qla24xx_sp_unmap(vha, sp);
3394 spin_lock_irqsave(&vha->work_lock, flags);
3395 vha->scan.scan_flags &= ~SF_SCANNING;
3396 spin_unlock_irqrestore(&vha->work_lock, flags);
3398 if (recheck) {
3399 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3400 if (fcport->scan_needed) {
3401 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3402 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3403 break;
3409 static int qla2x00_post_next_scan_work(struct scsi_qla_host *vha,
3410 srb_t *sp, int cmd)
3412 struct qla_work_evt *e;
3414 e = qla2x00_alloc_work(vha, cmd);
3415 if (!e)
3416 return QLA_FUNCTION_FAILED;
3418 e->u.iosb.sp = sp;
3420 return qla2x00_post_work(vha, e);
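/*
 * Merge the current GPN_FT/GNN_FT response into vha->scan.l: GPN_FT(FCP)
 * fills new slots, GNN_FT passes add node names to matching port IDs, and
 * GPN_FT(NVMe) marks existing slots as NVMe-capable or adds NVMe-only ports.
 */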
3423 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
3424 struct srb *sp)
3426 struct qla_hw_data *ha = vha->hw;
3427 int num_fibre_dev = ha->max_fibre_devices;
3428 struct ct_sns_gpnft_rsp *ct_rsp =
3429 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3430 struct ct_sns_gpn_ft_data *d;
3431 struct fab_scan_rp *rp;
3432 int i, j, k;
3433 port_id_t id;
3434 u8 found;
3435 u64 wwn;
3437 j = 0;
3438 for (i = 0; i < num_fibre_dev; i++) {
3439 d = &ct_rsp->entries[i];
3441 id.b.rsvd_1 = 0;
3442 id.b.domain = d->port_id[0];
3443 id.b.area = d->port_id[1];
3444 id.b.al_pa = d->port_id[2];
3445 wwn = wwn_to_u64(d->port_name);
3447 if (id.b24 == 0 || wwn == 0)
3448 continue;
3450 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2025,
3451 "%s %06x %8ph \n",
3452 __func__, id.b24, d->port_name);
3454 switch (vha->scan.step) {
3455 case FAB_SCAN_GPNFT_FCP:
3456 rp = &vha->scan.l[j];
3457 rp->id = id;
3458 memcpy(rp->port_name, d->port_name, 8);
3459 j++;
3460 rp->fc4type = FS_FC4TYPE_FCP;
3461 break;
3462 case FAB_SCAN_GNNFT_FCP:
3463 for (k = 0; k < num_fibre_dev; k++) {
3464 rp = &vha->scan.l[k];
3465 if (id.b24 == rp->id.b24) {
3466 memcpy(rp->node_name,
3467 d->port_name, 8);
3468 break;
3471 break;
3472 case FAB_SCAN_GPNFT_NVME:
3473 found = 0;
3475 for (k = 0; k < num_fibre_dev; k++) {
3476 rp = &vha->scan.l[k];
3477 if (!memcmp(rp->port_name, d->port_name, 8)) {
3478 /*
3479 * Supports FC-NVMe & FCP
3480 */
3481 rp->fc4type |= FS_FC4TYPE_NVME;
3482 found = 1;
3483 break;
3487 /* We found new FC-NVMe only port */
3488 if (!found) {
3489 for (k = 0; k < num_fibre_dev; k++) {
3490 rp = &vha->scan.l[k];
3491 if (wwn_to_u64(rp->port_name)) {
3492 continue;
3493 } else {
3494 rp->id = id;
3495 memcpy(rp->port_name, d->port_name, 8);
3496 rp->fc4type = FS_FC4TYPE_NVME;
3497 break;
3501 break;
3502 case FAB_SCAN_GNNFT_NVME:
3503 for (k = 0; k < num_fibre_dev; k++) {
3504 rp = &vha->scan.l[k];
3505 if (id.b24 == rp->id.b24) {
3506 memcpy(rp->node_name, d->port_name, 8);
3507 break;
3510 break;
3511 default:
3512 break;
3517 static void qla_async_scan_sp_done(srb_t *sp, int res)
3519 struct scsi_qla_host *vha = sp->vha;
3520 unsigned long flags;
3521 int rc;
3523 /* gen2 field is holding the fc4type */
3524 ql_dbg(ql_dbg_disc, vha, 0x2026,
3525 "Async done-%s res %x step %x\n",
3526 sp->name, res, vha->scan.step);
3528 sp->rc = res;
3529 if (res) {
3530 unsigned long flags;
3531 const char *name = sp->name;
3533 if (res == QLA_OS_TIMER_EXPIRED) {
3534 /* switch is ignoring all commands.
3535 * This might be a zone disable behavior.
3536 * This means we hit 64s timeout.
3537 * 22s GPNFT + 44s Abort = 64s
3538 */
3539 ql_dbg(ql_dbg_disc, vha, 0xffff,
3540 "%s: Switch Zone check please .\n",
3541 name);
3542 qla2x00_mark_all_devices_lost(vha);
3543 }
3545 /*
3546 * We are in an interrupt context; queue up this
3547 * sp for QLA_EVT_SCAN_FINISH work so that all of
3548 * its resources can be freed.
3549 */
3550 rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH);
3551 if (rc) {
3552 /* Cleanup here to prevent memory leak */
3553 qla24xx_sp_unmap(vha, sp);
3555 spin_lock_irqsave(&vha->work_lock, flags);
3556 vha->scan.scan_flags &= ~SF_SCANNING;
3557 vha->scan.scan_retry++;
3558 spin_unlock_irqrestore(&vha->work_lock, flags);
3560 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3561 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3562 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3563 qla2xxx_wake_dpc(vha);
3564 } else {
3565 ql_dbg(ql_dbg_disc, vha, 0xffff,
3566 "Async done-%s rescan failed on all retries.\n",
3567 name);
3570 return;
3573 qla2x00_find_free_fcp_nvme_slot(vha, sp);
3575 spin_lock_irqsave(&vha->work_lock, flags);
3576 vha->scan.scan_flags &= ~SF_SCANNING;
3577 spin_unlock_irqrestore(&vha->work_lock, flags);
3579 switch (vha->scan.step) {
3580 case FAB_SCAN_GPNFT_FCP:
3581 case FAB_SCAN_GPNFT_NVME:
3582 rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_CMD);
3583 break;
3584 case FAB_SCAN_GNNFT_FCP:
3585 if (vha->flags.nvme_enabled)
3586 rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_CMD);
3587 else
3588 rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH);
3590 break;
3591 case FAB_SCAN_GNNFT_NVME:
3592 rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH);
3593 break;
3594 default:
3595 /* should not be here */
3596 WARN_ON(1);
3597 rc = QLA_FUNCTION_FAILED;
3598 break;
3601 if (rc) {
3602 qla24xx_sp_unmap(vha, sp);
3603 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3604 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3605 return;
3609 /* Get WWPN list for certain fc4_type */
3610 int qla_fab_async_scan(scsi_qla_host_t *vha, srb_t *sp)
3612 int rval = QLA_FUNCTION_FAILED;
3613 struct ct_sns_req *ct_req;
3614 struct ct_sns_pkt *ct_sns;
3615 u32 rspsz = 0;
3616 unsigned long flags;
3618 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x200c,
3619 "%s enter\n", __func__);
3621 if (!vha->flags.online)
3622 return rval;
3624 spin_lock_irqsave(&vha->work_lock, flags);
3625 if (vha->scan.scan_flags & SF_SCANNING) {
3626 spin_unlock_irqrestore(&vha->work_lock, flags);
3627 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2012,
3628 "%s: scan active\n", __func__);
3629 return rval;
3631 vha->scan.scan_flags |= SF_SCANNING;
3632 if (!sp)
3633 vha->scan.step = FAB_SCAN_START;
3635 spin_unlock_irqrestore(&vha->work_lock, flags);
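/*
 * The fabric scan walks through up to four CT passes:
 * GPN_FT(FCP) -> GNN_FT(FCP) -> GPN_FT(NVMe) -> GNN_FT(NVMe),
 * with the NVMe passes issued only when NVMe is enabled. The srb and its
 * DMA buffers are allocated on the first (FAB_SCAN_START) pass and reused
 * for the remaining passes.
 */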
3637 switch (vha->scan.step) {
3638 case FAB_SCAN_START:
3639 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2018,
3640 "%s: Performing FCP Scan\n", __func__);
3642 /* ref: INIT */
3643 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3644 if (!sp) {
3645 spin_lock_irqsave(&vha->work_lock, flags);
3646 vha->scan.scan_flags &= ~SF_SCANNING;
3647 spin_unlock_irqrestore(&vha->work_lock, flags);
3648 return rval;
3651 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3652 sizeof(struct ct_sns_pkt),
3653 &sp->u.iocb_cmd.u.ctarg.req_dma,
3654 GFP_KERNEL);
3655 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3656 if (!sp->u.iocb_cmd.u.ctarg.req) {
3657 ql_log(ql_log_warn, vha, 0x201a,
3658 "Failed to allocate ct_sns request.\n");
3659 spin_lock_irqsave(&vha->work_lock, flags);
3660 vha->scan.scan_flags &= ~SF_SCANNING;
3661 spin_unlock_irqrestore(&vha->work_lock, flags);
3662 qla2x00_rel_sp(sp);
3663 return rval;
3666 rspsz = sizeof(struct ct_sns_gpnft_rsp) +
3667 vha->hw->max_fibre_devices *
3668 sizeof(struct ct_sns_gpn_ft_data);
3670 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3671 rspsz,
3672 &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3673 GFP_KERNEL);
3674 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
3675 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3676 ql_log(ql_log_warn, vha, 0x201b,
3677 "Failed to allocate ct_sns request.\n");
3678 spin_lock_irqsave(&vha->work_lock, flags);
3679 vha->scan.scan_flags &= ~SF_SCANNING;
3680 spin_unlock_irqrestore(&vha->work_lock, flags);
3681 dma_free_coherent(&vha->hw->pdev->dev,
3682 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3683 sp->u.iocb_cmd.u.ctarg.req,
3684 sp->u.iocb_cmd.u.ctarg.req_dma);
3685 sp->u.iocb_cmd.u.ctarg.req = NULL;
3686 /* ref: INIT */
3687 qla2x00_rel_sp(sp);
3688 return rval;
3690 sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
3692 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3693 "%s scan list size %d\n", __func__, vha->scan.size);
3695 memset(vha->scan.l, 0, vha->scan.size);
3697 vha->scan.step = FAB_SCAN_GPNFT_FCP;
3698 break;
3699 case FAB_SCAN_GPNFT_FCP:
3700 vha->scan.step = FAB_SCAN_GNNFT_FCP;
3701 break;
3702 case FAB_SCAN_GNNFT_FCP:
3703 vha->scan.step = FAB_SCAN_GPNFT_NVME;
3704 break;
3705 case FAB_SCAN_GPNFT_NVME:
3706 vha->scan.step = FAB_SCAN_GNNFT_NVME;
3707 break;
3708 case FAB_SCAN_GNNFT_NVME:
3709 default:
3710 /* should not be here */
3711 WARN_ON(1);
3712 goto done_free_sp;
3715 if (!sp) {
3716 ql_dbg(ql_dbg_disc, vha, 0x201c,
3717 "scan did not provide SP\n");
3718 return rval;
3720 if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
3721 ql_log(ql_log_warn, vha, 0x201d,
3722 "%s: req %p rsp %p are not setup\n",
3723 __func__, sp->u.iocb_cmd.u.ctarg.req,
3724 sp->u.iocb_cmd.u.ctarg.rsp);
3725 spin_lock_irqsave(&vha->work_lock, flags);
3726 vha->scan.scan_flags &= ~SF_SCANNING;
3727 spin_unlock_irqrestore(&vha->work_lock, flags);
3728 WARN_ON(1);
3729 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3730 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3731 goto done_free_sp;
3734 rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
3735 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
3736 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
3739 sp->type = SRB_CT_PTHRU_CMD;
3740 sp->gen1 = vha->hw->base_qpair->chip_reset;
3741 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
3742 qla_async_scan_sp_done);
3744 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3746 /* CT_IU preamble */
3747 switch (vha->scan.step) {
3748 case FAB_SCAN_GPNFT_FCP:
3749 sp->name = "gpnft";
3750 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
3751 ct_req->req.gpn_ft.port_type = FC4_TYPE_FCP_SCSI;
3752 sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
3753 break;
3754 case FAB_SCAN_GNNFT_FCP:
3755 sp->name = "gnnft";
3756 ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, rspsz);
3757 ct_req->req.gpn_ft.port_type = FC4_TYPE_FCP_SCSI;
3758 sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
3759 break;
3760 case FAB_SCAN_GPNFT_NVME:
3761 sp->name = "gpnft";
3762 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
3763 ct_req->req.gpn_ft.port_type = FC4_TYPE_NVME;
3764 sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
3765 break;
3766 case FAB_SCAN_GNNFT_NVME:
3767 sp->name = "gnnft";
3768 ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, rspsz);
3769 ct_req->req.gpn_ft.port_type = FC4_TYPE_NVME;
3770 sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
3771 break;
3772 default:
3773 /* should not be here */
3774 WARN_ON(1);
3775 goto done_free_sp;
3778 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3780 ql_dbg(ql_dbg_disc, vha, 0x2003,
3781 "%s: step %d, rsp size %d, req size %d hdl %x %s FC4TYPE %x \n",
3782 __func__, vha->scan.step, sp->u.iocb_cmd.u.ctarg.rsp_size,
3783 sp->u.iocb_cmd.u.ctarg.req_size, sp->handle, sp->name,
3784 ct_req->req.gpn_ft.port_type);
3786 rval = qla2x00_start_sp(sp);
3787 if (rval != QLA_SUCCESS) {
3788 goto done_free_sp;
3791 return rval;
3793 done_free_sp:
3794 if (sp->u.iocb_cmd.u.ctarg.req) {
3795 dma_free_coherent(&vha->hw->pdev->dev,
3796 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3797 sp->u.iocb_cmd.u.ctarg.req,
3798 sp->u.iocb_cmd.u.ctarg.req_dma);
3799 sp->u.iocb_cmd.u.ctarg.req = NULL;
3801 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3802 dma_free_coherent(&vha->hw->pdev->dev,
3803 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3804 sp->u.iocb_cmd.u.ctarg.rsp,
3805 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3806 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3809 /* ref: INIT */
3810 kref_put(&sp->cmd_kref, qla2x00_sp_release);
3812 spin_lock_irqsave(&vha->work_lock, flags);
3813 vha->scan.scan_flags &= ~SF_SCANNING;
3814 if (vha->scan.scan_flags == 0) {
3815 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2007,
3816 "%s: Scan scheduled.\n", __func__);
3817 vha->scan.scan_flags |= SF_QUEUED;
3818 schedule_delayed_work(&vha->scan.scan_work, 5);
3820 spin_unlock_irqrestore(&vha->work_lock, flags);
3823 return rval;
3826 void qla_fab_scan_start(struct scsi_qla_host *vha)
3828 int rval;
3830 rval = qla_fab_async_scan(vha, NULL);
3831 if (rval)
3832 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
void qla_scan_work_fn(struct work_struct *work)
{
        struct fab_scan *s = container_of(to_delayed_work(work),
            struct fab_scan, scan_work);
        struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
            scan);
        unsigned long flags;

        ql_dbg(ql_dbg_disc, vha, 0xffff,
            "%s: schedule loop resync\n", __func__);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        qla2xxx_wake_dpc(vha);
        spin_lock_irqsave(&vha->work_lock, flags);
        vha->scan.scan_flags &= ~SF_QUEUED;
        spin_unlock_irqrestore(&vha->work_lock, flags);
}
/* GFPN_ID */
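/**
 * qla24xx_handle_gfpnid_event() - Process a completed GFPN_ID query.
 * @vha: HA context
 * @ea: event arguments carrying the fcport and the originating SRB
 *
 * Ignores stale completions (login/RSCN generation mismatch or a pending
 * delete) and otherwise posts GPSC work for the port.
 */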
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
        fc_port_t *fcport = ea->fcport;

        ql_dbg(ql_dbg_disc, vha, 0xffff,
            "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
            __func__, fcport->port_name, fcport->disc_state,
            fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
            fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);

        if (fcport->disc_state == DSC_DELETE_PEND)
                return;

        if (ea->sp->gen2 != fcport->login_gen) {
                /* target side must have changed it. */
                ql_dbg(ql_dbg_disc, vha, 0x20d3,
                    "%s %8phC generation changed\n",
                    __func__, fcport->port_name);
                return;
        } else if (ea->sp->gen1 != fcport->rscn_gen) {
                return;
        }

        qla24xx_post_gpsc_work(vha, fcport);
}
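
/**
 * qla2x00_async_gfpnid_sp_done() - Completion callback for the GFPN_ID SRB.
 * @sp: SRB that completed
 * @res: completion status of the CT passthrough command
 *
 * Copies the returned fabric port name into the fcport, hands the result to
 * qla24xx_handle_gfpnid_event() and drops the INIT reference on the SRB.
 */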
static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
{
        struct scsi_qla_host *vha = sp->vha;
        fc_port_t *fcport = sp->fcport;
        u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
        struct event_arg ea;
        u64 wwn;

        wwn = wwn_to_u64(fpn);
        if (wwn)
                memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);

        memset(&ea, 0, sizeof(ea));
        ea.fcport = fcport;
        ea.sp = sp;
        ea.rc = res;

        ql_dbg(ql_dbg_disc, vha, 0x204f,
            "Async done-%s res %x, WWPN %8phC %8phC\n",
            sp->name, res, fcport->port_name, fcport->fabric_port_name);

        qla24xx_handle_gfpnid_event(vha, &ea);

        /* ref: INIT */
        kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
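
/**
 * qla24xx_async_gfpnid() - Issue an asynchronous GFPN_ID CT query.
 * @vha: HA context
 * @fcport: port whose fabric port name is requested
 *
 * Returns QLA_SUCCESS if the SRB was started, QLA_FUNCTION_FAILED otherwise.
 */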
int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
{
        int rval = QLA_FUNCTION_FAILED;
        struct ct_sns_req *ct_req;
        srb_t *sp;

        if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
                return rval;

        /* ref: INIT */
        sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
        if (!sp)
                goto done;

        sp->type = SRB_CT_PTHRU_CMD;
        sp->name = "gfpnid";
        sp->gen1 = fcport->rscn_gen;
        sp->gen2 = fcport->login_gen;
        qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
                              qla2x00_async_gfpnid_sp_done);

        /* CT_IU preamble */
        ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
            GFPN_ID_RSP_SIZE);

        /* GFPN_ID req */
        ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);

        /* req & rsp use the same buffer */
        sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
        sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
        sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
        sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
        sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
        sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
        sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

        ql_dbg(ql_dbg_disc, vha, 0xffff,
            "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
            sp->name, fcport->port_name,
            sp->handle, fcport->loop_id, fcport->d_id.b24);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS)
                goto done_free_sp;

        return rval;

done_free_sp:
        /* ref: INIT */
        kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
        return rval;
}
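
/**
 * qla24xx_post_gfpnid_work() - Queue a GFPN_ID work event.
 * @vha: HA context
 * @fcport: port to query
 *
 * Returns 0 if the loop is not ready/up or the host is unloading, otherwise
 * the result of qla2x00_post_work().
 */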
int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
        struct qla_work_evt *e;
        int ls;

        ls = atomic_read(&vha->loop_state);
        if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
            test_bit(UNLOADING, &vha->dpc_flags))
                return 0;

        e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
        if (!e)
                return QLA_FUNCTION_FAILED;

        e->u.fcport.fcport = fcport;
        return qla2x00_post_work(vha, e);