drivers/scsi/qla2xxx/qla_gs.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
9 #include <linux/utsname.h>
11 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
12 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
15 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
16 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
17 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
18 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
19 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
20 static int qla_async_rsnn_nn(scsi_qla_host_t *);
22 /**
23 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
24 * @vha: HA context
25 * @arg: CT arguments
27 * Returns a pointer to the @vha's ms_iocb.
29 void *
30 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
32 struct qla_hw_data *ha = vha->hw;
33 ms_iocb_entry_t *ms_pkt;
35 ms_pkt = (ms_iocb_entry_t *)arg->iocb;
36 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
38 ms_pkt->entry_type = MS_IOCB_TYPE;
39 ms_pkt->entry_count = 1;
40 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
41 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
42 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
43 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
44 ms_pkt->total_dsd_count = cpu_to_le16(2);
45 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
46 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
48 put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
49 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
51 put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
52 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
54 vha->qla_stats.control_requests++;
56 return (ms_pkt);
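/*
 * A condensed sketch of how these MS/CT helpers are used throughout this
 * file (it mirrors qla2x00_ga_nxt() below; error handling and the legacy
 * ISP2100/ISP2200 path are omitted):
 *
 *	struct ct_arg arg = {
 *		.iocb         = ha->ms_iocb,
 *		.req_dma      = ha->ct_sns_dma,
 *		.rsp_dma      = ha->ct_sns_dma,
 *		.req_size     = GA_NXT_REQ_SIZE,
 *		.rsp_size     = GA_NXT_RSP_SIZE,
 *		.nport_handle = NPH_SNS,
 *	};
 *
 *	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
 *	ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD, GA_NXT_RSP_SIZE);
 *	... fill ct_req->req with the command-specific arguments ...
 *	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 *	    sizeof(ms_iocb_entry_t));
 *	rval = qla2x00_chk_ms_status(vha, ms_pkt, &ha->ct_sns->p.rsp, "GA_NXT");
 *
 * prep_ms_iocb typically resolves to the routine above on older ISPs and to
 * qla24xx_prep_ms_iocb() on FWI2-capable parts.
 */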
59 /**
60 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
61 * @vha: HA context
62 * @arg: CT arguments
64 * Returns a pointer to the @ha's ms_iocb.
66 void *
67 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
69 struct qla_hw_data *ha = vha->hw;
70 struct ct_entry_24xx *ct_pkt;
72 ct_pkt = (struct ct_entry_24xx *)arg->iocb;
73 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
75 ct_pkt->entry_type = CT_IOCB_TYPE;
76 ct_pkt->entry_count = 1;
77 ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
78 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
79 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
80 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
81 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
82 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
84 put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
85 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
87 put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
88 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
89 ct_pkt->vp_index = vha->vp_idx;
91 vha->qla_stats.control_requests++;
93 return (ct_pkt);
96 /**
97 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
98 * @p: CT request buffer
99 * @cmd: GS command
100 * @rsp_size: response size in bytes
102 * Returns a pointer to the initialized @ct_req.
104 static inline struct ct_sns_req *
105 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
107 memset(p, 0, sizeof(struct ct_sns_pkt));
109 p->p.req.header.revision = 0x01;
110 p->p.req.header.gs_type = 0xFC;
111 p->p.req.header.gs_subtype = 0x02;
112 p->p.req.command = cpu_to_be16(cmd);
113 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
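/*
 * CT_IU preamble for the name server: revision 1, GS type 0xFC (directory
 * service), GS subtype 0x02 (name server).  The maximum/residual size field
 * is expressed in 4-byte words and excludes the 16-byte CT header, hence the
 * (rsp_size - 16) / 4 conversion above.
 */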
115 return &p->p.req;
119 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
120 struct ct_sns_rsp *ct_rsp, const char *routine)
122 int rval;
123 uint16_t comp_status;
124 struct qla_hw_data *ha = vha->hw;
125 bool lid_is_sns = false;
127 rval = QLA_FUNCTION_FAILED;
128 if (ms_pkt->entry_status != 0) {
129 ql_dbg(ql_dbg_disc, vha, 0x2031,
130 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
131 routine, ms_pkt->entry_status, vha->d_id.b.domain,
132 vha->d_id.b.area, vha->d_id.b.al_pa);
133 } else {
134 if (IS_FWI2_CAPABLE(ha))
135 comp_status = le16_to_cpu(
136 ((struct ct_entry_24xx *)ms_pkt)->comp_status);
137 else
138 comp_status = le16_to_cpu(ms_pkt->status);
139 switch (comp_status) {
140 case CS_COMPLETE:
141 case CS_DATA_UNDERRUN:
142 case CS_DATA_OVERRUN: /* Overrun? */
143 if (ct_rsp->header.response !=
144 cpu_to_be16(CT_ACCEPT_RESPONSE)) {
145 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
146 "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
147 routine, vha->d_id.b.domain,
148 vha->d_id.b.area, vha->d_id.b.al_pa,
149 comp_status, ct_rsp->header.response);
150 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
151 0x2078, ct_rsp,
152 offsetof(typeof(*ct_rsp), rsp));
153 rval = QLA_INVALID_COMMAND;
154 } else
155 rval = QLA_SUCCESS;
156 break;
157 case CS_PORT_LOGGED_OUT:
158 if (IS_FWI2_CAPABLE(ha)) {
159 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
160 NPH_SNS)
161 lid_is_sns = true;
162 } else {
163 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
164 SIMPLE_NAME_SERVER)
165 lid_is_sns = true;
167 if (lid_is_sns) {
168 ql_dbg(ql_dbg_async, vha, 0x502b,
169 "%s failed, Name server has logged out",
170 routine);
171 rval = QLA_NOT_LOGGED_IN;
172 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
173 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
175 break;
176 case CS_TIMEOUT:
177 rval = QLA_FUNCTION_TIMEOUT;
178 /* fall through */
179 default:
180 ql_dbg(ql_dbg_disc, vha, 0x2033,
181 "%s failed, completion status (%x) on port_id: "
182 "%02x%02x%02x.\n", routine, comp_status,
183 vha->d_id.b.domain, vha->d_id.b.area,
184 vha->d_id.b.al_pa);
185 break;
188 return rval;
192 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
193 * @vha: HA context
194 * @fcport: fcport entry to be updated
196 * Returns 0 on success.
197 */
198 int
199 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
201 int rval;
203 ms_iocb_entry_t *ms_pkt;
204 struct ct_sns_req *ct_req;
205 struct ct_sns_rsp *ct_rsp;
206 struct qla_hw_data *ha = vha->hw;
207 struct ct_arg arg;
209 if (IS_QLA2100(ha) || IS_QLA2200(ha))
210 return qla2x00_sns_ga_nxt(vha, fcport);
212 arg.iocb = ha->ms_iocb;
213 arg.req_dma = ha->ct_sns_dma;
214 arg.rsp_dma = ha->ct_sns_dma;
215 arg.req_size = GA_NXT_REQ_SIZE;
216 arg.rsp_size = GA_NXT_RSP_SIZE;
217 arg.nport_handle = NPH_SNS;
219 /* Issue GA_NXT */
220 /* Prepare common MS IOCB */
221 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
223 /* Prepare CT request */
224 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
225 GA_NXT_RSP_SIZE);
226 ct_rsp = &ha->ct_sns->p.rsp;
228 /* Prepare CT arguments -- port_id */
229 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
231 /* Execute MS IOCB */
232 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
233 sizeof(ms_iocb_entry_t));
234 if (rval != QLA_SUCCESS) {
235 /*EMPTY*/
236 ql_dbg(ql_dbg_disc, vha, 0x2062,
237 "GA_NXT issue IOCB failed (%d).\n", rval);
238 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
239 QLA_SUCCESS) {
240 rval = QLA_FUNCTION_FAILED;
241 } else {
242 /* Populate fc_port_t entry. */
243 fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id);
245 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
246 WWN_SIZE);
247 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
248 WWN_SIZE);
250 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
251 FS_FC4TYPE_FCP : FC4_TYPE_OTHER;
253 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
254 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
255 fcport->d_id.b.domain = 0xf0;
257 ql_dbg(ql_dbg_disc, vha, 0x2063,
258 "GA_NXT entry - nn %8phN pn %8phN "
259 "port_id=%02x%02x%02x.\n",
260 fcport->node_name, fcport->port_name,
261 fcport->d_id.b.domain, fcport->d_id.b.area,
262 fcport->d_id.b.al_pa);
265 return (rval);
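/*
 * GID_PT response sizing used below: a 16-byte CT header followed by one
 * 4-byte port-ID entry for each device the HBA can track, i.e.
 * max_fibre_devices * 4 + 16.
 */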
268 static inline int
269 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
271 return vha->hw->max_fibre_devices * 4 + 16;
275 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
276 * @vha: HA context
277 * @list: switch info entries to populate
279 * NOTE: Non-Nx_Ports are not requested.
281 * Returns 0 on success.
282 */
283 int
284 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
286 int rval;
287 uint16_t i;
289 ms_iocb_entry_t *ms_pkt;
290 struct ct_sns_req *ct_req;
291 struct ct_sns_rsp *ct_rsp;
293 struct ct_sns_gid_pt_data *gid_data;
294 struct qla_hw_data *ha = vha->hw;
295 uint16_t gid_pt_rsp_size;
296 struct ct_arg arg;
298 if (IS_QLA2100(ha) || IS_QLA2200(ha))
299 return qla2x00_sns_gid_pt(vha, list);
301 gid_data = NULL;
302 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
304 arg.iocb = ha->ms_iocb;
305 arg.req_dma = ha->ct_sns_dma;
306 arg.rsp_dma = ha->ct_sns_dma;
307 arg.req_size = GID_PT_REQ_SIZE;
308 arg.rsp_size = gid_pt_rsp_size;
309 arg.nport_handle = NPH_SNS;
311 /* Issue GID_PT */
312 /* Prepare common MS IOCB */
313 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
315 /* Prepare CT request */
316 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
317 ct_rsp = &ha->ct_sns->p.rsp;
319 /* Prepare CT arguments -- port_type */
320 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
322 /* Execute MS IOCB */
323 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
324 sizeof(ms_iocb_entry_t));
325 if (rval != QLA_SUCCESS) {
326 /*EMPTY*/
327 ql_dbg(ql_dbg_disc, vha, 0x2055,
328 "GID_PT issue IOCB failed (%d).\n", rval);
329 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
330 QLA_SUCCESS) {
331 rval = QLA_FUNCTION_FAILED;
332 } else {
333 /* Set port IDs in switch info list. */
334 for (i = 0; i < ha->max_fibre_devices; i++) {
335 gid_data = &ct_rsp->rsp.gid_pt.entries[i];
336 list[i].d_id = be_to_port_id(gid_data->port_id);
337 memset(list[i].fabric_port_name, 0, WWN_SIZE);
338 list[i].fp_speed = PORT_SPEED_UNKNOWN;
340 /* Last one exit. */
341 if (gid_data->control_byte & BIT_7) {
342 list[i].d_id.b.rsvd_1 = gid_data->control_byte;
343 break;
348 * If we've used all available slots, then the switch is
349 * reporting back more devices than we can handle with this
350 * single call. Return a failed status, and let GA_NXT handle
351 * the overload.
353 if (i == ha->max_fibre_devices)
354 rval = QLA_FUNCTION_FAILED;
357 return (rval);
361 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
362 * @vha: HA context
363 * @list: switch info entries to populate
365 * Returns 0 on success.
366 */
367 int
368 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
370 int rval = QLA_SUCCESS;
371 uint16_t i;
373 ms_iocb_entry_t *ms_pkt;
374 struct ct_sns_req *ct_req;
375 struct ct_sns_rsp *ct_rsp;
376 struct qla_hw_data *ha = vha->hw;
377 struct ct_arg arg;
379 if (IS_QLA2100(ha) || IS_QLA2200(ha))
380 return qla2x00_sns_gpn_id(vha, list);
382 arg.iocb = ha->ms_iocb;
383 arg.req_dma = ha->ct_sns_dma;
384 arg.rsp_dma = ha->ct_sns_dma;
385 arg.req_size = GPN_ID_REQ_SIZE;
386 arg.rsp_size = GPN_ID_RSP_SIZE;
387 arg.nport_handle = NPH_SNS;
389 for (i = 0; i < ha->max_fibre_devices; i++) {
390 /* Issue GPN_ID */
391 /* Prepare common MS IOCB */
392 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
394 /* Prepare CT request */
395 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
396 GPN_ID_RSP_SIZE);
397 ct_rsp = &ha->ct_sns->p.rsp;
399 /* Prepare CT arguments -- port_id */
400 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
402 /* Execute MS IOCB */
403 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
404 sizeof(ms_iocb_entry_t));
405 if (rval != QLA_SUCCESS) {
406 /*EMPTY*/
407 ql_dbg(ql_dbg_disc, vha, 0x2056,
408 "GPN_ID issue IOCB failed (%d).\n", rval);
409 break;
410 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
411 "GPN_ID") != QLA_SUCCESS) {
412 rval = QLA_FUNCTION_FAILED;
413 break;
414 } else {
415 /* Save portname */
416 memcpy(list[i].port_name,
417 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
420 /* Last device exit. */
421 if (list[i].d_id.b.rsvd_1 != 0)
422 break;
425 return (rval);
429 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
430 * @vha: HA context
431 * @list: switch info entries to populate
433 * Returns 0 on success.
434 */
435 int
436 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
438 int rval = QLA_SUCCESS;
439 uint16_t i;
440 struct qla_hw_data *ha = vha->hw;
441 ms_iocb_entry_t *ms_pkt;
442 struct ct_sns_req *ct_req;
443 struct ct_sns_rsp *ct_rsp;
444 struct ct_arg arg;
446 if (IS_QLA2100(ha) || IS_QLA2200(ha))
447 return qla2x00_sns_gnn_id(vha, list);
449 arg.iocb = ha->ms_iocb;
450 arg.req_dma = ha->ct_sns_dma;
451 arg.rsp_dma = ha->ct_sns_dma;
452 arg.req_size = GNN_ID_REQ_SIZE;
453 arg.rsp_size = GNN_ID_RSP_SIZE;
454 arg.nport_handle = NPH_SNS;
456 for (i = 0; i < ha->max_fibre_devices; i++) {
457 /* Issue GNN_ID */
458 /* Prepare common MS IOCB */
459 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
461 /* Prepare CT request */
462 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
463 GNN_ID_RSP_SIZE);
464 ct_rsp = &ha->ct_sns->p.rsp;
466 /* Prepare CT arguments -- port_id */
467 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
469 /* Execute MS IOCB */
470 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
471 sizeof(ms_iocb_entry_t));
472 if (rval != QLA_SUCCESS) {
473 /*EMPTY*/
474 ql_dbg(ql_dbg_disc, vha, 0x2057,
475 "GNN_ID issue IOCB failed (%d).\n", rval);
476 break;
477 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
478 "GNN_ID") != QLA_SUCCESS) {
479 rval = QLA_FUNCTION_FAILED;
480 break;
481 } else {
482 /* Save nodename */
483 memcpy(list[i].node_name,
484 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
486 ql_dbg(ql_dbg_disc, vha, 0x2058,
487 "GID_PT entry - nn %8phN pn %8phN "
488 "portid=%02x%02x%02x.\n",
489 list[i].node_name, list[i].port_name,
490 list[i].d_id.b.domain, list[i].d_id.b.area,
491 list[i].d_id.b.al_pa);
494 /* Last device exit. */
495 if (list[i].d_id.b.rsvd_1 != 0)
496 break;
499 return (rval);
502 static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
504 struct scsi_qla_host *vha = sp->vha;
505 struct ct_sns_pkt *ct_sns;
506 struct qla_work_evt *e;
508 sp->rc = rc;
509 if (rc == QLA_SUCCESS) {
510 ql_dbg(ql_dbg_disc, vha, 0x204f,
511 "Async done-%s exiting normally.\n",
512 sp->name);
513 } else if (rc == QLA_FUNCTION_TIMEOUT) {
514 ql_dbg(ql_dbg_disc, vha, 0x204f,
515 "Async done-%s timeout\n", sp->name);
516 } else {
517 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
518 memset(ct_sns, 0, sizeof(*ct_sns));
519 sp->retry_count++;
520 if (sp->retry_count > 3)
521 goto err;
523 ql_dbg(ql_dbg_disc, vha, 0x204f,
524 "Async done-%s fail rc %x. Retry count %d\n",
525 sp->name, rc, sp->retry_count);
527 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
528 if (!e)
529 goto err2;
531 del_timer(&sp->u.iocb_cmd.timer);
532 e->u.iosb.sp = sp;
533 qla2x00_post_work(vha, e);
534 return;
537 err:
538 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
539 err2:
540 if (!e) {
541 /* Freeing here may trigger a kernel warning, but skipping the free would leak the DMA buffers. */
542 if (sp->u.iocb_cmd.u.ctarg.req) {
543 dma_free_coherent(&vha->hw->pdev->dev,
544 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
545 sp->u.iocb_cmd.u.ctarg.req,
546 sp->u.iocb_cmd.u.ctarg.req_dma);
547 sp->u.iocb_cmd.u.ctarg.req = NULL;
550 if (sp->u.iocb_cmd.u.ctarg.rsp) {
551 dma_free_coherent(&vha->hw->pdev->dev,
552 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
553 sp->u.iocb_cmd.u.ctarg.rsp,
554 sp->u.iocb_cmd.u.ctarg.rsp_dma);
555 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
558 sp->free(sp);
560 return;
563 e->u.iosb.sp = sp;
564 qla2x00_post_work(vha, e);
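/*
 * Summary of the completion handling above: on success or timeout the srb
 * and its DMA buffers are released from process context via a QLA_EVT_UNMAP
 * work item; on other failures the response buffer is cleared and the
 * command is re-posted through QLA_EVT_SP_RETRY, up to three retries, after
 * which it also falls back to QLA_EVT_UNMAP.  Only if no work item can be
 * allocated at all are the buffers and the srb freed inline.
 */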
568 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
569 * @vha: HA context
571 * Returns 0 on success.
572 */
573 int
574 qla2x00_rft_id(scsi_qla_host_t *vha)
576 struct qla_hw_data *ha = vha->hw;
578 if (IS_QLA2100(ha) || IS_QLA2200(ha))
579 return qla2x00_sns_rft_id(vha);
581 return qla_async_rftid(vha, &vha->d_id);
584 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
586 int rval = QLA_MEMORY_ALLOC_FAILED;
587 struct ct_sns_req *ct_req;
588 srb_t *sp;
589 struct ct_sns_pkt *ct_sns;
591 if (!vha->flags.online)
592 goto done;
594 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
595 if (!sp)
596 goto done;
598 sp->type = SRB_CT_PTHRU_CMD;
599 sp->name = "rft_id";
600 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
602 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
603 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
604 GFP_KERNEL);
605 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
606 if (!sp->u.iocb_cmd.u.ctarg.req) {
607 ql_log(ql_log_warn, vha, 0xd041,
608 "%s: Failed to allocate ct_sns request.\n",
609 __func__);
610 goto done_free_sp;
613 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
614 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
615 GFP_KERNEL);
616 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
617 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
618 ql_log(ql_log_warn, vha, 0xd042,
619 "%s: Failed to allocate ct_sns request.\n",
620 __func__);
621 goto done_free_sp;
623 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
624 memset(ct_sns, 0, sizeof(*ct_sns));
625 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
627 /* Prepare CT request */
628 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
630 /* Prepare CT arguments -- port_id, FC-4 types */
631 ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
632 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
634 if (vha->flags.nvme_enabled)
635 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
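/*
 * The RFT_ID payload carries a 32-byte FC-4 TYPE bitmap laid out as
 * big-endian 32-bit words, so the bit for FC-4 type 0x08 (SCSI-FCP) lands
 * in byte 2 and the bit for type 0x28 (FC-NVMe) in byte 6, as set above.
 */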
637 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
638 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
639 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
640 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
641 sp->done = qla2x00_async_sns_sp_done;
643 ql_dbg(ql_dbg_disc, vha, 0xffff,
644 "Async-%s - hdl=%x portid %06x.\n",
645 sp->name, sp->handle, d_id->b24);
647 rval = qla2x00_start_sp(sp);
648 if (rval != QLA_SUCCESS) {
649 ql_dbg(ql_dbg_disc, vha, 0x2043,
650 "RFT_ID issue IOCB failed (%d).\n", rval);
651 goto done_free_sp;
653 return rval;
654 done_free_sp:
655 sp->free(sp);
656 done:
657 return rval;
661 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
662 * @vha: HA context
663 * @type: not used
665 * Returns 0 on success.
666 */
667 int
668 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
670 struct qla_hw_data *ha = vha->hw;
672 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
673 ql_dbg(ql_dbg_disc, vha, 0x2046,
674 "RFF_ID call not supported on ISP2100/ISP2200.\n");
675 return (QLA_SUCCESS);
678 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
679 FC4_TYPE_FCP_SCSI);
682 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
683 u8 fc4feature, u8 fc4type)
685 int rval = QLA_MEMORY_ALLOC_FAILED;
686 struct ct_sns_req *ct_req;
687 srb_t *sp;
688 struct ct_sns_pkt *ct_sns;
690 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
691 if (!sp)
692 goto done;
694 sp->type = SRB_CT_PTHRU_CMD;
695 sp->name = "rff_id";
696 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
698 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
699 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
700 GFP_KERNEL);
701 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
702 if (!sp->u.iocb_cmd.u.ctarg.req) {
703 ql_log(ql_log_warn, vha, 0xd041,
704 "%s: Failed to allocate ct_sns request.\n",
705 __func__);
706 goto done_free_sp;
709 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
710 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
711 GFP_KERNEL);
712 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
713 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
714 ql_log(ql_log_warn, vha, 0xd042,
715 "%s: Failed to allocate ct_sns request.\n",
716 __func__);
717 goto done_free_sp;
719 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
720 memset(ct_sns, 0, sizeof(*ct_sns));
721 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
723 /* Prepare CT request */
724 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
726 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
727 ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
728 ct_req->req.rff_id.fc4_feature = fc4feature;
729 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
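/*
 * fc4_feature carries the FC-4 Features bits for the FCP type being
 * registered; per FC-GS these indicate target (bit 0) and/or initiator
 * (bit 1) capability, and qlt_rff_id() selects them based on the enabled
 * operating mode.
 */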
731 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
732 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
733 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
734 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
735 sp->done = qla2x00_async_sns_sp_done;
737 ql_dbg(ql_dbg_disc, vha, 0xffff,
738 "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
739 sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
741 rval = qla2x00_start_sp(sp);
742 if (rval != QLA_SUCCESS) {
743 ql_dbg(ql_dbg_disc, vha, 0x2047,
744 "RFF_ID issue IOCB failed (%d).\n", rval);
745 goto done_free_sp;
748 return rval;
750 done_free_sp:
751 sp->free(sp);
752 done:
753 return rval;
757 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
758 * @vha: HA context
760 * Returns 0 on success.
761 */
762 int
763 qla2x00_rnn_id(scsi_qla_host_t *vha)
765 struct qla_hw_data *ha = vha->hw;
767 if (IS_QLA2100(ha) || IS_QLA2200(ha))
768 return qla2x00_sns_rnn_id(vha);
770 return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
773 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
774 u8 *node_name)
776 int rval = QLA_MEMORY_ALLOC_FAILED;
777 struct ct_sns_req *ct_req;
778 srb_t *sp;
779 struct ct_sns_pkt *ct_sns;
781 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
782 if (!sp)
783 goto done;
785 sp->type = SRB_CT_PTHRU_CMD;
786 sp->name = "rnid";
787 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
789 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
790 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
791 GFP_KERNEL);
792 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
793 if (!sp->u.iocb_cmd.u.ctarg.req) {
794 ql_log(ql_log_warn, vha, 0xd041,
795 "%s: Failed to allocate ct_sns request.\n",
796 __func__);
797 goto done_free_sp;
800 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
801 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
802 GFP_KERNEL);
803 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
804 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
805 ql_log(ql_log_warn, vha, 0xd042,
806 "%s: Failed to allocate ct_sns request.\n",
807 __func__);
808 goto done_free_sp;
810 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
811 memset(ct_sns, 0, sizeof(*ct_sns));
812 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
814 /* Prepare CT request */
815 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
817 /* Prepare CT arguments -- port_id, node_name */
818 ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id);
819 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
821 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
822 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
823 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
825 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
826 sp->done = qla2x00_async_sns_sp_done;
828 ql_dbg(ql_dbg_disc, vha, 0xffff,
829 "Async-%s - hdl=%x portid %06x\n",
830 sp->name, sp->handle, d_id->b24);
832 rval = qla2x00_start_sp(sp);
833 if (rval != QLA_SUCCESS) {
834 ql_dbg(ql_dbg_disc, vha, 0x204d,
835 "RNN_ID issue IOCB failed (%d).\n", rval);
836 goto done_free_sp;
839 return rval;
841 done_free_sp:
842 sp->free(sp);
843 done:
844 return rval;
847 void
848 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
850 struct qla_hw_data *ha = vha->hw;
852 if (IS_QLAFX00(ha))
853 snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
854 ha->mr.fw_version, qla2x00_version_str);
855 else
856 snprintf(snn, size,
857 "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
858 ha->fw_major_version, ha->fw_minor_version,
859 ha->fw_subminor_version, qla2x00_version_str);
863 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
864 * @vha: HA context
866 * Returns 0 on success.
867 */
868 int
869 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
871 struct qla_hw_data *ha = vha->hw;
873 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
874 ql_dbg(ql_dbg_disc, vha, 0x2050,
875 "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
876 return (QLA_SUCCESS);
879 return qla_async_rsnn_nn(vha);
882 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
884 int rval = QLA_MEMORY_ALLOC_FAILED;
885 struct ct_sns_req *ct_req;
886 srb_t *sp;
887 struct ct_sns_pkt *ct_sns;
889 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
890 if (!sp)
891 goto done;
893 sp->type = SRB_CT_PTHRU_CMD;
894 sp->name = "rsnn_nn";
895 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
897 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
898 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
899 GFP_KERNEL);
900 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
901 if (!sp->u.iocb_cmd.u.ctarg.req) {
902 ql_log(ql_log_warn, vha, 0xd041,
903 "%s: Failed to allocate ct_sns request.\n",
904 __func__);
905 goto done_free_sp;
908 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
909 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
910 GFP_KERNEL);
911 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
912 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
913 ql_log(ql_log_warn, vha, 0xd042,
914 "%s: Failed to allocate ct_sns request.\n",
915 __func__);
916 goto done_free_sp;
918 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
919 memset(ct_sns, 0, sizeof(*ct_sns));
920 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
922 /* Prepare CT request */
923 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
925 /* Prepare CT arguments -- node_name, symbolic node_name, size */
926 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
928 /* Prepare the Symbolic Node Name */
929 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
930 sizeof(ct_req->req.rsnn_nn.sym_node_name));
931 ct_req->req.rsnn_nn.name_len =
932 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
935 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
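/*
 * Request size above: 16-byte CT header plus the 8-byte node name (24),
 * plus one byte for the symbolic-name length field, plus the name itself.
 */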
936 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
937 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
939 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
940 sp->done = qla2x00_async_sns_sp_done;
942 ql_dbg(ql_dbg_disc, vha, 0xffff,
943 "Async-%s - hdl=%x.\n",
944 sp->name, sp->handle);
946 rval = qla2x00_start_sp(sp);
947 if (rval != QLA_SUCCESS) {
948 ql_dbg(ql_dbg_disc, vha, 0x2043,
949 "RFT_ID issue IOCB failed (%d).\n", rval);
950 goto done_free_sp;
953 return rval;
955 done_free_sp:
956 sp->free(sp);
957 done:
958 return rval;
962 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
963 * @vha: HA context
964 * @cmd: GS command
965 * @scmd_len: Subcommand length
966 * @data_size: response size in bytes
968 * Returns a pointer to the @ha's sns_cmd.
970 static inline struct sns_cmd_pkt *
971 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
972 uint16_t data_size)
974 uint16_t wc;
975 struct sns_cmd_pkt *sns_cmd;
976 struct qla_hw_data *ha = vha->hw;
978 sns_cmd = ha->sns_cmd;
979 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
980 wc = data_size / 2; /* Size in 16bit words. */
981 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
982 put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
983 sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
984 sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
985 wc = (data_size - 16) / 4; /* Size in 32bit words. */
986 sns_cmd->p.cmd.size = cpu_to_le16(wc);
988 vha->qla_stats.control_requests++;
990 return (sns_cmd);
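/*
 * Note: this SNS command packet (and the qla2x00_sns_*() helpers below that
 * use it) is the legacy path taken only on ISP2100/ISP2200; later ISPs use
 * the MS/CT passthrough IOCBs prepared above.
 */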
994 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
995 * @vha: HA context
996 * @fcport: fcport entry to be updated
998 * This command uses the old Execute SNS Command mailbox routine.
1000 * Returns 0 on success.
1002 static int
1003 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1005 int rval = QLA_SUCCESS;
1006 struct qla_hw_data *ha = vha->hw;
1007 struct sns_cmd_pkt *sns_cmd;
1009 /* Issue GA_NXT. */
1010 /* Prepare SNS command request. */
1011 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1012 GA_NXT_SNS_DATA_SIZE);
1014 /* Prepare SNS command arguments -- port_id. */
1015 sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1016 sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1017 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1019 /* Execute SNS command. */
1020 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1021 sizeof(struct sns_cmd_pkt));
1022 if (rval != QLA_SUCCESS) {
1023 /*EMPTY*/
1024 ql_dbg(ql_dbg_disc, vha, 0x205f,
1025 "GA_NXT Send SNS failed (%d).\n", rval);
1026 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
1027 sns_cmd->p.gan_data[9] != 0x02) {
1028 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1029 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1030 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1031 sns_cmd->p.gan_data, 16);
1032 rval = QLA_FUNCTION_FAILED;
1033 } else {
1034 /* Populate fc_port_t entry. */
1035 fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1036 fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1037 fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1039 memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1040 memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
1042 if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1043 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1044 fcport->d_id.b.domain = 0xf0;
1046 ql_dbg(ql_dbg_disc, vha, 0x2061,
1047 "GA_NXT entry - nn %8phN pn %8phN "
1048 "port_id=%02x%02x%02x.\n",
1049 fcport->node_name, fcport->port_name,
1050 fcport->d_id.b.domain, fcport->d_id.b.area,
1051 fcport->d_id.b.al_pa);
1054 return (rval);
1058 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1059 * @vha: HA context
1060 * @list: switch info entries to populate
1062 * This command uses the old Execute SNS Command mailbox routine.
1064 * NOTE: Non-Nx_Ports are not requested.
1066 * Returns 0 on success.
1068 static int
1069 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
1071 int rval;
1072 struct qla_hw_data *ha = vha->hw;
1073 uint16_t i;
1074 uint8_t *entry;
1075 struct sns_cmd_pkt *sns_cmd;
1076 uint16_t gid_pt_sns_data_size;
1078 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1080 /* Issue GID_PT. */
1081 /* Prepare SNS command request. */
1082 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1083 gid_pt_sns_data_size);
1085 /* Prepare SNS command arguments -- port_type. */
1086 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1088 /* Execute SNS command. */
1089 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1090 sizeof(struct sns_cmd_pkt));
1091 if (rval != QLA_SUCCESS) {
1092 /*EMPTY*/
1093 ql_dbg(ql_dbg_disc, vha, 0x206d,
1094 "GID_PT Send SNS failed (%d).\n", rval);
1095 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
1096 sns_cmd->p.gid_data[9] != 0x02) {
1097 ql_dbg(ql_dbg_disc, vha, 0x202f,
1098 "GID_PT failed, rejected request, gid_rsp:\n");
1099 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1100 sns_cmd->p.gid_data, 16);
1101 rval = QLA_FUNCTION_FAILED;
1102 } else {
1103 /* Set port IDs in switch info list. */
1104 for (i = 0; i < ha->max_fibre_devices; i++) {
1105 entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1106 list[i].d_id.b.domain = entry[1];
1107 list[i].d_id.b.area = entry[2];
1108 list[i].d_id.b.al_pa = entry[3];
1110 /* Last one exit. */
1111 if (entry[0] & BIT_7) {
1112 list[i].d_id.b.rsvd_1 = entry[0];
1113 break;
1118 * If we've used all available slots, then the switch is
1119 * reporting back more devices than we can handle with this
1120 * single call. Return a failed status, and let GA_NXT handle
1121 * the overload.
1123 if (i == ha->max_fibre_devices)
1124 rval = QLA_FUNCTION_FAILED;
1127 return (rval);
1131 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1132 * @vha: HA context
1133 * @list: switch info entries to populate
1135 * This command uses the old Execute SNS Command mailbox routine.
1137 * Returns 0 on success.
1139 static int
1140 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1142 int rval = QLA_SUCCESS;
1143 struct qla_hw_data *ha = vha->hw;
1144 uint16_t i;
1145 struct sns_cmd_pkt *sns_cmd;
1147 for (i = 0; i < ha->max_fibre_devices; i++) {
1148 /* Issue GPN_ID */
1149 /* Prepare SNS command request. */
1150 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1151 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
1153 /* Prepare SNS command arguments -- port_id. */
1154 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1155 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1156 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1158 /* Execute SNS command. */
1159 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1160 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1161 if (rval != QLA_SUCCESS) {
1162 /*EMPTY*/
1163 ql_dbg(ql_dbg_disc, vha, 0x2032,
1164 "GPN_ID Send SNS failed (%d).\n", rval);
1165 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1166 sns_cmd->p.gpn_data[9] != 0x02) {
1167 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1168 "GPN_ID failed, rejected request, gpn_rsp:\n");
1169 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1170 sns_cmd->p.gpn_data, 16);
1171 rval = QLA_FUNCTION_FAILED;
1172 } else {
1173 /* Save portname */
1174 memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
1175 WWN_SIZE);
1178 /* Last device exit. */
1179 if (list[i].d_id.b.rsvd_1 != 0)
1180 break;
1183 return (rval);
1187 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
1188 * @vha: HA context
1189 * @list: switch info entries to populate
1191 * This command uses the old Execute SNS Command mailbox routine.
1193 * Returns 0 on success.
1195 static int
1196 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
1198 int rval = QLA_SUCCESS;
1199 struct qla_hw_data *ha = vha->hw;
1200 uint16_t i;
1201 struct sns_cmd_pkt *sns_cmd;
1203 for (i = 0; i < ha->max_fibre_devices; i++) {
1204 /* Issue GNN_ID */
1205 /* Prepare SNS command request. */
1206 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1207 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
1209 /* Prepare SNS command arguments -- port_id. */
1210 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1211 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1212 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1214 /* Execute SNS command. */
1215 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1216 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1217 if (rval != QLA_SUCCESS) {
1218 /*EMPTY*/
1219 ql_dbg(ql_dbg_disc, vha, 0x203f,
1220 "GNN_ID Send SNS failed (%d).\n", rval);
1221 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1222 sns_cmd->p.gnn_data[9] != 0x02) {
1223 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1224 "GNN_ID failed, rejected request, gnn_rsp:\n");
1225 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1226 sns_cmd->p.gnn_data, 16);
1227 rval = QLA_FUNCTION_FAILED;
1228 } else {
1229 /* Save nodename */
1230 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
1231 WWN_SIZE);
1233 ql_dbg(ql_dbg_disc, vha, 0x206e,
1234 "GID_PT entry - nn %8phN pn %8phN "
1235 "port_id=%02x%02x%02x.\n",
1236 list[i].node_name, list[i].port_name,
1237 list[i].d_id.b.domain, list[i].d_id.b.area,
1238 list[i].d_id.b.al_pa);
1241 /* Last device exit. */
1242 if (list[i].d_id.b.rsvd_1 != 0)
1243 break;
1246 return (rval);
1250 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
1251 * @vha: HA context
1253 * This command uses the old Execute SNS Command mailbox routine.
1255 * Returns 0 on success.
1257 static int
1258 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1260 int rval;
1261 struct qla_hw_data *ha = vha->hw;
1262 struct sns_cmd_pkt *sns_cmd;
1264 /* Issue RFT_ID. */
1265 /* Prepare SNS command request. */
1266 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1267 RFT_ID_SNS_DATA_SIZE);
1269 /* Prepare SNS command arguments -- port_id, FC-4 types */
1270 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1271 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1272 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1274 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1276 /* Execute SNS command. */
1277 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1278 sizeof(struct sns_cmd_pkt));
1279 if (rval != QLA_SUCCESS) {
1280 /*EMPTY*/
1281 ql_dbg(ql_dbg_disc, vha, 0x2060,
1282 "RFT_ID Send SNS failed (%d).\n", rval);
1283 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1284 sns_cmd->p.rft_data[9] != 0x02) {
1285 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1286 "RFT_ID failed, rejected request rft_rsp:\n");
1287 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1288 sns_cmd->p.rft_data, 16);
1289 rval = QLA_FUNCTION_FAILED;
1290 } else {
1291 ql_dbg(ql_dbg_disc, vha, 0x2073,
1292 "RFT_ID exiting normally.\n");
1295 return (rval);
1299 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
1300 * @vha: HA context
1302 * This command uses the old Execute SNS Command mailbox routine.
1304 * Returns 0 on success.
1306 static int
1307 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1309 int rval;
1310 struct qla_hw_data *ha = vha->hw;
1311 struct sns_cmd_pkt *sns_cmd;
1313 /* Issue RNN_ID. */
1314 /* Prepare SNS command request. */
1315 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1316 RNN_ID_SNS_DATA_SIZE);
1318 /* Prepare SNS command arguments -- port_id, nodename. */
1319 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1320 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1321 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1323 sns_cmd->p.cmd.param[4] = vha->node_name[7];
1324 sns_cmd->p.cmd.param[5] = vha->node_name[6];
1325 sns_cmd->p.cmd.param[6] = vha->node_name[5];
1326 sns_cmd->p.cmd.param[7] = vha->node_name[4];
1327 sns_cmd->p.cmd.param[8] = vha->node_name[3];
1328 sns_cmd->p.cmd.param[9] = vha->node_name[2];
1329 sns_cmd->p.cmd.param[10] = vha->node_name[1];
1330 sns_cmd->p.cmd.param[11] = vha->node_name[0];
1332 /* Execute SNS command. */
1333 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1334 sizeof(struct sns_cmd_pkt));
1335 if (rval != QLA_SUCCESS) {
1336 /*EMPTY*/
1337 ql_dbg(ql_dbg_disc, vha, 0x204a,
1338 "RNN_ID Send SNS failed (%d).\n", rval);
1339 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1340 sns_cmd->p.rnn_data[9] != 0x02) {
1341 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1342 "RNN_ID failed, rejected request, rnn_rsp:\n");
1343 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1344 sns_cmd->p.rnn_data, 16);
1345 rval = QLA_FUNCTION_FAILED;
1346 } else {
1347 ql_dbg(ql_dbg_disc, vha, 0x204c,
1348 "RNN_ID exiting normally.\n");
1351 return (rval);
1355 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1356 * @vha: HA context
1358 * Returns 0 on success.
1359 */
1360 int
1361 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1363 int ret, rval;
1364 uint16_t mb[MAILBOX_REGISTER_COUNT];
1365 struct qla_hw_data *ha = vha->hw;
1367 ret = QLA_SUCCESS;
1368 if (vha->flags.management_server_logged_in)
1369 return ret;
1371 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1372 0xfa, mb, BIT_1);
1373 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1374 if (rval == QLA_MEMORY_ALLOC_FAILED)
1375 ql_dbg(ql_dbg_disc, vha, 0x2085,
1376 "Failed management_server login: loopid=%x "
1377 "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1378 else
1379 ql_dbg(ql_dbg_disc, vha, 0x2024,
1380 "Failed management_server login: loopid=%x "
1381 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1382 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
1383 mb[7]);
1384 ret = QLA_FUNCTION_FAILED;
1385 } else
1386 vha->flags.management_server_logged_in = 1;
1388 return ret;
1392 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1393 * @vha: HA context
1394 * @req_size: request size in bytes
1395 * @rsp_size: response size in bytes
1397 * Returns a pointer to the @ha's ms_iocb.
1399 void *
1400 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1401 uint32_t rsp_size)
1403 ms_iocb_entry_t *ms_pkt;
1404 struct qla_hw_data *ha = vha->hw;
1406 ms_pkt = ha->ms_iocb;
1407 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1409 ms_pkt->entry_type = MS_IOCB_TYPE;
1410 ms_pkt->entry_count = 1;
1411 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1412 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
1413 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1414 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1415 ms_pkt->total_dsd_count = cpu_to_le16(2);
1416 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1417 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1419 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
1420 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1422 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
1423 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
1425 return ms_pkt;
1429 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1430 * @vha: HA context
1431 * @req_size: request size in bytes
1432 * @rsp_size: response size in bytes
1434 * Returns a pointer to the @ha's ms_iocb.
1436 void *
1437 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1438 uint32_t rsp_size)
1440 struct ct_entry_24xx *ct_pkt;
1441 struct qla_hw_data *ha = vha->hw;
1443 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1444 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1446 ct_pkt->entry_type = CT_IOCB_TYPE;
1447 ct_pkt->entry_count = 1;
1448 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1449 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1450 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1451 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1452 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1453 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1455 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
1456 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1458 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
1459 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
1460 ct_pkt->vp_index = vha->vp_idx;
1462 return ct_pkt;
1465 static void
1466 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1468 struct qla_hw_data *ha = vha->hw;
1469 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1470 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1472 if (IS_FWI2_CAPABLE(ha)) {
1473 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1474 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1475 } else {
1476 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1477 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1482 * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for FDMI query.
1483 * @p: CT request buffer
1484 * @cmd: GS command
1485 * @rsp_size: response size in bytes
1487 * Returns a pointer to the initialized @ct_req.
1489 static inline struct ct_sns_req *
1490 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1491 uint16_t rsp_size)
1493 memset(p, 0, sizeof(struct ct_sns_pkt));
1495 p->p.req.header.revision = 0x01;
1496 p->p.req.header.gs_type = 0xFA;
1497 p->p.req.header.gs_subtype = 0x10;
1498 p->p.req.command = cpu_to_be16(cmd);
1499 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
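/*
 * Same CT_IU preamble as qla2x00_prep_ct_req(), but addressed to GS type
 * 0xFA (management service), subtype 0x10 (FDMI HBA management).
 */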
1501 return &p->p.req;
1505 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
1506 * @vha: HA context
1508 * Returns 0 on success.
1510 static int
1511 qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1513 int rval, alen;
1514 uint32_t size, sn;
1516 ms_iocb_entry_t *ms_pkt;
1517 struct ct_sns_req *ct_req;
1518 struct ct_sns_rsp *ct_rsp;
1519 void *entries;
1520 struct ct_fdmi_hba_attr *eiter;
1521 struct qla_hw_data *ha = vha->hw;
1523 /* Issue RHBA */
1524 /* Prepare common MS IOCB */
1525 /* Request size adjusted after CT preparation */
1526 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1528 /* Prepare CT request */
1529 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
1530 ct_rsp = &ha->ct_sns->p.rsp;
1532 /* Prepare FDMI command arguments -- attribute block, attributes. */
1533 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
1534 ct_req->req.rhba.entry_count = cpu_to_be32(1);
1535 memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
1536 size = 2 * WWN_SIZE + 4 + 4;
1538 /* Attributes */
1539 ct_req->req.rhba.attrs.count =
1540 cpu_to_be32(FDMI_HBA_ATTR_COUNT);
1541 entries = &ct_req->req;
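/*
 * Each attribute appended below is a TLV: a 2-byte type, a 2-byte length
 * covering this 4-byte header plus the value, and a value padded to a
 * 4-byte boundary (the "alen += 4 - (alen & 3)" idiom; note it always
 * rounds up, adding 4 bytes of padding even when the string length is
 * already aligned).  "size" tracks the running offset into the payload.
 */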
1543 /* Nodename. */
1544 eiter = entries + size;
1545 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1546 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1547 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1548 size += 4 + WWN_SIZE;
1550 ql_dbg(ql_dbg_disc, vha, 0x2025,
1551 "NodeName = %8phN.\n", eiter->a.node_name);
1553 /* Manufacturer. */
1554 eiter = entries + size;
1555 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1556 alen = strlen(QLA2XXX_MANUFACTURER);
1557 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1558 "%s", "QLogic Corporation");
1559 alen += 4 - (alen & 3);
1560 eiter->len = cpu_to_be16(4 + alen);
1561 size += 4 + alen;
1563 ql_dbg(ql_dbg_disc, vha, 0x2026,
1564 "Manufacturer = %s.\n", eiter->a.manufacturer);
1566 /* Serial number. */
1567 eiter = entries + size;
1568 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1569 if (IS_FWI2_CAPABLE(ha))
1570 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1571 sizeof(eiter->a.serial_num));
1572 else {
1573 sn = ((ha->serial0 & 0x1f) << 16) |
1574 (ha->serial2 << 8) | ha->serial1;
1575 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1576 "%c%05d", 'A' + sn / 100000, sn % 100000);
1578 alen = strlen(eiter->a.serial_num);
1579 alen += 4 - (alen & 3);
1580 eiter->len = cpu_to_be16(4 + alen);
1581 size += 4 + alen;
1583 ql_dbg(ql_dbg_disc, vha, 0x2027,
1584 "Serial no. = %s.\n", eiter->a.serial_num);
1586 /* Model name. */
1587 eiter = entries + size;
1588 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1589 snprintf(eiter->a.model, sizeof(eiter->a.model),
1590 "%s", ha->model_number);
1591 alen = strlen(eiter->a.model);
1592 alen += 4 - (alen & 3);
1593 eiter->len = cpu_to_be16(4 + alen);
1594 size += 4 + alen;
1596 ql_dbg(ql_dbg_disc, vha, 0x2028,
1597 "Model Name = %s.\n", eiter->a.model);
1599 /* Model description. */
1600 eiter = entries + size;
1601 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1602 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
1603 "%s", ha->model_desc);
1604 alen = strlen(eiter->a.model_desc);
1605 alen += 4 - (alen & 3);
1606 eiter->len = cpu_to_be16(4 + alen);
1607 size += 4 + alen;
1609 ql_dbg(ql_dbg_disc, vha, 0x2029,
1610 "Model Desc = %s.\n", eiter->a.model_desc);
1612 /* Hardware version. */
1613 eiter = entries + size;
1614 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1615 if (!IS_FWI2_CAPABLE(ha)) {
1616 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1617 "HW:%s", ha->adapter_id);
1618 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
1619 sizeof(eiter->a.hw_version))) {
1621 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
1622 sizeof(eiter->a.hw_version))) {
1624 } else {
1625 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1626 "HW:%s", ha->adapter_id);
1628 alen = strlen(eiter->a.hw_version);
1629 alen += 4 - (alen & 3);
1630 eiter->len = cpu_to_be16(4 + alen);
1631 size += 4 + alen;
1633 ql_dbg(ql_dbg_disc, vha, 0x202a,
1634 "Hardware ver = %s.\n", eiter->a.hw_version);
1636 /* Driver version. */
1637 eiter = entries + size;
1638 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1639 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
1640 "%s", qla2x00_version_str);
1641 alen = strlen(eiter->a.driver_version);
1642 alen += 4 - (alen & 3);
1643 eiter->len = cpu_to_be16(4 + alen);
1644 size += 4 + alen;
1646 ql_dbg(ql_dbg_disc, vha, 0x202b,
1647 "Driver ver = %s.\n", eiter->a.driver_version);
1649 /* Option ROM version. */
1650 eiter = entries + size;
1651 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1652 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
1653 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1654 alen = strlen(eiter->a.orom_version);
1655 alen += 4 - (alen & 3);
1656 eiter->len = cpu_to_be16(4 + alen);
1657 size += 4 + alen;
1659 ql_dbg(ql_dbg_disc, vha, 0x202c,
1660 "Optrom vers = %s.\n", eiter->a.orom_version);
1662 /* Firmware version */
1663 eiter = entries + size;
1664 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1665 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1666 sizeof(eiter->a.fw_version));
1667 alen = strlen(eiter->a.fw_version);
1668 alen += 4 - (alen & 3);
1669 eiter->len = cpu_to_be16(4 + alen);
1670 size += 4 + alen;
1672 ql_dbg(ql_dbg_disc, vha, 0x202d,
1673 "Firmware vers = %s.\n", eiter->a.fw_version);
1675 /* Update MS request size. */
1676 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1678 ql_dbg(ql_dbg_disc, vha, 0x202e,
1679 "RHBA identifier = %8phN size=%d.\n",
1680 ct_req->req.rhba.hba_identifier, size);
1681 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1682 entries, size);
1684 /* Execute MS IOCB */
1685 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1686 sizeof(ms_iocb_entry_t));
1687 if (rval != QLA_SUCCESS) {
1688 /*EMPTY*/
1689 ql_dbg(ql_dbg_disc, vha, 0x2030,
1690 "RHBA issue IOCB failed (%d).\n", rval);
1691 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1692 QLA_SUCCESS) {
1693 rval = QLA_FUNCTION_FAILED;
1694 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1695 ct_rsp->header.explanation_code ==
1696 CT_EXPL_ALREADY_REGISTERED) {
1697 ql_dbg(ql_dbg_disc, vha, 0x2034,
1698 "HBA already registered.\n");
1699 rval = QLA_ALREADY_REGISTERED;
1700 } else {
1701 ql_dbg(ql_dbg_disc, vha, 0x20ad,
1702 "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
1703 ct_rsp->header.reason_code,
1704 ct_rsp->header.explanation_code);
1706 } else {
1707 ql_dbg(ql_dbg_disc, vha, 0x2035,
1708 "RHBA exiting normally.\n");
1711 return rval;
1715 * qla2x00_fdmi_rpa() - perform RPA registration
1716 * @vha: HA context
1718 * Returns 0 on success.
1720 static int
1721 qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1723 int rval, alen;
1724 uint32_t size;
1725 struct qla_hw_data *ha = vha->hw;
1726 ms_iocb_entry_t *ms_pkt;
1727 struct ct_sns_req *ct_req;
1728 struct ct_sns_rsp *ct_rsp;
1729 void *entries;
1730 struct ct_fdmi_port_attr *eiter;
1731 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1732 struct new_utsname *p_sysid = NULL;
1734 /* Issue RPA */
1735 /* Prepare common MS IOCB */
1736 /* Request size adjusted after CT preparation */
1737 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
1739 /* Prepare CT request */
1740 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
1741 RPA_RSP_SIZE);
1742 ct_rsp = &ha->ct_sns->p.rsp;
1744 /* Prepare FDMI command arguments -- attribute block, attributes. */
1745 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
1746 size = WWN_SIZE + 4;
1748 /* Attributes */
1749 ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1750 entries = &ct_req->req;
1752 /* FC4 types. */
1753 eiter = entries + size;
1754 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1755 eiter->len = cpu_to_be16(4 + 32);
1756 eiter->a.fc4_types[2] = 0x01;
1757 size += 4 + 32;
1759 ql_dbg(ql_dbg_disc, vha, 0x2039,
1760 "FC4_TYPES=%02x %02x.\n",
1761 eiter->a.fc4_types[2],
1762 eiter->a.fc4_types[1]);
1764 /* Supported speed. */
1765 eiter = entries + size;
1766 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1767 eiter->len = cpu_to_be16(4 + 4);
1768 if (IS_CNA_CAPABLE(ha))
1769 eiter->a.sup_speed = cpu_to_be32(
1770 FDMI_PORT_SPEED_10GB);
1771 else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1772 eiter->a.sup_speed = cpu_to_be32(
1773 FDMI_PORT_SPEED_32GB|
1774 FDMI_PORT_SPEED_16GB|
1775 FDMI_PORT_SPEED_8GB);
1776 else if (IS_QLA2031(ha))
1777 eiter->a.sup_speed = cpu_to_be32(
1778 FDMI_PORT_SPEED_16GB|
1779 FDMI_PORT_SPEED_8GB|
1780 FDMI_PORT_SPEED_4GB);
1781 else if (IS_QLA25XX(ha))
1782 eiter->a.sup_speed = cpu_to_be32(
1783 FDMI_PORT_SPEED_8GB|
1784 FDMI_PORT_SPEED_4GB|
1785 FDMI_PORT_SPEED_2GB|
1786 FDMI_PORT_SPEED_1GB);
1787 else if (IS_QLA24XX_TYPE(ha))
1788 eiter->a.sup_speed = cpu_to_be32(
1789 FDMI_PORT_SPEED_4GB|
1790 FDMI_PORT_SPEED_2GB|
1791 FDMI_PORT_SPEED_1GB);
1792 else if (IS_QLA23XX(ha))
1793 eiter->a.sup_speed = cpu_to_be32(
1794 FDMI_PORT_SPEED_2GB|
1795 FDMI_PORT_SPEED_1GB);
1796 else
1797 eiter->a.sup_speed = cpu_to_be32(
1798 FDMI_PORT_SPEED_1GB);
1799 size += 4 + 4;
1801 ql_dbg(ql_dbg_disc, vha, 0x203a,
1802 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1804 /* Current speed. */
1805 eiter = entries + size;
1806 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1807 eiter->len = cpu_to_be16(4 + 4);
1808 switch (ha->link_data_rate) {
1809 case PORT_SPEED_1GB:
1810 eiter->a.cur_speed =
1811 cpu_to_be32(FDMI_PORT_SPEED_1GB);
1812 break;
1813 case PORT_SPEED_2GB:
1814 eiter->a.cur_speed =
1815 cpu_to_be32(FDMI_PORT_SPEED_2GB);
1816 break;
1817 case PORT_SPEED_4GB:
1818 eiter->a.cur_speed =
1819 cpu_to_be32(FDMI_PORT_SPEED_4GB);
1820 break;
1821 case PORT_SPEED_8GB:
1822 eiter->a.cur_speed =
1823 cpu_to_be32(FDMI_PORT_SPEED_8GB);
1824 break;
1825 case PORT_SPEED_10GB:
1826 eiter->a.cur_speed =
1827 cpu_to_be32(FDMI_PORT_SPEED_10GB);
1828 break;
1829 case PORT_SPEED_16GB:
1830 eiter->a.cur_speed =
1831 cpu_to_be32(FDMI_PORT_SPEED_16GB);
1832 break;
1833 case PORT_SPEED_32GB:
1834 eiter->a.cur_speed =
1835 cpu_to_be32(FDMI_PORT_SPEED_32GB);
1836 break;
1837 default:
1838 eiter->a.cur_speed =
1839 cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
1840 break;
1842 size += 4 + 4;
1844 ql_dbg(ql_dbg_disc, vha, 0x203b,
1845 "Current_Speed=%x.\n", eiter->a.cur_speed);
1847 /* Max frame size. */
1848 eiter = entries + size;
1849 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1850 eiter->len = cpu_to_be16(4 + 4);
1851 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
1852 le16_to_cpu(icb24->frame_payload_size) :
1853 le16_to_cpu(ha->init_cb->frame_payload_size);
1854 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
1855 size += 4 + 4;
1857 ql_dbg(ql_dbg_disc, vha, 0x203c,
1858 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1860 /* OS device name. */
1861 eiter = entries + size;
1862 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1863 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1864 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1865 alen = strlen(eiter->a.os_dev_name);
1866 alen += 4 - (alen & 3);
1867 eiter->len = cpu_to_be16(4 + alen);
1868 size += 4 + alen;
1870 ql_dbg(ql_dbg_disc, vha, 0x204b,
1871 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
1873 /* Hostname. */
1874 eiter = entries + size;
1875 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1876 p_sysid = utsname();
1877 if (p_sysid) {
1878 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1879 "%s", p_sysid->nodename);
1880 } else {
1881 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1882 "%s", fc_host_system_hostname(vha->host));
1884 alen = strlen(eiter->a.host_name);
1885 alen += 4 - (alen & 3);
1886 eiter->len = cpu_to_be16(4 + alen);
1887 size += 4 + alen;
1889 ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
1891 /* Update MS request size. */
1892 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1894 ql_dbg(ql_dbg_disc, vha, 0x203e,
1895 "RPA portname %016llx, size = %d.\n",
1896 wwn_to_u64(ct_req->req.rpa.port_name), size);
1897 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1898 entries, size);
1900 /* Execute MS IOCB */
1901 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1902 sizeof(ms_iocb_entry_t));
1903 if (rval != QLA_SUCCESS) {
1904 /*EMPTY*/
1905 ql_dbg(ql_dbg_disc, vha, 0x2040,
1906 "RPA issue IOCB failed (%d).\n", rval);
1907 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1908 QLA_SUCCESS) {
1909 rval = QLA_FUNCTION_FAILED;
1910 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1911 ct_rsp->header.explanation_code ==
1912 CT_EXPL_ALREADY_REGISTERED) {
1913 ql_dbg(ql_dbg_disc, vha, 0x20cd,
1914 "RPA already registered.\n");
1915 rval = QLA_ALREADY_REGISTERED;
1918 } else {
1919 ql_dbg(ql_dbg_disc, vha, 0x2041,
1920 "RPA exiting normally.\n");
1923 return rval;
1927 * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
1928 * @vha: HA context
1930 * Returns 0 on success.
1932 static int
1933 qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
1935 int rval, alen;
1936 uint32_t size, sn;
1937 ms_iocb_entry_t *ms_pkt;
1938 struct ct_sns_req *ct_req;
1939 struct ct_sns_rsp *ct_rsp;
1940 void *entries;
1941 struct ct_fdmiv2_hba_attr *eiter;
1942 struct qla_hw_data *ha = vha->hw;
1943 struct new_utsname *p_sysid = NULL;
1945 /* Issue RHBA */
1946 /* Prepare common MS IOCB */
1947 /* Request size adjusted after CT preparation */
1948 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1950 /* Prepare CT request */
1951 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
1952 RHBA_RSP_SIZE);
1953 ct_rsp = &ha->ct_sns->p.rsp;
1955 /* Prepare FDMI command arguments -- attribute block, attributes. */
1956 memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
1957 ct_req->req.rhba2.entry_count = cpu_to_be32(1);
1958 memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
1959 size = 2 * WWN_SIZE + 4 + 4;
1961 /* Attributes */
1962 ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
1963 entries = &ct_req->req;
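/*
 * Each FDMI attribute that follows is encoded as a 4-byte header
 * (2-byte type, 2-byte length that includes the header itself)
 * followed by the value padded to a 4-byte boundary.  "size" tracks
 * the running offset into the CT payload, so "entries + size" always
 * points at the next free attribute slot.
 */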
1965 /* Nodename. */
1966 eiter = entries + size;
1967 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1968 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1969 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1970 size += 4 + WWN_SIZE;
1972 ql_dbg(ql_dbg_disc, vha, 0x207d,
1973 "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
1975 /* Manufacturer. */
1976 eiter = entries + size;
1977 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1978 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1979 "%s", "QLogic Corporation");
1980 eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
1981 alen = strlen(eiter->a.manufacturer);
1982 alen += 4 - (alen & 3);
1983 eiter->len = cpu_to_be16(4 + alen);
1984 size += 4 + alen;
1986 ql_dbg(ql_dbg_disc, vha, 0x20a5,
1987 "Manufacturer = %s.\n", eiter->a.manufacturer);
1989 /* Serial number. */
1990 eiter = entries + size;
1991 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1992 if (IS_FWI2_CAPABLE(ha))
1993 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1994 sizeof(eiter->a.serial_num));
1995 else {
1996 sn = ((ha->serial0 & 0x1f) << 16) |
1997 (ha->serial2 << 8) | ha->serial1;
1998 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1999 "%c%05d", 'A' + sn / 100000, sn % 100000);
2001 alen = strlen(eiter->a.serial_num);
2002 alen += 4 - (alen & 3);
2003 eiter->len = cpu_to_be16(4 + alen);
2004 size += 4 + alen;
2006 ql_dbg(ql_dbg_disc, vha, 0x20a6,
2007 "Serial no. = %s.\n", eiter->a.serial_num);
2009 /* Model name. */
2010 eiter = entries + size;
2011 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
2012 snprintf(eiter->a.model, sizeof(eiter->a.model),
2013 "%s", ha->model_number);
2014 alen = strlen(eiter->a.model);
2015 alen += 4 - (alen & 3);
2016 eiter->len = cpu_to_be16(4 + alen);
2017 size += 4 + alen;
2019 ql_dbg(ql_dbg_disc, vha, 0x20a7,
2020 "Model Name = %s.\n", eiter->a.model);
2022 /* Model description. */
2023 eiter = entries + size;
2024 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
2025 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
2026 "%s", ha->model_desc);
2027 alen = strlen(eiter->a.model_desc);
2028 alen += 4 - (alen & 3);
2029 eiter->len = cpu_to_be16(4 + alen);
2030 size += 4 + alen;
2032 ql_dbg(ql_dbg_disc, vha, 0x20a8,
2033 "Model Desc = %s.\n", eiter->a.model_desc);
2035 /* Hardware version. */
2036 eiter = entries + size;
2037 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
2038 if (!IS_FWI2_CAPABLE(ha)) {
2039 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2040 "HW:%s", ha->adapter_id);
2041 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
2042 sizeof(eiter->a.hw_version))) {
2044 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
2045 sizeof(eiter->a.hw_version))) {
2047 } else {
2048 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2049 "HW:%s", ha->adapter_id);
2051 alen = strlen(eiter->a.hw_version);
2052 alen += 4 - (alen & 3);
2053 eiter->len = cpu_to_be16(4 + alen);
2054 size += 4 + alen;
2056 ql_dbg(ql_dbg_disc, vha, 0x20a9,
2057 "Hardware ver = %s.\n", eiter->a.hw_version);
2059 /* Driver version. */
2060 eiter = entries + size;
2061 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
2062 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
2063 "%s", qla2x00_version_str);
2064 alen = strlen(eiter->a.driver_version);
2065 alen += 4 - (alen & 3);
2066 eiter->len = cpu_to_be16(4 + alen);
2067 size += 4 + alen;
2069 ql_dbg(ql_dbg_disc, vha, 0x20aa,
2070 "Driver ver = %s.\n", eiter->a.driver_version);
2072 /* Option ROM version. */
2073 eiter = entries + size;
2074 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
2075 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
2076 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2077 alen = strlen(eiter->a.orom_version);
2078 alen += 4 - (alen & 3);
2079 eiter->len = cpu_to_be16(4 + alen);
2080 size += 4 + alen;
2082 ql_dbg(ql_dbg_disc, vha , 0x20ab,
2083 "Optrom version = %d.%02d.\n", eiter->a.orom_version[1],
2084 eiter->a.orom_version[0]);
2086 /* Firmware version */
2087 eiter = entries + size;
2088 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
2089 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
2090 sizeof(eiter->a.fw_version));
2091 alen = strlen(eiter->a.fw_version);
2092 alen += 4 - (alen & 3);
2093 eiter->len = cpu_to_be16(4 + alen);
2094 size += 4 + alen;
2096 ql_dbg(ql_dbg_disc, vha, 0x20ac,
2097 "Firmware vers = %s.\n", eiter->a.fw_version);
2099 /* OS Name and Version */
2100 eiter = entries + size;
2101 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
2102 p_sysid = utsname();
2103 if (p_sysid) {
2104 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2105 "%s %s %s",
2106 p_sysid->sysname, p_sysid->release, p_sysid->version);
2107 } else {
2108 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2109 "%s %s", "Linux", fc_host_system_hostname(vha->host));
2111 alen = strlen(eiter->a.os_version);
2112 alen += 4 - (alen & 3);
2113 eiter->len = cpu_to_be16(4 + alen);
2114 size += 4 + alen;
2116 ql_dbg(ql_dbg_disc, vha, 0x20ae,
2117 "OS Name and Version = %s.\n", eiter->a.os_version);
2119 /* MAX CT Payload Length */
2120 eiter = entries + size;
2121 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
2122 eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size);
2124 eiter->len = cpu_to_be16(4 + 4);
2125 size += 4 + 4;
2127 ql_dbg(ql_dbg_disc, vha, 0x20af,
2128 "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
2130 /* Node Symbolic Name */
2131 eiter = entries + size;
2132 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
2133 qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
2134 sizeof(eiter->a.sym_name));
2135 alen = strlen(eiter->a.sym_name);
2136 alen += 4 - (alen & 3);
2137 eiter->len = cpu_to_be16(4 + alen);
2138 size += 4 + alen;
2140 ql_dbg(ql_dbg_disc, vha, 0x20b0,
2141 "Symbolic Name = %s.\n", eiter->a.sym_name);
2143 /* Vendor Id */
2144 eiter = entries + size;
2145 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
2146 eiter->a.vendor_id = cpu_to_be32(0x1077);
2147 eiter->len = cpu_to_be16(4 + 4);
2148 size += 4 + 4;
2150 ql_dbg(ql_dbg_disc, vha, 0x20b1,
2151 "Vendor Id = %x.\n", eiter->a.vendor_id);
2153 /* Num Ports */
2154 eiter = entries + size;
2155 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
2156 eiter->a.num_ports = cpu_to_be32(1);
2157 eiter->len = cpu_to_be16(4 + 4);
2158 size += 4 + 4;
2160 ql_dbg(ql_dbg_disc, vha, 0x20b2,
2161 "Port Num = %x.\n", eiter->a.num_ports);
2163 /* Fabric Name */
2164 eiter = entries + size;
2165 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
2166 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2167 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2168 size += 4 + WWN_SIZE;
2170 ql_dbg(ql_dbg_disc, vha, 0x20b3,
2171 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2173 /* BIOS Version */
2174 eiter = entries + size;
2175 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
2176 snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
2177 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2178 alen = strlen(eiter->a.bios_name);
2179 alen += 4 - (alen & 3);
2180 eiter->len = cpu_to_be16(4 + alen);
2181 size += 4 + alen;
2183 ql_dbg(ql_dbg_disc, vha, 0x20b4,
2184 "BIOS Name = %s\n", eiter->a.bios_name);
2186 /* Vendor Identifier */
2187 eiter = entries + size;
2188 eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
2189 snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
2190 "%s", "QLGC");
2191 alen = strlen(eiter->a.vendor_identifier);
2192 alen += 4 - (alen & 3);
2193 eiter->len = cpu_to_be16(4 + alen);
2194 size += 4 + alen;
2196 ql_dbg(ql_dbg_disc, vha, 0x201b,
2197 "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
2199 /* Update MS request size. */
2200 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2202 ql_dbg(ql_dbg_disc, vha, 0x20b5,
2203 "RHBA identifier = %016llx.\n",
2204 wwn_to_u64(ct_req->req.rhba2.hba_identifier));
2205 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
2206 entries, size);
2208 /* Execute MS IOCB */
2209 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2210 sizeof(ms_iocb_entry_t));
2211 if (rval != QLA_SUCCESS) {
2212 /*EMPTY*/
2213 ql_dbg(ql_dbg_disc, vha, 0x20b7,
2214 "RHBA issue IOCB failed (%d).\n", rval);
2215 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
2216 QLA_SUCCESS) {
2217 rval = QLA_FUNCTION_FAILED;
2219 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2220 ct_rsp->header.explanation_code ==
2221 CT_EXPL_ALREADY_REGISTERED) {
2222 ql_dbg(ql_dbg_disc, vha, 0x20b8,
2223 "HBA already registered.\n");
2224 rval = QLA_ALREADY_REGISTERED;
2225 } else {
2226 ql_dbg(ql_dbg_disc, vha, 0x2016,
2227 "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2228 ct_rsp->header.reason_code,
2229 ct_rsp->header.explanation_code);
2231 } else {
2232 ql_dbg(ql_dbg_disc, vha, 0x20b9,
2233 "RHBA FDMI V2 exiting normally.\n");
2236 return rval;
2240 * qla2x00_fdmi_dhba() - perform DHBA FDMI de-registration
2241 * @vha: HA context
2243 * Returns 0 on success.
2245 static int
2246 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2248 int rval;
2249 struct qla_hw_data *ha = vha->hw;
2250 ms_iocb_entry_t *ms_pkt;
2251 struct ct_sns_req *ct_req;
2252 struct ct_sns_rsp *ct_rsp;
2254 /* Issue DHBA */
2255 /* Prepare common MS IOCB */
2256 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2257 DHBA_RSP_SIZE);
2259 /* Prepare CT request */
2260 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2261 ct_rsp = &ha->ct_sns->p.rsp;
2263 /* Prepare FDMI command arguments -- portname. */
2264 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2266 ql_dbg(ql_dbg_disc, vha, 0x2036,
2267 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2269 /* Execute MS IOCB */
2270 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2271 sizeof(ms_iocb_entry_t));
2272 if (rval != QLA_SUCCESS) {
2273 /*EMPTY*/
2274 ql_dbg(ql_dbg_disc, vha, 0x2037,
2275 "DHBA issue IOCB failed (%d).\n", rval);
2276 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2277 QLA_SUCCESS) {
2278 rval = QLA_FUNCTION_FAILED;
2279 } else {
2280 ql_dbg(ql_dbg_disc, vha, 0x2038,
2281 "DHBA exiting normally.\n");
2284 return rval;
2288 * qla2x00_fdmiv2_rpa() - perform RPA FDMI v2 registration
2289 * @vha: HA context
2291 * Returns 0 on success.
2293 static int
2294 qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
2296 int rval, alen;
2297 uint32_t size;
2298 struct qla_hw_data *ha = vha->hw;
2299 ms_iocb_entry_t *ms_pkt;
2300 struct ct_sns_req *ct_req;
2301 struct ct_sns_rsp *ct_rsp;
2302 void *entries;
2303 struct ct_fdmiv2_port_attr *eiter;
2304 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
2305 struct new_utsname *p_sysid = NULL;
2307 /* Issue RPA */
2308 /* Prepare common MS IOCB */
2309 /* Request size adjusted after CT preparation */
2310 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
2312 /* Prepare CT request */
2313 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
2314 ct_rsp = &ha->ct_sns->p.rsp;
2316 /* Prepare FDMI command arguments -- attribute block, attributes. */
2317 memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
2318 size = WWN_SIZE + 4;
2320 /* Attributes */
2321 ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
2322 entries = &ct_req->req;
2324 /* FC4 types. */
2325 eiter = entries + size;
2326 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
2327 eiter->len = cpu_to_be16(4 + 32);
2328 eiter->a.fc4_types[2] = 0x01;
2329 size += 4 + 32;
2331 ql_dbg(ql_dbg_disc, vha, 0x20ba,
2332 "FC4_TYPES=%02x %02x.\n",
2333 eiter->a.fc4_types[2],
2334 eiter->a.fc4_types[1]);
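/*
 * The FC-4 TYPEs attribute is a 256-bit map, one bit per TYPE code,
 * laid out as big-endian 32-bit words: byte 2 bit 0 corresponds to
 * TYPE 0x08 (FCP-SCSI) set above, and byte 6 bit 0 corresponds to
 * TYPE 0x28 (FC-NVMe) set below when NVMe is enabled.
 */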
2336 if (vha->flags.nvme_enabled) {
2337 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
2338 ql_dbg(ql_dbg_disc, vha, 0x211f,
2339 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2340 eiter->a.fc4_types[6]);
2343 /* Supported speed. */
2344 eiter = entries + size;
2345 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
2346 eiter->len = cpu_to_be16(4 + 4);
2347 if (IS_CNA_CAPABLE(ha))
2348 eiter->a.sup_speed = cpu_to_be32(
2349 FDMI_PORT_SPEED_10GB);
2350 else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
2351 eiter->a.sup_speed = cpu_to_be32(
2352 FDMI_PORT_SPEED_32GB|
2353 FDMI_PORT_SPEED_16GB|
2354 FDMI_PORT_SPEED_8GB);
2355 else if (IS_QLA2031(ha))
2356 eiter->a.sup_speed = cpu_to_be32(
2357 FDMI_PORT_SPEED_16GB|
2358 FDMI_PORT_SPEED_8GB|
2359 FDMI_PORT_SPEED_4GB);
2360 else if (IS_QLA25XX(ha))
2361 eiter->a.sup_speed = cpu_to_be32(
2362 FDMI_PORT_SPEED_8GB|
2363 FDMI_PORT_SPEED_4GB|
2364 FDMI_PORT_SPEED_2GB|
2365 FDMI_PORT_SPEED_1GB);
2366 else if (IS_QLA24XX_TYPE(ha))
2367 eiter->a.sup_speed = cpu_to_be32(
2368 FDMI_PORT_SPEED_4GB|
2369 FDMI_PORT_SPEED_2GB|
2370 FDMI_PORT_SPEED_1GB);
2371 else if (IS_QLA23XX(ha))
2372 eiter->a.sup_speed = cpu_to_be32(
2373 FDMI_PORT_SPEED_2GB|
2374 FDMI_PORT_SPEED_1GB);
2375 else
2376 eiter->a.sup_speed = cpu_to_be32(
2377 FDMI_PORT_SPEED_1GB);
2378 size += 4 + 4;
2380 ql_dbg(ql_dbg_disc, vha, 0x20bb,
2381 "Supported Port Speed = %x.\n", eiter->a.sup_speed);
2383 /* Current speed. */
2384 eiter = entries + size;
2385 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
2386 eiter->len = cpu_to_be16(4 + 4);
2387 switch (ha->link_data_rate) {
2388 case PORT_SPEED_1GB:
2389 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
2390 break;
2391 case PORT_SPEED_2GB:
2392 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
2393 break;
2394 case PORT_SPEED_4GB:
2395 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
2396 break;
2397 case PORT_SPEED_8GB:
2398 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
2399 break;
2400 case PORT_SPEED_10GB:
2401 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
2402 break;
2403 case PORT_SPEED_16GB:
2404 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
2405 break;
2406 case PORT_SPEED_32GB:
2407 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
2408 break;
2409 default:
2410 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
2411 break;
2413 size += 4 + 4;
2415 ql_dbg(ql_dbg_disc, vha, 0x2017,
2416 "Current_Speed = %x.\n", eiter->a.cur_speed);
2418 /* Max frame size. */
2419 eiter = entries + size;
2420 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
2421 eiter->len = cpu_to_be16(4 + 4);
2422 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
2423 le16_to_cpu(icb24->frame_payload_size) :
2424 le16_to_cpu(ha->init_cb->frame_payload_size);
2425 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
2426 size += 4 + 4;
2428 ql_dbg(ql_dbg_disc, vha, 0x20bc,
2429 "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
2431 /* OS device name. */
2432 eiter = entries + size;
2433 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
2434 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
2435 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
2436 alen = strlen(eiter->a.os_dev_name);
2437 alen += 4 - (alen & 3);
2438 eiter->len = cpu_to_be16(4 + alen);
2439 size += 4 + alen;
2441 ql_dbg(ql_dbg_disc, vha, 0x20be,
2442 "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
2444 /* Hostname. */
2445 eiter = entries + size;
2446 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
2447 p_sysid = utsname();
2448 if (p_sysid) {
2449 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2450 "%s", p_sysid->nodename);
2451 } else {
2452 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2453 "%s", fc_host_system_hostname(vha->host));
2455 alen = strlen(eiter->a.host_name);
2456 alen += 4 - (alen & 3);
2457 eiter->len = cpu_to_be16(4 + alen);
2458 size += 4 + alen;
2460 ql_dbg(ql_dbg_disc, vha, 0x201a,
2461 "HostName=%s.\n", eiter->a.host_name);
2463 /* Node Name */
2464 eiter = entries + size;
2465 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
2466 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
2467 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2468 size += 4 + WWN_SIZE;
2470 ql_dbg(ql_dbg_disc, vha, 0x20c0,
2471 "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
2473 /* Port Name */
2474 eiter = entries + size;
2475 eiter->type = cpu_to_be16(FDMI_PORT_NAME);
2476 memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
2477 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2478 size += 4 + WWN_SIZE;
2480 ql_dbg(ql_dbg_disc, vha, 0x20c1,
2481 "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
2483 /* Port Symbolic Name */
2484 eiter = entries + size;
2485 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
2486 qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
2487 sizeof(eiter->a.port_sym_name));
2488 alen = strlen(eiter->a.port_sym_name);
2489 alen += 4 - (alen & 3);
2490 eiter->len = cpu_to_be16(4 + alen);
2491 size += 4 + alen;
2493 ql_dbg(ql_dbg_disc, vha, 0x20c2,
2494 "port symbolic name = %s\n", eiter->a.port_sym_name);
2496 /* Port Type */
2497 eiter = entries + size;
2498 eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
2499 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
2500 eiter->len = cpu_to_be16(4 + 4);
2501 size += 4 + 4;
2503 ql_dbg(ql_dbg_disc, vha, 0x20c3,
2504 "Port Type = %x.\n", eiter->a.port_type);
2506 /* Class of Service */
2507 eiter = entries + size;
2508 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
2509 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
2510 eiter->len = cpu_to_be16(4 + 4);
2511 size += 4 + 4;
2513 ql_dbg(ql_dbg_disc, vha, 0x20c4,
2514 "Supported COS = %08x\n", eiter->a.port_supported_cos);
2516 /* Port Fabric Name */
2517 eiter = entries + size;
2518 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2519 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2520 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2521 size += 4 + WWN_SIZE;
2523 ql_dbg(ql_dbg_disc, vha, 0x20c5,
2524 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2526 /* FC4_type */
2527 eiter = entries + size;
2528 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
2529 eiter->a.port_fc4_type[0] = 0;
2530 eiter->a.port_fc4_type[1] = 0;
2531 eiter->a.port_fc4_type[2] = 1;
2532 eiter->a.port_fc4_type[3] = 0;
2533 eiter->len = cpu_to_be16(4 + 32);
2534 size += 4 + 32;
2536 ql_dbg(ql_dbg_disc, vha, 0x20c6,
2537 "Port Active FC4 Type = %02x %02x.\n",
2538 eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
2540 if (vha->flags.nvme_enabled) {
2541 eiter->a.port_fc4_type[4] = 0;
2542 eiter->a.port_fc4_type[5] = 0;
2543 eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
2544 ql_dbg(ql_dbg_disc, vha, 0x2120,
2545 "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2546 eiter->a.port_fc4_type[6]);
2549 /* Port State */
2550 eiter = entries + size;
2551 eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2552 eiter->a.port_state = cpu_to_be32(1);
2553 eiter->len = cpu_to_be16(4 + 4);
2554 size += 4 + 4;
2556 ql_dbg(ql_dbg_disc, vha, 0x20c7,
2557 "Port State = %x.\n", eiter->a.port_state);
2559 /* Number of Ports */
2560 eiter = entries + size;
2561 eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2562 eiter->a.num_ports = cpu_to_be32(1);
2563 eiter->len = cpu_to_be16(4 + 4);
2564 size += 4 + 4;
2566 ql_dbg(ql_dbg_disc, vha, 0x20c8,
2567 "Number of ports = %x.\n", eiter->a.num_ports);
2569 /* Port Id */
2570 eiter = entries + size;
2571 eiter->type = cpu_to_be16(FDMI_PORT_ID);
2572 eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2573 eiter->len = cpu_to_be16(4 + 4);
2574 size += 4 + 4;
2576 ql_dbg(ql_dbg_disc, vha, 0x201c,
2577 "Port Id = %x.\n", eiter->a.port_id);
2579 /* Update MS request size. */
2580 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2582 ql_dbg(ql_dbg_disc, vha, 0x2018,
2583 "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
2584 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
2585 entries, size);
2587 /* Execute MS IOCB */
2588 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2589 sizeof(ms_iocb_entry_t));
2590 if (rval != QLA_SUCCESS) {
2591 /*EMPTY*/
2592 ql_dbg(ql_dbg_disc, vha, 0x20cb,
2593 "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
2594 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
2595 QLA_SUCCESS) {
2596 rval = QLA_FUNCTION_FAILED;
2597 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2598 ct_rsp->header.explanation_code ==
2599 CT_EXPL_ALREADY_REGISTERED) {
2600 ql_dbg(ql_dbg_disc, vha, 0x20ce,
2601 "RPA FDMI v2 already registered\n");
2602 rval = QLA_ALREADY_REGISTERED;
2603 } else {
2604 ql_dbg(ql_dbg_disc, vha, 0x2020,
2605 "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2606 ct_rsp->header.reason_code,
2607 ct_rsp->header.explanation_code);
2609 } else {
2610 ql_dbg(ql_dbg_disc, vha, 0x20cc,
2611 "RPA FDMI V2 exiting normally.\n");
2614 return rval;
2618 * qla2x00_fdmi_register() - perform FDMI registration, falling back from FDMI v2 to v1
2619 * @vha: HA context
2621 * Returns 0 on success.
2624 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2626 int rval = QLA_FUNCTION_FAILED;
2627 struct qla_hw_data *ha = vha->hw;
2629 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2630 IS_QLAFX00(ha))
2631 return QLA_FUNCTION_FAILED;
2633 rval = qla2x00_mgmt_svr_login(vha);
2634 if (rval)
2635 return rval;
2637 rval = qla2x00_fdmiv2_rhba(vha);
2638 if (rval) {
2639 if (rval != QLA_ALREADY_REGISTERED)
2640 goto try_fdmi;
2642 rval = qla2x00_fdmi_dhba(vha);
2643 if (rval)
2644 goto try_fdmi;
2646 rval = qla2x00_fdmiv2_rhba(vha);
2647 if (rval)
2648 goto try_fdmi;
2650 rval = qla2x00_fdmiv2_rpa(vha);
2651 if (rval)
2652 goto try_fdmi;
2654 goto out;
2656 try_fdmi:
2657 rval = qla2x00_fdmi_rhba(vha);
2658 if (rval) {
2659 if (rval != QLA_ALREADY_REGISTERED)
2660 return rval;
2662 rval = qla2x00_fdmi_dhba(vha);
2663 if (rval)
2664 return rval;
2666 rval = qla2x00_fdmi_rhba(vha);
2667 if (rval)
2668 return rval;
2670 rval = qla2x00_fdmi_rpa(vha);
2671 out:
2672 return rval;
2676 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2677 * @vha: HA context
2678 * @list: switch info entries to populate
2680 * Returns 0 on success.
2683 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2685 int rval = QLA_SUCCESS;
2686 uint16_t i;
2687 struct qla_hw_data *ha = vha->hw;
2688 ms_iocb_entry_t *ms_pkt;
2689 struct ct_sns_req *ct_req;
2690 struct ct_sns_rsp *ct_rsp;
2691 struct ct_arg arg;
2693 if (!IS_IIDMA_CAPABLE(ha))
2694 return QLA_FUNCTION_FAILED;
2696 arg.iocb = ha->ms_iocb;
2697 arg.req_dma = ha->ct_sns_dma;
2698 arg.rsp_dma = ha->ct_sns_dma;
2699 arg.req_size = GFPN_ID_REQ_SIZE;
2700 arg.rsp_size = GFPN_ID_RSP_SIZE;
2701 arg.nport_handle = NPH_SNS;
2703 for (i = 0; i < ha->max_fibre_devices; i++) {
2704 /* Issue GFPN_ID */
2705 /* Prepare common MS IOCB */
2706 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2708 /* Prepare CT request */
2709 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2710 GFPN_ID_RSP_SIZE);
2711 ct_rsp = &ha->ct_sns->p.rsp;
2713 /* Prepare CT arguments -- port_id */
2714 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2716 /* Execute MS IOCB */
2717 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2718 sizeof(ms_iocb_entry_t));
2719 if (rval != QLA_SUCCESS) {
2720 /*EMPTY*/
2721 ql_dbg(ql_dbg_disc, vha, 0x2023,
2722 "GFPN_ID issue IOCB failed (%d).\n", rval);
2723 break;
2724 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2725 "GFPN_ID") != QLA_SUCCESS) {
2726 rval = QLA_FUNCTION_FAILED;
2727 break;
2728 } else {
2729 /* Save fabric portname */
2730 memcpy(list[i].fabric_port_name,
2731 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
2734 /* Last device exit. */
2735 if (list[i].d_id.b.rsvd_1 != 0)
2736 break;
2739 return (rval);
2743 static inline struct ct_sns_req *
2744 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2745 uint16_t rsp_size)
2747 memset(p, 0, sizeof(struct ct_sns_pkt));
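/*
 * Build the CT_IU preamble for a management-service query: GS_Type
 * 0xFA selects the management service and GS_Subtype 0x01 its fabric
 * configuration server (which serves GPSC).  max_rsp_size is expressed
 * in 4-byte words and excludes the 16-byte preamble itself.
 */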
2749 p->p.req.header.revision = 0x01;
2750 p->p.req.header.gs_type = 0xFA;
2751 p->p.req.header.gs_subtype = 0x01;
2752 p->p.req.command = cpu_to_be16(cmd);
2753 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
2755 return &p->p.req;
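/*
 * qla2x00_port_speed_capability() below translates the bit-encoded
 * speed returned in a GPSC response (one bit per speed, BIT_15 =
 * 1 Gb/s) into the driver's PORT_SPEED_* constants; any unrecognized
 * bit is reported as PORT_SPEED_UNKNOWN.
 */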
2758 static uint16_t
2759 qla2x00_port_speed_capability(uint16_t speed)
2761 switch (speed) {
2762 case BIT_15:
2763 return PORT_SPEED_1GB;
2764 case BIT_14:
2765 return PORT_SPEED_2GB;
2766 case BIT_13:
2767 return PORT_SPEED_4GB;
2768 case BIT_12:
2769 return PORT_SPEED_10GB;
2770 case BIT_11:
2771 return PORT_SPEED_8GB;
2772 case BIT_10:
2773 return PORT_SPEED_16GB;
2774 case BIT_8:
2775 return PORT_SPEED_32GB;
2776 case BIT_7:
2777 return PORT_SPEED_64GB;
2778 default:
2779 return PORT_SPEED_UNKNOWN;
2784 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2785 * @vha: HA context
2786 * @list: switch info entries to populate
2788 * Returns 0 on success.
2791 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2793 int rval;
2794 uint16_t i;
2795 struct qla_hw_data *ha = vha->hw;
2796 ms_iocb_entry_t *ms_pkt;
2797 struct ct_sns_req *ct_req;
2798 struct ct_sns_rsp *ct_rsp;
2799 struct ct_arg arg;
2801 if (!IS_IIDMA_CAPABLE(ha))
2802 return QLA_FUNCTION_FAILED;
2803 if (!ha->flags.gpsc_supported)
2804 return QLA_FUNCTION_FAILED;
2806 rval = qla2x00_mgmt_svr_login(vha);
2807 if (rval)
2808 return rval;
2810 arg.iocb = ha->ms_iocb;
2811 arg.req_dma = ha->ct_sns_dma;
2812 arg.rsp_dma = ha->ct_sns_dma;
2813 arg.req_size = GPSC_REQ_SIZE;
2814 arg.rsp_size = GPSC_RSP_SIZE;
2815 arg.nport_handle = vha->mgmt_svr_loop_id;
2817 for (i = 0; i < ha->max_fibre_devices; i++) {
2818 /* Issue GPSC */
2819 /* Prepare common MS IOCB */
2820 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2822 /* Prepare CT request */
2823 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2824 GPSC_RSP_SIZE);
2825 ct_rsp = &ha->ct_sns->p.rsp;
2827 /* Prepare CT arguments -- port_name */
2828 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2829 WWN_SIZE);
2831 /* Execute MS IOCB */
2832 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2833 sizeof(ms_iocb_entry_t));
2834 if (rval != QLA_SUCCESS) {
2835 /*EMPTY*/
2836 ql_dbg(ql_dbg_disc, vha, 0x2059,
2837 "GPSC issue IOCB failed (%d).\n", rval);
2838 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2839 "GPSC")) != QLA_SUCCESS) {
2840 /* FM command unsupported? */
2841 if (rval == QLA_INVALID_COMMAND &&
2842 (ct_rsp->header.reason_code ==
2843 CT_REASON_INVALID_COMMAND_CODE ||
2844 ct_rsp->header.reason_code ==
2845 CT_REASON_COMMAND_UNSUPPORTED)) {
2846 ql_dbg(ql_dbg_disc, vha, 0x205a,
2847 "GPSC command unsupported, disabling "
2848 "query.\n");
2849 ha->flags.gpsc_supported = 0;
2850 rval = QLA_FUNCTION_FAILED;
2851 break;
2853 rval = QLA_FUNCTION_FAILED;
2854 } else {
2855 list->fp_speed = qla2x00_port_speed_capability(
2856 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2857 ql_dbg(ql_dbg_disc, vha, 0x205b,
2858 "GPSC ext entry - fpn "
2859 "%8phN speeds=%04x speed=%04x.\n",
2860 list[i].fabric_port_name,
2861 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2862 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2865 /* Last device exit. */
2866 if (list[i].d_id.b.rsvd_1 != 0)
2867 break;
2870 return (rval);
2874 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2876 * @vha: HA context
2877 * @list: switch info entries to populate
2880 void
2881 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2883 int rval;
2884 uint16_t i;
2886 ms_iocb_entry_t *ms_pkt;
2887 struct ct_sns_req *ct_req;
2888 struct ct_sns_rsp *ct_rsp;
2889 struct qla_hw_data *ha = vha->hw;
2890 uint8_t fcp_scsi_features = 0, nvme_features = 0;
2891 struct ct_arg arg;
2893 for (i = 0; i < ha->max_fibre_devices; i++) {
2894 /* Default the FC4 type to UNKNOWN so the port is still
2895 * processed by default. */
2896 list[i].fc4_type = FC4_TYPE_UNKNOWN;
2898 /* Do not attempt GFF_ID if we are not FWI_2 capable */
2899 if (!IS_FWI2_CAPABLE(ha))
2900 continue;
2902 arg.iocb = ha->ms_iocb;
2903 arg.req_dma = ha->ct_sns_dma;
2904 arg.rsp_dma = ha->ct_sns_dma;
2905 arg.req_size = GFF_ID_REQ_SIZE;
2906 arg.rsp_size = GFF_ID_RSP_SIZE;
2907 arg.nport_handle = NPH_SNS;
2909 /* Prepare common MS IOCB */
2910 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2912 /* Prepare CT request */
2913 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2914 GFF_ID_RSP_SIZE);
2915 ct_rsp = &ha->ct_sns->p.rsp;
2917 /* Prepare CT arguments -- port_id */
2918 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2920 /* Execute MS IOCB */
2921 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2922 sizeof(ms_iocb_entry_t));
2924 if (rval != QLA_SUCCESS) {
2925 ql_dbg(ql_dbg_disc, vha, 0x205c,
2926 "GFF_ID issue IOCB failed (%d).\n", rval);
2927 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2928 "GFF_ID") != QLA_SUCCESS) {
2929 ql_dbg(ql_dbg_disc, vha, 0x205d,
2930 "GFF_ID IOCB status had a failure status code.\n");
2931 } else {
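/*
 * The GFF_ID response carries a 4-bit FC-4 Features field per
 * TYPE code.  A non-zero nibble at the FCP-SCSI (TYPE 0x08) or
 * NVMe (TYPE 0x28) offset means the remote port has registered
 * that FC-4, with the bits describing its initiator/target roles.
 */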
2932 fcp_scsi_features =
2933 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2934 fcp_scsi_features &= 0x0f;
2936 if (fcp_scsi_features) {
2937 list[i].fc4_type = FS_FC4TYPE_FCP;
2938 list[i].fc4_features = fcp_scsi_features;
2941 nvme_features =
2942 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2943 nvme_features &= 0xf;
2945 if (nvme_features) {
2946 list[i].fc4_type |= FS_FC4TYPE_NVME;
2947 list[i].fc4_features = nvme_features;
2951 /* Last device exit. */
2952 if (list[i].d_id.b.rsvd_1 != 0)
2953 break;
2957 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
2959 struct qla_work_evt *e;
2961 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
2962 if (!e)
2963 return QLA_FUNCTION_FAILED;
2965 e->u.fcport.fcport = fcport;
2966 return qla2x00_post_work(vha, e);
2969 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
2971 struct fc_port *fcport = ea->fcport;
2973 ql_dbg(ql_dbg_disc, vha, 0x20d8,
2974 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2975 __func__, fcport->port_name, fcport->disc_state,
2976 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
2977 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id);
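/*
 * sp->gen1/gen2 snapshot the fcport's rscn_gen/login_gen at the time
 * the GPSC was queued; if either generation has moved on, the result
 * is stale and is dropped here.
 */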
2979 if (fcport->disc_state == DSC_DELETE_PEND)
2980 return;
2982 if (ea->sp->gen2 != fcport->login_gen) {
2983 /* target side must have changed it. */
2984 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2985 "%s %8phC generation changed\n",
2986 __func__, fcport->port_name);
2987 return;
2988 } else if (ea->sp->gen1 != fcport->rscn_gen) {
2989 return;
2992 qla_post_iidma_work(vha, fcport);
2995 static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
2997 struct scsi_qla_host *vha = sp->vha;
2998 struct qla_hw_data *ha = vha->hw;
2999 fc_port_t *fcport = sp->fcport;
3000 struct ct_sns_rsp *ct_rsp;
3001 struct event_arg ea;
3003 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3005 ql_dbg(ql_dbg_disc, vha, 0x2053,
3006 "Async done-%s res %x, WWPN %8phC \n",
3007 sp->name, res, fcport->port_name);
3009 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3011 if (res == QLA_FUNCTION_TIMEOUT)
3012 goto done;
3014 if (res == (DID_ERROR << 16)) {
3015 /* entry status error */
3016 goto done;
3017 } else if (res) {
3018 if ((ct_rsp->header.reason_code ==
3019 CT_REASON_INVALID_COMMAND_CODE) ||
3020 (ct_rsp->header.reason_code ==
3021 CT_REASON_COMMAND_UNSUPPORTED)) {
3022 ql_dbg(ql_dbg_disc, vha, 0x2019,
3023 "GPSC command unsupported, disabling query.\n");
3024 ha->flags.gpsc_supported = 0;
3025 goto done;
3027 } else {
3028 fcport->fp_speed = qla2x00_port_speed_capability(
3029 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3031 ql_dbg(ql_dbg_disc, vha, 0x2054,
3032 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
3033 sp->name, fcport->fabric_port_name,
3034 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
3035 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3037 memset(&ea, 0, sizeof(ea));
3038 ea.rc = res;
3039 ea.fcport = fcport;
3040 ea.sp = sp;
3041 qla24xx_handle_gpsc_event(vha, &ea);
3043 done:
3044 sp->free(sp);
3047 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
3049 int rval = QLA_FUNCTION_FAILED;
3050 struct ct_sns_req *ct_req;
3051 srb_t *sp;
3053 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3054 return rval;
3056 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3057 if (!sp)
3058 goto done;
3060 sp->type = SRB_CT_PTHRU_CMD;
3061 sp->name = "gpsc";
3062 sp->gen1 = fcport->rscn_gen;
3063 sp->gen2 = fcport->login_gen;
3065 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3067 /* CT_IU preamble */
3068 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
3069 GPSC_RSP_SIZE);
3071 /* GPSC req */
3072 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
3073 WWN_SIZE);
3075 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3076 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3077 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3078 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3079 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
3080 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
3081 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
3083 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3084 sp->done = qla24xx_async_gpsc_sp_done;
3086 ql_dbg(ql_dbg_disc, vha, 0x205e,
3087 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
3088 sp->name, fcport->port_name, sp->handle,
3089 fcport->loop_id, fcport->d_id.b.domain,
3090 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3092 rval = qla2x00_start_sp(sp);
3093 if (rval != QLA_SUCCESS)
3094 goto done_free_sp;
3095 return rval;
3097 done_free_sp:
3098 sp->free(sp);
3099 done:
3100 return rval;
3103 int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
3105 struct qla_work_evt *e;
3107 if (test_bit(UNLOADING, &vha->dpc_flags) ||
3108 (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)))
3109 return 0;
3111 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
3112 if (!e)
3113 return QLA_FUNCTION_FAILED;
3115 e->u.gpnid.id = *id;
3116 return qla2x00_post_work(vha, e);
3119 void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
3121 struct srb_iocb *c = &sp->u.iocb_cmd;
3123 switch (sp->type) {
3124 case SRB_ELS_DCMD:
3125 qla2x00_els_dcmd2_free(vha, &c->u.els_plogi);
3126 break;
3127 case SRB_CT_PTHRU_CMD:
3128 default:
3129 if (sp->u.iocb_cmd.u.ctarg.req) {
3130 dma_free_coherent(&vha->hw->pdev->dev,
3131 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3132 sp->u.iocb_cmd.u.ctarg.req,
3133 sp->u.iocb_cmd.u.ctarg.req_dma);
3134 sp->u.iocb_cmd.u.ctarg.req = NULL;
3137 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3138 dma_free_coherent(&vha->hw->pdev->dev,
3139 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3140 sp->u.iocb_cmd.u.ctarg.rsp,
3141 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3142 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3144 break;
3147 sp->free(sp);
3150 void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3152 fc_port_t *fcport, *conflict, *t;
3153 u16 data[2];
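/*
 * GPN_ID completion handling: a failed query (ea->rc) means the
 * N_Port ID is gone, so matching sessions are scheduled for deletion;
 * on success the WWPN/port-ID pair is reconciled against the existing
 * fcport list (resolving conflicts, re-validating or re-logging-in the
 * session, or creating a new one).
 */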
3155 ql_dbg(ql_dbg_disc, vha, 0xffff,
3156 "%s %d port_id: %06x\n",
3157 __func__, __LINE__, ea->id.b24);
3159 if (ea->rc) {
3160 /* cable is disconnected */
3161 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3162 if (fcport->d_id.b24 == ea->id.b24)
3163 fcport->scan_state = QLA_FCPORT_SCAN;
3165 qlt_schedule_sess_for_deletion(fcport);
3167 } else {
3168 /* cable is connected */
3169 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
3170 if (fcport) {
3171 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3172 list) {
3173 if ((conflict->d_id.b24 == ea->id.b24) &&
3174 (fcport != conflict))
3176 * Two fcports have conflicting N_Port IDs, or an existing
3177 * fcport's N_Port ID conflicts with the new fcport.
3181 conflict->scan_state = QLA_FCPORT_SCAN;
3183 qlt_schedule_sess_for_deletion(conflict);
3186 fcport->scan_needed = 0;
3187 fcport->rscn_gen++;
3188 fcport->scan_state = QLA_FCPORT_FOUND;
3189 fcport->flags |= FCF_FABRIC_DEVICE;
3190 if (fcport->login_retry == 0) {
3191 fcport->login_retry =
3192 vha->hw->login_retry_count;
3193 ql_dbg(ql_dbg_disc, vha, 0xffff,
3194 "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
3195 fcport->port_name, fcport->loop_id,
3196 fcport->login_retry);
3198 switch (fcport->disc_state) {
3199 case DSC_LOGIN_COMPLETE:
3200 /* recheck session is still intact. */
3201 ql_dbg(ql_dbg_disc, vha, 0x210d,
3202 "%s %d %8phC revalidate session with ADISC\n",
3203 __func__, __LINE__, fcport->port_name);
3204 data[0] = data[1] = 0;
3205 qla2x00_post_async_adisc_work(vha, fcport,
3206 data);
3207 break;
3208 case DSC_DELETED:
3209 ql_dbg(ql_dbg_disc, vha, 0x210d,
3210 "%s %d %8phC login\n", __func__, __LINE__,
3211 fcport->port_name);
3212 fcport->d_id = ea->id;
3213 qla24xx_fcport_handle_login(vha, fcport);
3214 break;
3215 case DSC_DELETE_PEND:
3216 fcport->d_id = ea->id;
3217 break;
3218 default:
3219 fcport->d_id = ea->id;
3220 break;
3222 } else {
3223 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3224 list) {
3225 if (conflict->d_id.b24 == ea->id.b24) {
3226 /* Two fcports have conflicting N_Port IDs, or an existing
3227 * fcport's N_Port ID conflicts with the new fcport.
3230 ql_dbg(ql_dbg_disc, vha, 0xffff,
3231 "%s %d %8phC DS %d\n",
3232 __func__, __LINE__,
3233 conflict->port_name,
3234 conflict->disc_state);
3236 conflict->scan_state = QLA_FCPORT_SCAN;
3237 qlt_schedule_sess_for_deletion(conflict);
3241 /* create new fcport */
3242 ql_dbg(ql_dbg_disc, vha, 0x2065,
3243 "%s %d %8phC post new sess\n",
3244 __func__, __LINE__, ea->port_name);
3245 qla24xx_post_newsess_work(vha, &ea->id,
3246 ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
3251 static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
3253 struct scsi_qla_host *vha = sp->vha;
3254 struct ct_sns_req *ct_req =
3255 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3256 struct ct_sns_rsp *ct_rsp =
3257 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3258 struct event_arg ea;
3259 struct qla_work_evt *e;
3260 unsigned long flags;
3262 if (res)
3263 ql_dbg(ql_dbg_disc, vha, 0x2066,
3264 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3265 sp->name, res, sp->gen1, &ct_req->req.port_id.port_id,
3266 ct_rsp->rsp.gpn_id.port_name);
3267 else
3268 ql_dbg(ql_dbg_disc, vha, 0x2066,
3269 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3270 sp->name, sp->gen1, &ct_req->req.port_id.port_id,
3271 ct_rsp->rsp.gpn_id.port_name);
3273 memset(&ea, 0, sizeof(ea));
3274 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
3275 ea.sp = sp;
3276 ea.id = be_to_port_id(ct_req->req.port_id.port_id);
3277 ea.rc = res;
3279 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3280 list_del(&sp->elem);
3281 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3283 if (res) {
3284 if (res == QLA_FUNCTION_TIMEOUT) {
3285 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3286 sp->free(sp);
3287 return;
3289 } else if (sp->gen1) {
3290 /* There was another RSCN for this Nport ID */
3291 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3292 sp->free(sp);
3293 return;
3296 qla24xx_handle_gpnid_event(vha, &ea);
3298 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3299 if (!e) {
3300 /* Could not queue unmap work; free the buffers inline, even if the kernel warns, to avoid a memory leak. */
3301 dma_free_coherent(&vha->hw->pdev->dev,
3302 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3303 sp->u.iocb_cmd.u.ctarg.req,
3304 sp->u.iocb_cmd.u.ctarg.req_dma);
3305 sp->u.iocb_cmd.u.ctarg.req = NULL;
3307 dma_free_coherent(&vha->hw->pdev->dev,
3308 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3309 sp->u.iocb_cmd.u.ctarg.rsp,
3310 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3311 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3313 sp->free(sp);
3314 return;
3317 e->u.iosb.sp = sp;
3318 qla2x00_post_work(vha, e);
3321 /* Get WWPN with Nport ID. */
3322 int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3324 int rval = QLA_FUNCTION_FAILED;
3325 struct ct_sns_req *ct_req;
3326 srb_t *sp, *tsp;
3327 struct ct_sns_pkt *ct_sns;
3328 unsigned long flags;
3330 if (!vha->flags.online)
3331 goto done;
3333 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3334 if (!sp)
3335 goto done;
3337 sp->type = SRB_CT_PTHRU_CMD;
3338 sp->name = "gpnid";
3339 sp->u.iocb_cmd.u.ctarg.id = *id;
3340 sp->gen1 = 0;
3341 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
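/*
 * De-duplicate in-flight queries: if a GPN_ID for the same N_Port ID
 * is already on gpnid_list, bump its gen1 (so its completion re-posts
 * the work) and drop this request instead of issuing a second one.
 */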
3343 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3344 list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3345 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3346 tsp->gen1++;
3347 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3348 sp->free(sp);
3349 goto done;
3352 list_add_tail(&sp->elem, &vha->gpnid_list);
3353 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3355 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3356 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3357 GFP_KERNEL);
3358 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3359 if (!sp->u.iocb_cmd.u.ctarg.req) {
3360 ql_log(ql_log_warn, vha, 0xd041,
3361 "Failed to allocate ct_sns request.\n");
3362 goto done_free_sp;
3365 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3366 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3367 GFP_KERNEL);
3368 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
3369 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3370 ql_log(ql_log_warn, vha, 0xd042,
3371 "Failed to allocate ct_sns request.\n");
3372 goto done_free_sp;
3375 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
3376 memset(ct_sns, 0, sizeof(*ct_sns));
3378 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3379 /* CT_IU preamble */
3380 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
3382 /* GPN_ID req */
3383 ct_req->req.port_id.port_id = port_id_to_be_id(*id);
3385 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
3386 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
3387 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3389 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3390 sp->done = qla2x00_async_gpnid_sp_done;
3392 ql_dbg(ql_dbg_disc, vha, 0x2067,
3393 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3394 sp->handle, &ct_req->req.port_id.port_id);
3396 rval = qla2x00_start_sp(sp);
3397 if (rval != QLA_SUCCESS)
3398 goto done_free_sp;
3400 return rval;
3402 done_free_sp:
3403 spin_lock_irqsave(&vha->hw->vport_slock, flags);
3404 list_del(&sp->elem);
3405 spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
3407 if (sp->u.iocb_cmd.u.ctarg.req) {
3408 dma_free_coherent(&vha->hw->pdev->dev,
3409 sizeof(struct ct_sns_pkt),
3410 sp->u.iocb_cmd.u.ctarg.req,
3411 sp->u.iocb_cmd.u.ctarg.req_dma);
3412 sp->u.iocb_cmd.u.ctarg.req = NULL;
3414 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3415 dma_free_coherent(&vha->hw->pdev->dev,
3416 sizeof(struct ct_sns_pkt),
3417 sp->u.iocb_cmd.u.ctarg.rsp,
3418 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3419 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3422 sp->free(sp);
3423 done:
3424 return rval;
3427 void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3429 fc_port_t *fcport = ea->fcport;
3431 qla24xx_post_gnl_work(vha, fcport);
3434 void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
3436 struct scsi_qla_host *vha = sp->vha;
3437 fc_port_t *fcport = sp->fcport;
3438 struct ct_sns_rsp *ct_rsp;
3439 struct event_arg ea;
3440 uint8_t fc4_scsi_feat;
3441 uint8_t fc4_nvme_feat;
3443 ql_dbg(ql_dbg_disc, vha, 0x2133,
3444 "Async done-%s res %x ID %x. %8phC\n",
3445 sp->name, res, fcport->d_id.b24, fcport->port_name);
3447 fcport->flags &= ~FCF_ASYNC_SENT;
3448 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3449 fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3450 fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
3453 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3454 * The format of the FC-4 Features object, as defined by the FC-4,
3455 * shall be an array of 4-bit values, one for each type code value.
3457 if (!res) {
3458 if (fc4_scsi_feat & 0xf) {
3459 /* w1 b00:03 */
3460 fcport->fc4_type = FS_FC4TYPE_FCP;
3461 fcport->fc4_features = fc4_scsi_feat & 0xf;
3464 if (fc4_nvme_feat & 0xf) {
3465 /* w5 [00:03]/28h */
3466 fcport->fc4_type |= FS_FC4TYPE_NVME;
3467 fcport->fc4_features = fc4_nvme_feat & 0xf;
3471 memset(&ea, 0, sizeof(ea));
3472 ea.sp = sp;
3473 ea.fcport = sp->fcport;
3474 ea.rc = res;
3476 qla24xx_handle_gffid_event(vha, &ea);
3477 sp->free(sp);
3480 /* Get FC4 Feature with Nport ID. */
3481 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
3483 int rval = QLA_FUNCTION_FAILED;
3484 struct ct_sns_req *ct_req;
3485 srb_t *sp;
3487 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3488 return rval;
3490 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3491 if (!sp)
3492 return rval;
3494 fcport->flags |= FCF_ASYNC_SENT;
3495 sp->type = SRB_CT_PTHRU_CMD;
3496 sp->name = "gffid";
3497 sp->gen1 = fcport->rscn_gen;
3498 sp->gen2 = fcport->login_gen;
3500 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3501 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3503 /* CT_IU preamble */
3504 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
3505 GFF_ID_RSP_SIZE);
3507 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3508 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3509 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
3511 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3512 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3513 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3514 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3515 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3516 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3517 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3519 sp->done = qla24xx_async_gffid_sp_done;
3521 ql_dbg(ql_dbg_disc, vha, 0x2132,
3522 "Async-%s hdl=%x %8phC.\n", sp->name,
3523 sp->handle, fcport->port_name);
3525 rval = qla2x00_start_sp(sp);
3526 if (rval != QLA_SUCCESS)
3527 goto done_free_sp;
3529 return rval;
3530 done_free_sp:
3531 sp->free(sp);
3532 fcport->flags &= ~FCF_ASYNC_SENT;
3533 return rval;
3536 /* GPN_FT + GNN_FT*/
3537 static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3539 struct qla_hw_data *ha = vha->hw;
3540 scsi_qla_host_t *vp;
3541 unsigned long flags;
3542 u64 twwn;
3543 int rc = 0;
3545 if (!ha->num_vhosts)
3546 return 0;
3548 spin_lock_irqsave(&ha->vport_slock, flags);
3549 list_for_each_entry(vp, &ha->vp_list, list) {
3550 twwn = wwn_to_u64(vp->port_name);
3551 if (wwn == twwn) {
3552 rc = 1;
3553 break;
3556 spin_unlock_irqrestore(&ha->vport_slock, flags);
3558 return rc;
3561 void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3563 fc_port_t *fcport;
3564 u32 i, rc;
3565 bool found;
3566 struct fab_scan_rp *rp, *trp;
3567 unsigned long flags;
3568 u8 recheck = 0;
3569 u16 dup = 0, dup_cnt = 0;
3571 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3572 "%s enter\n", __func__);
3574 if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3575 ql_dbg(ql_dbg_disc, vha, 0xffff,
3576 "%s scan stop due to chip reset %x/%x\n",
3577 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
3578 goto out;
3581 rc = sp->rc;
3582 if (rc) {
3583 vha->scan.scan_retry++;
3584 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3585 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3586 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3587 goto out;
3588 } else {
3589 ql_dbg(ql_dbg_disc, vha, 0xffff,
3590 "%s: Fabric scan failed for %d retries.\n",
3591 __func__, vha->scan.scan_retry);
3593 * Unable to scan any rports. The logout loop below
3594 * will unregister all sessions.
3596 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3597 if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
3598 fcport->scan_state = QLA_FCPORT_SCAN;
3599 fcport->logout_on_delete = 0;
3602 goto login_logout;
3605 vha->scan.scan_retry = 0;
3607 list_for_each_entry(fcport, &vha->vp_fcports, list)
3608 fcport->scan_state = QLA_FCPORT_SCAN;
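/*
 * Reconcile the fabric scan results: every fcport starts as
 * QLA_FCPORT_SCAN, entries returned by the switch mark matching ports
 * FOUND (new WWPNs get a new-session work item), and the logout pass
 * below deletes fabric ports that were not rediscovered.
 */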
3610 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3611 u64 wwn;
3612 int k;
3614 rp = &vha->scan.l[i];
3615 found = false;
3617 wwn = wwn_to_u64(rp->port_name);
3618 if (wwn == 0)
3619 continue;
3621 /* Remove duplicate NPORT ID entries from switch data base */
3622 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
3623 trp = &vha->scan.l[k];
3624 if (rp->id.b24 == trp->id.b24) {
3625 dup = 1;
3626 dup_cnt++;
3627 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
3628 vha, 0xffff,
3629 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
3630 rp->id.b24, rp->port_name, trp->port_name);
3631 memset(trp, 0, sizeof(*trp));
3635 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3636 continue;
3638 /* Bypass reserved domain fields. */
3639 if ((rp->id.b.domain & 0xf0) == 0xf0)
3640 continue;
3642 /* Bypass virtual ports of the same host. */
3643 if (qla2x00_is_a_vp(vha, wwn))
3644 continue;
3646 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3647 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3648 continue;
3649 fcport->scan_state = QLA_FCPORT_FOUND;
3650 found = true;
3652 * If device was not a fabric device before.
3654 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3655 qla2x00_clear_loop_id(fcport);
3656 fcport->flags |= FCF_FABRIC_DEVICE;
3657 } else if (fcport->d_id.b24 != rp->id.b24 ||
3658 fcport->scan_needed) {
3659 qlt_schedule_sess_for_deletion(fcport);
3661 fcport->d_id.b24 = rp->id.b24;
3662 fcport->scan_needed = 0;
3663 break;
3666 if (!found) {
3667 ql_dbg(ql_dbg_disc, vha, 0xffff,
3668 "%s %d %8phC post new sess\n",
3669 __func__, __LINE__, rp->port_name);
3670 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3671 rp->node_name, NULL, rp->fc4type);
3675 if (dup) {
3676 ql_log(ql_log_warn, vha, 0xffff,
3677 "Detected %d duplicate NPORT ID(s) from switch data base\n",
3678 dup_cnt);
3681 login_logout:
3683 * Logout all previous fabric dev marked lost, except FCP2 devices.
3685 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3686 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3687 fcport->scan_needed = 0;
3688 continue;
3691 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3692 fcport->scan_needed = 0;
3693 if ((qla_dual_mode_enabled(vha) ||
3694 qla_ini_mode_enabled(vha)) &&
3695 atomic_read(&fcport->state) == FCS_ONLINE) {
3696 if (fcport->loop_id != FC_NO_LOOP_ID) {
3697 if (fcport->flags & FCF_FCP2_DEVICE)
3698 fcport->logout_on_delete = 0;
3700 ql_dbg(ql_dbg_disc, vha, 0x20f0,
3701 "%s %d %8phC post del sess\n",
3702 __func__, __LINE__,
3703 fcport->port_name);
3705 qlt_schedule_sess_for_deletion(fcport);
3706 continue;
3709 } else {
3710 if (fcport->scan_needed ||
3711 fcport->disc_state != DSC_LOGIN_COMPLETE) {
3712 if (fcport->login_retry == 0) {
3713 fcport->login_retry =
3714 vha->hw->login_retry_count;
3715 ql_dbg(ql_dbg_disc, vha, 0x20a3,
3716 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
3717 fcport->port_name, fcport->loop_id,
3718 fcport->login_retry);
3720 fcport->scan_needed = 0;
3721 qla24xx_fcport_handle_login(vha, fcport);
3726 recheck = 1;
3727 out:
3728 qla24xx_sp_unmap(vha, sp);
3729 spin_lock_irqsave(&vha->work_lock, flags);
3730 vha->scan.scan_flags &= ~SF_SCANNING;
3731 spin_unlock_irqrestore(&vha->work_lock, flags);
3733 if (recheck) {
3734 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3735 if (fcport->scan_needed) {
3736 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3737 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3738 break;
3744 static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
3745 srb_t *sp, int cmd)
3747 struct qla_work_evt *e;
3749 if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
3750 return QLA_PARAMETER_ERROR;
3752 e = qla2x00_alloc_work(vha, cmd);
3753 if (!e)
3754 return QLA_FUNCTION_FAILED;
3756 e->u.iosb.sp = sp;
3758 return qla2x00_post_work(vha, e);
3761 static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
3762 srb_t *sp, int cmd)
3764 struct qla_work_evt *e;
3766 if (cmd != QLA_EVT_GPNFT)
3767 return QLA_PARAMETER_ERROR;
3769 e = qla2x00_alloc_work(vha, cmd);
3770 if (!e)
3771 return QLA_FUNCTION_FAILED;
3773 e->u.gpnft.fc4_type = FC4_TYPE_NVME;
3774 e->u.gpnft.sp = sp;
3776 return qla2x00_post_work(vha, e);
3779 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
3780 struct srb *sp)
3782 struct qla_hw_data *ha = vha->hw;
3783 int num_fibre_dev = ha->max_fibre_devices;
3784 struct ct_sns_req *ct_req =
3785 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3786 struct ct_sns_gpnft_rsp *ct_rsp =
3787 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3788 struct ct_sns_gpn_ft_data *d;
3789 struct fab_scan_rp *rp;
3790 u16 cmd = be16_to_cpu(ct_req->command);
3791 u8 fc4_type = sp->gen2;
3792 int i, j, k;
3793 port_id_t id;
3794 u8 found;
3795 u64 wwn;
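/*
 * The scan list vha->scan.l is filled in passes that share this
 * helper: a GPN_FT pass claims a slot with the port ID and WWPN
 * (tagging it FCP or NVMe), while the matching GNN_FT pass finds the
 * slot by port ID and fills in the node name.  An NVMe-capable port
 * already seen via FCP just gets FS_FC4TYPE_NVME OR-ed into its entry.
 */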
3797 j = 0;
3798 for (i = 0; i < num_fibre_dev; i++) {
3799 d = &ct_rsp->entries[i];
3801 id.b.rsvd_1 = 0;
3802 id.b.domain = d->port_id[0];
3803 id.b.area = d->port_id[1];
3804 id.b.al_pa = d->port_id[2];
3805 wwn = wwn_to_u64(d->port_name);
3807 if (id.b24 == 0 || wwn == 0)
3808 continue;
3810 if (fc4_type == FC4_TYPE_FCP_SCSI) {
3811 if (cmd == GPN_FT_CMD) {
3812 rp = &vha->scan.l[j];
3813 rp->id = id;
3814 memcpy(rp->port_name, d->port_name, 8);
3815 j++;
3816 rp->fc4type = FS_FC4TYPE_FCP;
3817 } else {
3818 for (k = 0; k < num_fibre_dev; k++) {
3819 rp = &vha->scan.l[k];
3820 if (id.b24 == rp->id.b24) {
3821 memcpy(rp->node_name,
3822 d->port_name, 8);
3823 break;
3827 } else {
3828 /* Search if the fibre device supports FC4_TYPE_NVME */
3829 if (cmd == GPN_FT_CMD) {
3830 found = 0;
3832 for (k = 0; k < num_fibre_dev; k++) {
3833 rp = &vha->scan.l[k];
3834 if (!memcmp(rp->port_name,
3835 d->port_name, 8)) {
3837 * Supports FC-NVMe & FCP
3839 rp->fc4type |= FS_FC4TYPE_NVME;
3840 found = 1;
3841 break;
3845 /* We found new FC-NVMe only port */
3846 if (!found) {
3847 for (k = 0; k < num_fibre_dev; k++) {
3848 rp = &vha->scan.l[k];
3849 if (wwn_to_u64(rp->port_name)) {
3850 continue;
3851 } else {
3852 rp->id = id;
3853 memcpy(rp->port_name,
3854 d->port_name, 8);
3855 rp->fc4type =
3856 FS_FC4TYPE_NVME;
3857 break;
3861 } else {
3862 for (k = 0; k < num_fibre_dev; k++) {
3863 rp = &vha->scan.l[k];
3864 if (id.b24 == rp->id.b24) {
3865 memcpy(rp->node_name,
3866 d->port_name, 8);
3867 break;
3875 static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
3877 struct scsi_qla_host *vha = sp->vha;
3878 struct ct_sns_req *ct_req =
3879 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3880 u16 cmd = be16_to_cpu(ct_req->command);
3881 u8 fc4_type = sp->gen2;
3882 unsigned long flags;
3883 int rc;
3885 /* gen2 field is holding the fc4type */
3886 ql_dbg(ql_dbg_disc, vha, 0xffff,
3887 "Async done-%s res %x FC4Type %x\n",
3888 sp->name, res, sp->gen2);
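/*
 * Completion for both the GPN_FT and GNN_FT passes of the fabric scan.
 * Results are folded into the scan list here; the follow-up step runs
 * from process context via the GPNFT_DONE/GNNFT_DONE work items, and
 * when NVMe is enabled the same SRB is re-posted for another
 * GPN_FT/GNN_FT pass with FC4_TYPE_NVME before the final
 * reconciliation in qla24xx_async_gnnft_done().
 */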
3890 del_timer(&sp->u.iocb_cmd.timer);
3891 sp->rc = res;
3892 if (res) {
3893 unsigned long flags;
3894 const char *name = sp->name;
3897 * We are in an Interrupt context, queue up this
3898 * sp for GNNFT_DONE work. This will allow all
3899 * the resource to get freed up.
3901 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3902 QLA_EVT_GNNFT_DONE);
3903 if (rc) {
3904 /* Cleanup here to prevent memory leak */
3905 qla24xx_sp_unmap(vha, sp);
3907 spin_lock_irqsave(&vha->work_lock, flags);
3908 vha->scan.scan_flags &= ~SF_SCANNING;
3909 vha->scan.scan_retry++;
3910 spin_unlock_irqrestore(&vha->work_lock, flags);
3912 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3913 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3914 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3915 qla2xxx_wake_dpc(vha);
3916 } else {
3917 ql_dbg(ql_dbg_disc, vha, 0xffff,
3918 "Async done-%s rescan failed on all retries.\n",
3919 name);
3922 return;
3925 qla2x00_find_free_fcp_nvme_slot(vha, sp);
3927 if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
3928 cmd == GNN_FT_CMD) {
3929 spin_lock_irqsave(&vha->work_lock, flags);
3930 vha->scan.scan_flags &= ~SF_SCANNING;
3931 spin_unlock_irqrestore(&vha->work_lock, flags);
3933 sp->rc = res;
3934 rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
3935 if (rc) {
3936 qla24xx_sp_unmap(vha, sp);
3937 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3938 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3940 return;
3943 if (cmd == GPN_FT_CMD) {
3944 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3945 QLA_EVT_GPNFT_DONE);
3946 } else {
3947 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3948 QLA_EVT_GNNFT_DONE);
3951 if (rc) {
3952 qla24xx_sp_unmap(vha, sp);
3953 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3954 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3955 return;
/*
 * Get WWNN list for fc4_type
 *
 * It is assumed the same SRB is re-used from GPNFT to avoid
 * mem free & re-alloc
 */
static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
    u8 fc4_type)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	struct ct_sns_pkt *ct_sns;
	unsigned long flags;

	if (!vha->flags.online) {
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_flags &= ~SF_SCANNING;
		spin_unlock_irqrestore(&vha->work_lock, flags);
		goto done_free_sp;
	}

	if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: req %p rsp %p are not set up\n",
		    __func__, sp->u.iocb_cmd.u.ctarg.req,
		    sp->u.iocb_cmd.u.ctarg.rsp);
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_flags &= ~SF_SCANNING;
		spin_unlock_irqrestore(&vha->work_lock, flags);
		WARN_ON(1);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
	    __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
	    sp->u.iocb_cmd.u.ctarg.req_size);

	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gnnft";
	sp->gen1 = vha->hw->base_qpair->chip_reset;
	sp->gen2 = fc4_type;

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
	memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);

	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
	    sp->u.iocb_cmd.u.ctarg.rsp_size);

	/* GNN_FT req */
	ct_req->req.gpn_ft.port_type = fc4_type;

	sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	sp->done = qla2x00_async_gpnft_gnnft_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
	    sp->handle, ct_req->req.gpn_ft.port_type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	if (sp->u.iocb_cmd.u.ctarg.req) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.req,
		    sp->u.iocb_cmd.u.ctarg.req_dma);
		sp->u.iocb_cmd.u.ctarg.req = NULL;
	}
	if (sp->u.iocb_cmd.u.ctarg.rsp) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
	}

	sp->free(sp);

	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_flags &= ~SF_SCANNING;
	if (vha->scan.scan_flags == 0) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s: schedule\n", __func__);
		vha->scan.scan_flags |= SF_QUEUED;
		schedule_delayed_work(&vha->scan.scan_work, 5);
	}
	spin_unlock_irqrestore(&vha->work_lock, flags);

	return rval;
} /* GNNFT */
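/**
 * qla24xx_async_gpnft_done() - Follow a completed GPN_FT with a GNN_FT query
 *	for the same FC4 type (held in sp->gen2).
 * @vha: HA context
 * @sp: SRB re-used from the GPN_FT command
 */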
void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
	    "%s enter\n", __func__);
	qla24xx_async_gnnft(vha, sp, sp->gen2);
}
/* Get WWPN list for certain fc4_type */
int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	struct ct_sns_pkt *ct_sns;
	u32 rspsz;
	unsigned long flags;

	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
	    "%s enter\n", __func__);

	if (!vha->flags.online)
		return rval;

	spin_lock_irqsave(&vha->work_lock, flags);
	if (vha->scan.scan_flags & SF_SCANNING) {
		spin_unlock_irqrestore(&vha->work_lock, flags);
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s: scan active\n", __func__);
		return rval;
	}
	vha->scan.scan_flags |= SF_SCANNING;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (fc4_type == FC4_TYPE_FCP_SCSI) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s: Performing FCP Scan\n", __func__);

		if (sp)
			sp->free(sp); /* should not happen */

		sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
		if (!sp) {
			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			spin_unlock_irqrestore(&vha->work_lock, flags);
			return rval;
		}

		sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
		    sizeof(struct ct_sns_pkt),
		    &sp->u.iocb_cmd.u.ctarg.req_dma,
		    GFP_KERNEL);
		sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
		if (!sp->u.iocb_cmd.u.ctarg.req) {
			ql_log(ql_log_warn, vha, 0xffff,
			    "Failed to allocate ct_sns request.\n");
			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			spin_unlock_irqrestore(&vha->work_lock, flags);
			qla2x00_rel_sp(sp);
			return rval;
		}
		sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;

		rspsz = sizeof(struct ct_sns_gpnft_rsp) +
			((vha->hw->max_fibre_devices - 1) *
			    sizeof(struct ct_sns_gpn_ft_data));

		sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
		    rspsz,
		    &sp->u.iocb_cmd.u.ctarg.rsp_dma,
		    GFP_KERNEL);
		sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
		if (!sp->u.iocb_cmd.u.ctarg.rsp) {
			ql_log(ql_log_warn, vha, 0xffff,
			    "Failed to allocate ct_sns response.\n");
			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			spin_unlock_irqrestore(&vha->work_lock, flags);
			dma_free_coherent(&vha->hw->pdev->dev,
			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
			    sp->u.iocb_cmd.u.ctarg.req,
			    sp->u.iocb_cmd.u.ctarg.req_dma);
			sp->u.iocb_cmd.u.ctarg.req = NULL;
			qla2x00_rel_sp(sp);
			return rval;
		}
		sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;

		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s scan list size %d\n", __func__, vha->scan.size);

		memset(vha->scan.l, 0, vha->scan.size);
	} else if (!sp) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "NVME scan did not provide SP\n");
		return rval;
	}

	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gpnft";
	sp->gen1 = vha->hw->base_qpair->chip_reset;
	sp->gen2 = fc4_type;

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
	memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
	memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);

	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);

	/* GPN_FT req */
	ct_req->req.gpn_ft.port_type = fc4_type;

	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	sp->done = qla2x00_async_gpnft_gnnft_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
	    sp->handle, ct_req->req.gpn_ft.port_type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	if (sp->u.iocb_cmd.u.ctarg.req) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.req,
		    sp->u.iocb_cmd.u.ctarg.req_dma);
		sp->u.iocb_cmd.u.ctarg.req = NULL;
	}
	if (sp->u.iocb_cmd.u.ctarg.rsp) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
		    sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
	}

	sp->free(sp);

	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_flags &= ~SF_SCANNING;
	if (vha->scan.scan_flags == 0) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s: Scan scheduled.\n", __func__);
		vha->scan.scan_flags |= SF_QUEUED;
		schedule_delayed_work(&vha->scan.scan_work, 5);
	}
	spin_unlock_irqrestore(&vha->work_lock, flags);

	return rval;
}
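/**
 * qla_scan_work_fn() - Delayed work to retry a fabric scan by requesting a
 *	loop resync from the DPC thread.
 * @work: work_struct embedded in struct fab_scan
 */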
void qla_scan_work_fn(struct work_struct *work)
{
	struct fab_scan *s = container_of(to_delayed_work(work),
	    struct fab_scan, scan_work);
	struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
	    scan);
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s: schedule loop resync\n", __func__);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_flags &= ~SF_QUEUED;
	spin_unlock_irqrestore(&vha->work_lock, flags);
}
/* GNN_ID */
void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	qla24xx_post_gnl_work(vha, ea->fcport);
}
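/**
 * qla2x00_async_gnnid_sp_done() - Completion handler for an async GNN_ID
 *	query; saves the reported node name and posts follow-up GNL work.
 * @sp: completed SRB
 * @res: command result
 */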
static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	fc_port_t *fcport = sp->fcport;
	u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
	struct event_arg ea;
	u64 wwnn;

	fcport->flags &= ~FCF_ASYNC_SENT;
	wwnn = wwn_to_u64(node_name);
	if (wwnn)
		memcpy(fcport->node_name, node_name, WWN_SIZE);

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;
	ea.rc = res;

	ql_dbg(ql_dbg_disc, vha, 0x204f,
	    "Async done-%s res %x, WWPN %8phC %8phC\n",
	    sp->name, res, fcport->port_name, fcport->node_name);

	qla24xx_handle_gnnid_event(vha, &ea);

	sp->free(sp);
}
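/**
 * qla24xx_async_gnnid() - Issue an async GNN_ID (Get Node Name) query for a
 *	remote port.
 * @vha: HA context
 * @fcport: remote port to query
 *
 * Returns QLA_SUCCESS if the command was started.
 */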
int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	srb_t *sp;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gnnid";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
	    GNN_ID_RSP_SIZE);

	/* GNN_ID req */
	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);

	/* req & rsp use the same buffer */
	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
	sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	sp->done = qla2x00_async_gnnid_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
	    sp->name, fcport->port_name,
	    sp->handle, fcport->loop_id, fcport->d_id.b24);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}
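/**
 * qla24xx_post_gnnid_work() - Queue a QLA_EVT_GNNID work item for @fcport.
 * @vha: HA context
 * @fcport: remote port to query
 */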
int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;
	int ls;

	ls = atomic_read(&vha->loop_state);
	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
	    test_bit(UNLOADING, &vha->dpc_flags))
		return 0;

	e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}
/* GFPN_ID */
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		return;
	}

	qla24xx_post_gpsc_work(vha, fcport);
}
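/**
 * qla2x00_async_gfpnid_sp_done() - Completion handler for an async GFPN_ID
 *	query; saves the fabric port name reported by the name server.
 * @sp: completed SRB
 * @res: command result
 */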
static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	fc_port_t *fcport = sp->fcport;
	u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
	struct event_arg ea;
	u64 wwn;

	wwn = wwn_to_u64(fpn);
	if (wwn)
		memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;
	ea.rc = res;

	ql_dbg(ql_dbg_disc, vha, 0x204f,
	    "Async done-%s res %x, WWPN %8phC %8phC\n",
	    sp->name, res, fcport->port_name, fcport->fabric_port_name);

	qla24xx_handle_gfpnid_event(vha, &ea);

	sp->free(sp);
}
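/**
 * qla24xx_async_gfpnid() - Issue an async GFPN_ID (Get Fabric Port Name)
 *	query for a remote port.
 * @vha: HA context
 * @fcport: remote port to query
 */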
int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	srb_t *sp;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gfpnid";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
	    GFPN_ID_RSP_SIZE);

	/* GFPN_ID req */
	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);

	/* req & rsp use the same buffer */
	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
	sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	sp->done = qla2x00_async_gfpnid_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
	    sp->name, fcport->port_name,
	    sp->handle, fcport->loop_id, fcport->d_id.b24);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	return rval;
}
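/**
 * qla24xx_post_gfpnid_work() - Queue a QLA_EVT_GFPNID work item for @fcport.
 * @vha: HA context
 * @fcport: remote port to query
 */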
int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;
	int ls;

	ls = atomic_read(&vha->loop_state);
	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
	    test_bit(UNLOADING, &vha->dpc_flags))
		return 0;

	e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}