drivers/scsi/qla2xxx/qla_gs.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
9 #include <linux/utsname.h>
11 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
12 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
15 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
16 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
17 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
18 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
19 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
20 static int qla_async_rsnn_nn(scsi_qla_host_t *);
22 /**
23 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
24 * @vha: HA context
25 * @arg: CT arguments
27 * Returns a pointer to the @vha's ms_iocb.
29 void *
30 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
32 struct qla_hw_data *ha = vha->hw;
33 ms_iocb_entry_t *ms_pkt;
35 ms_pkt = (ms_iocb_entry_t *)arg->iocb;
36 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
38 ms_pkt->entry_type = MS_IOCB_TYPE;
39 ms_pkt->entry_count = 1;
40 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
41 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
42 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
43 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
44 ms_pkt->total_dsd_count = cpu_to_le16(2);
45 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
46 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
48 put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
49 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
51 put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
52 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
54 vha->qla_stats.control_requests++;
56 return (ms_pkt);
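/*
 * Rough sketch of the descriptor layout this builds, assuming the usual
 * single ct_sns buffer shared by request and response:
 *
 *   req_dsd -> arg->req_dma, length = arg->req_size   (CT command)
 *   rsp_dsd -> arg->rsp_dma, length = arg->rsp_size   (CT response)
 *
 * cmd_dsd_count = 1 and total_dsd_count = 2 describe exactly this
 * one-command, one-response split.
 */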
59 /**
60 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
61 * @vha: HA context
62 * @arg: CT arguments
64 * Returns a pointer to the @ha's ms_iocb.
66 void *
67 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
69 struct qla_hw_data *ha = vha->hw;
70 struct ct_entry_24xx *ct_pkt;
72 ct_pkt = (struct ct_entry_24xx *)arg->iocb;
73 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
75 ct_pkt->entry_type = CT_IOCB_TYPE;
76 ct_pkt->entry_count = 1;
77 ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
78 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
79 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
80 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
81 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
82 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
84 put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
85 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
87 put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
88 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
89 ct_pkt->vp_index = vha->vp_idx;
91 vha->qla_stats.control_requests++;
93 return (ct_pkt);
96 /**
97 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
98 * @p: CT request buffer
99 * @cmd: GS command
100 * @rsp_size: response size in bytes
102 * Returns a pointer to the initialized @ct_req.
104 static inline struct ct_sns_req *
105 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
107 memset(p, 0, sizeof(struct ct_sns_pkt));
109 p->p.req.header.revision = 0x01;
110 p->p.req.header.gs_type = 0xFC;
111 p->p.req.header.gs_subtype = 0x02;
112 p->p.req.command = cpu_to_be16(cmd);
113 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
115 return &p->p.req;
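/*
 * The header values above address the directory (name) server: GS type
 * 0xFC with subtype 0x02, CT revision 1.  max_rsp_size is expressed in
 * 4-byte words and excludes the 16-byte CT header, so, for example, a
 * 636-byte response buffer would be advertised as (636 - 16) / 4 = 155
 * words.
 */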
119 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
120 struct ct_sns_rsp *ct_rsp, const char *routine)
122 int rval;
123 uint16_t comp_status;
124 struct qla_hw_data *ha = vha->hw;
125 bool lid_is_sns = false;
127 rval = QLA_FUNCTION_FAILED;
128 if (ms_pkt->entry_status != 0) {
129 ql_dbg(ql_dbg_disc, vha, 0x2031,
130 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
131 routine, ms_pkt->entry_status, vha->d_id.b.domain,
132 vha->d_id.b.area, vha->d_id.b.al_pa);
133 } else {
134 if (IS_FWI2_CAPABLE(ha))
135 comp_status = le16_to_cpu(
136 ((struct ct_entry_24xx *)ms_pkt)->comp_status);
137 else
138 comp_status = le16_to_cpu(ms_pkt->status);
139 switch (comp_status) {
140 case CS_COMPLETE:
141 case CS_DATA_UNDERRUN:
142 case CS_DATA_OVERRUN: /* Overrun? */
143 if (ct_rsp->header.response !=
144 cpu_to_be16(CT_ACCEPT_RESPONSE)) {
145 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
146 "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
147 routine, vha->d_id.b.domain,
148 vha->d_id.b.area, vha->d_id.b.al_pa,
149 comp_status, ct_rsp->header.response);
150 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
151 0x2078, ct_rsp,
152 offsetof(typeof(*ct_rsp), rsp));
153 rval = QLA_INVALID_COMMAND;
154 } else
155 rval = QLA_SUCCESS;
156 break;
157 case CS_PORT_LOGGED_OUT:
158 if (IS_FWI2_CAPABLE(ha)) {
159 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
160 NPH_SNS)
161 lid_is_sns = true;
162 } else {
163 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
164 SIMPLE_NAME_SERVER)
165 lid_is_sns = true;
167 if (lid_is_sns) {
168 ql_dbg(ql_dbg_async, vha, 0x502b,
169 "%s failed, Name server has logged out",
170 routine);
171 rval = QLA_NOT_LOGGED_IN;
172 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
173 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
175 break;
176 case CS_TIMEOUT:
177 rval = QLA_FUNCTION_TIMEOUT;
178 /* fall through */
179 default:
180 ql_dbg(ql_dbg_disc, vha, 0x2033,
181 "%s failed, completion status (%x) on port_id: "
182 "%02x%02x%02x.\n", routine, comp_status,
183 vha->d_id.b.domain, vha->d_id.b.area,
184 vha->d_id.b.al_pa);
185 break;
188 return rval;
192 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
193 * @vha: HA context
194 * @fcport: fcport entry to be updated
196 * Returns 0 on success.
199 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
201 int rval;
203 ms_iocb_entry_t *ms_pkt;
204 struct ct_sns_req *ct_req;
205 struct ct_sns_rsp *ct_rsp;
206 struct qla_hw_data *ha = vha->hw;
207 struct ct_arg arg;
209 if (IS_QLA2100(ha) || IS_QLA2200(ha))
210 return qla2x00_sns_ga_nxt(vha, fcport);
212 arg.iocb = ha->ms_iocb;
213 arg.req_dma = ha->ct_sns_dma;
214 arg.rsp_dma = ha->ct_sns_dma;
215 arg.req_size = GA_NXT_REQ_SIZE;
216 arg.rsp_size = GA_NXT_RSP_SIZE;
217 arg.nport_handle = NPH_SNS;
219 /* Issue GA_NXT */
220 /* Prepare common MS IOCB */
221 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
223 /* Prepare CT request */
224 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
225 GA_NXT_RSP_SIZE);
226 ct_rsp = &ha->ct_sns->p.rsp;
228 /* Prepare CT arguments -- port_id */
229 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
231 /* Execute MS IOCB */
232 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
233 sizeof(ms_iocb_entry_t));
234 if (rval != QLA_SUCCESS) {
235 /*EMPTY*/
236 ql_dbg(ql_dbg_disc, vha, 0x2062,
237 "GA_NXT issue IOCB failed (%d).\n", rval);
238 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
239 QLA_SUCCESS) {
240 rval = QLA_FUNCTION_FAILED;
241 } else {
242 /* Populate fc_port_t entry. */
243 fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id);
245 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
246 WWN_SIZE);
247 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
248 WWN_SIZE);
250 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
251 FS_FC4TYPE_FCP : FC4_TYPE_OTHER;
253 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
254 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
255 fcport->d_id.b.domain = 0xf0;
257 ql_dbg(ql_dbg_disc, vha, 0x2063,
258 "GA_NXT entry - nn %8phN pn %8phN "
259 "port_id=%02x%02x%02x.\n",
260 fcport->node_name, fcport->port_name,
261 fcport->d_id.b.domain, fcport->d_id.b.area,
262 fcport->d_id.b.al_pa);
265 return (rval);
268 static inline int
269 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
271 return vha->hw->max_fibre_devices * 4 + 16;
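/*
 * A GID_PT response is a 16-byte CT header followed by one 4-byte entry
 * per port (a control byte plus a 3-byte port ID), hence the
 * max_fibre_devices * 4 + 16 sizing above.  With, say, 512 supported
 * fabric devices that works out to a 2064-byte response buffer.
 */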
275 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
276 * @vha: HA context
277 * @list: switch info entries to populate
279 * NOTE: Non-Nx_Ports are not requested.
281 * Returns 0 on success.
284 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
286 int rval;
287 uint16_t i;
289 ms_iocb_entry_t *ms_pkt;
290 struct ct_sns_req *ct_req;
291 struct ct_sns_rsp *ct_rsp;
293 struct ct_sns_gid_pt_data *gid_data;
294 struct qla_hw_data *ha = vha->hw;
295 uint16_t gid_pt_rsp_size;
296 struct ct_arg arg;
298 if (IS_QLA2100(ha) || IS_QLA2200(ha))
299 return qla2x00_sns_gid_pt(vha, list);
301 gid_data = NULL;
302 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
304 arg.iocb = ha->ms_iocb;
305 arg.req_dma = ha->ct_sns_dma;
306 arg.rsp_dma = ha->ct_sns_dma;
307 arg.req_size = GID_PT_REQ_SIZE;
308 arg.rsp_size = gid_pt_rsp_size;
309 arg.nport_handle = NPH_SNS;
311 /* Issue GID_PT */
312 /* Prepare common MS IOCB */
313 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
315 /* Prepare CT request */
316 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
317 ct_rsp = &ha->ct_sns->p.rsp;
319 /* Prepare CT arguments -- port_type */
320 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
322 /* Execute MS IOCB */
323 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
324 sizeof(ms_iocb_entry_t));
325 if (rval != QLA_SUCCESS) {
326 /*EMPTY*/
327 ql_dbg(ql_dbg_disc, vha, 0x2055,
328 "GID_PT issue IOCB failed (%d).\n", rval);
329 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
330 QLA_SUCCESS) {
331 rval = QLA_FUNCTION_FAILED;
332 } else {
333 /* Set port IDs in switch info list. */
334 for (i = 0; i < ha->max_fibre_devices; i++) {
335 gid_data = &ct_rsp->rsp.gid_pt.entries[i];
336 list[i].d_id = be_to_port_id(gid_data->port_id);
337 memset(list[i].fabric_port_name, 0, WWN_SIZE);
338 list[i].fp_speed = PORT_SPEED_UNKNOWN;
340 /* Last one exit. */
341 if (gid_data->control_byte & BIT_7) {
342 list[i].d_id.b.rsvd_1 = gid_data->control_byte;
343 break;
348 * If we've used all available slots, then the switch is
349 * reporting back more devices than we can handle with this
350 * single call. Return a failed status, and let GA_NXT handle
351 * the overload.
353 if (i == ha->max_fibre_devices)
354 rval = QLA_FUNCTION_FAILED;
357 return (rval);
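/*
 * Note how the terminating GID_PT entry is remembered: the name server
 * sets the top bit of the control byte on the last entry, and the loop
 * above stashes that control byte in list[i].d_id.b.rsvd_1.  The
 * follow-up GPN_ID/GNN_ID loops below use the same non-zero rsvd_1
 * value as their end-of-list marker.
 */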
361 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
362 * @vha: HA context
363 * @list: switch info entries to populate
365 * Returns 0 on success.
368 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
370 int rval = QLA_SUCCESS;
371 uint16_t i;
373 ms_iocb_entry_t *ms_pkt;
374 struct ct_sns_req *ct_req;
375 struct ct_sns_rsp *ct_rsp;
376 struct qla_hw_data *ha = vha->hw;
377 struct ct_arg arg;
379 if (IS_QLA2100(ha) || IS_QLA2200(ha))
380 return qla2x00_sns_gpn_id(vha, list);
382 arg.iocb = ha->ms_iocb;
383 arg.req_dma = ha->ct_sns_dma;
384 arg.rsp_dma = ha->ct_sns_dma;
385 arg.req_size = GPN_ID_REQ_SIZE;
386 arg.rsp_size = GPN_ID_RSP_SIZE;
387 arg.nport_handle = NPH_SNS;
389 for (i = 0; i < ha->max_fibre_devices; i++) {
390 /* Issue GPN_ID */
391 /* Prepare common MS IOCB */
392 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
394 /* Prepare CT request */
395 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
396 GPN_ID_RSP_SIZE);
397 ct_rsp = &ha->ct_sns->p.rsp;
399 /* Prepare CT arguments -- port_id */
400 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
402 /* Execute MS IOCB */
403 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
404 sizeof(ms_iocb_entry_t));
405 if (rval != QLA_SUCCESS) {
406 /*EMPTY*/
407 ql_dbg(ql_dbg_disc, vha, 0x2056,
408 "GPN_ID issue IOCB failed (%d).\n", rval);
409 break;
410 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
411 "GPN_ID") != QLA_SUCCESS) {
412 rval = QLA_FUNCTION_FAILED;
413 break;
414 } else {
415 /* Save portname */
416 memcpy(list[i].port_name,
417 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
420 /* Last device exit. */
421 if (list[i].d_id.b.rsvd_1 != 0)
422 break;
425 return (rval);
429 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
430 * @vha: HA context
431 * @list: switch info entries to populate
433 * Returns 0 on success.
436 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
438 int rval = QLA_SUCCESS;
439 uint16_t i;
440 struct qla_hw_data *ha = vha->hw;
441 ms_iocb_entry_t *ms_pkt;
442 struct ct_sns_req *ct_req;
443 struct ct_sns_rsp *ct_rsp;
444 struct ct_arg arg;
446 if (IS_QLA2100(ha) || IS_QLA2200(ha))
447 return qla2x00_sns_gnn_id(vha, list);
449 arg.iocb = ha->ms_iocb;
450 arg.req_dma = ha->ct_sns_dma;
451 arg.rsp_dma = ha->ct_sns_dma;
452 arg.req_size = GNN_ID_REQ_SIZE;
453 arg.rsp_size = GNN_ID_RSP_SIZE;
454 arg.nport_handle = NPH_SNS;
456 for (i = 0; i < ha->max_fibre_devices; i++) {
457 /* Issue GNN_ID */
458 /* Prepare common MS IOCB */
459 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
461 /* Prepare CT request */
462 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
463 GNN_ID_RSP_SIZE);
464 ct_rsp = &ha->ct_sns->p.rsp;
466 /* Prepare CT arguments -- port_id */
467 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
469 /* Execute MS IOCB */
470 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
471 sizeof(ms_iocb_entry_t));
472 if (rval != QLA_SUCCESS) {
473 /*EMPTY*/
474 ql_dbg(ql_dbg_disc, vha, 0x2057,
475 "GNN_ID issue IOCB failed (%d).\n", rval);
476 break;
477 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
478 "GNN_ID") != QLA_SUCCESS) {
479 rval = QLA_FUNCTION_FAILED;
480 break;
481 } else {
482 /* Save nodename */
483 memcpy(list[i].node_name,
484 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
486 ql_dbg(ql_dbg_disc, vha, 0x2058,
487 "GID_PT entry - nn %8phN pn %8phN "
488 "portid=%02x%02x%02x.\n",
489 list[i].node_name, list[i].port_name,
490 list[i].d_id.b.domain, list[i].d_id.b.area,
491 list[i].d_id.b.al_pa);
494 /* Last device exit. */
495 if (list[i].d_id.b.rsvd_1 != 0)
496 break;
499 return (rval);
502 static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
504 struct scsi_qla_host *vha = sp->vha;
505 struct ct_sns_pkt *ct_sns;
506 struct qla_work_evt *e;
508 sp->rc = rc;
509 if (rc == QLA_SUCCESS) {
510 ql_dbg(ql_dbg_disc, vha, 0x204f,
511 "Async done-%s exiting normally.\n",
512 sp->name);
513 } else if (rc == QLA_FUNCTION_TIMEOUT) {
514 ql_dbg(ql_dbg_disc, vha, 0x204f,
515 "Async done-%s timeout\n", sp->name);
516 } else {
517 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
518 memset(ct_sns, 0, sizeof(*ct_sns));
519 sp->retry_count++;
520 if (sp->retry_count > 3)
521 goto err;
523 ql_dbg(ql_dbg_disc, vha, 0x204f,
524 "Async done-%s fail rc %x. Retry count %d\n",
525 sp->name, rc, sp->retry_count);
527 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
528 if (!e)
529 goto err2;
531 del_timer(&sp->u.iocb_cmd.timer);
532 e->u.iosb.sp = sp;
533 qla2x00_post_work(vha, e);
534 return;
537 err:
538 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
539 err2:
540 if (!e) {
541 /* Freeing here may trigger a kernel warning, but skipping it would leak the DMA buffers. */
542 if (sp->u.iocb_cmd.u.ctarg.req) {
543 dma_free_coherent(&vha->hw->pdev->dev,
544 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
545 sp->u.iocb_cmd.u.ctarg.req,
546 sp->u.iocb_cmd.u.ctarg.req_dma);
547 sp->u.iocb_cmd.u.ctarg.req = NULL;
550 if (sp->u.iocb_cmd.u.ctarg.rsp) {
551 dma_free_coherent(&vha->hw->pdev->dev,
552 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
553 sp->u.iocb_cmd.u.ctarg.rsp,
554 sp->u.iocb_cmd.u.ctarg.rsp_dma);
555 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
558 sp->free(sp);
560 return;
563 e->u.iosb.sp = sp;
564 qla2x00_post_work(vha, e);
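/*
 * Summary of the completion flow above: success and timeout just log
 * and return; any other failure clears the response buffer and requeues
 * the SRB as a QLA_EVT_SP_RETRY work item, up to three retries.  Past
 * that the SRB is handed to QLA_EVT_UNMAP for teardown, and if no work
 * element can be allocated at all, the DMA buffers are freed inline and
 * the SRB is released here.
 */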
568 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
569 * @vha: HA context
571 * Returns 0 on success.
574 qla2x00_rft_id(scsi_qla_host_t *vha)
576 struct qla_hw_data *ha = vha->hw;
578 if (IS_QLA2100(ha) || IS_QLA2200(ha))
579 return qla2x00_sns_rft_id(vha);
581 return qla_async_rftid(vha, &vha->d_id);
584 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
586 int rval = QLA_MEMORY_ALLOC_FAILED;
587 struct ct_sns_req *ct_req;
588 srb_t *sp;
589 struct ct_sns_pkt *ct_sns;
591 if (!vha->flags.online)
592 goto done;
594 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
595 if (!sp)
596 goto done;
598 sp->type = SRB_CT_PTHRU_CMD;
599 sp->name = "rft_id";
600 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
602 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
603 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
604 GFP_KERNEL);
605 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
606 if (!sp->u.iocb_cmd.u.ctarg.req) {
607 ql_log(ql_log_warn, vha, 0xd041,
608 "%s: Failed to allocate ct_sns request.\n",
609 __func__);
610 goto done_free_sp;
613 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
614 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
615 GFP_KERNEL);
616 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
617 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
618 ql_log(ql_log_warn, vha, 0xd042,
619 "%s: Failed to allocate ct_sns request.\n",
620 __func__);
621 goto done_free_sp;
623 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
624 memset(ct_sns, 0, sizeof(*ct_sns));
625 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
627 /* Prepare CT request */
628 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
630 /* Prepare CT arguments -- port_id, FC-4 types */
631 ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
632 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
634 if (vha->flags.nvme_enabled)
635 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
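/*
 * fc4_types is a 256-bit TYPE bitmap laid out as big-endian words:
 * byte 2 bit 0 flags TYPE 0x08 (SCSI-FCP) and byte 6 bit 0 flags
 * TYPE 0x28 (NVMe), which is what the two assignments above set.
 */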
637 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
638 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
639 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
640 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
641 sp->done = qla2x00_async_sns_sp_done;
643 ql_dbg(ql_dbg_disc, vha, 0xffff,
644 "Async-%s - hdl=%x portid %06x.\n",
645 sp->name, sp->handle, d_id->b24);
647 rval = qla2x00_start_sp(sp);
648 if (rval != QLA_SUCCESS) {
649 ql_dbg(ql_dbg_disc, vha, 0x2043,
650 "RFT_ID issue IOCB failed (%d).\n", rval);
651 goto done_free_sp;
653 return rval;
654 done_free_sp:
655 sp->free(sp);
656 done:
657 return rval;
661 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
662 * @vha: HA context
663 * @type: not used
665 * Returns 0 on success.
668 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
670 struct qla_hw_data *ha = vha->hw;
672 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
673 ql_dbg(ql_dbg_disc, vha, 0x2046,
674 "RFF_ID call not supported on ISP2100/ISP2200.\n");
675 return (QLA_SUCCESS);
678 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
679 FC4_TYPE_FCP_SCSI);
682 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
683 u8 fc4feature, u8 fc4type)
685 int rval = QLA_MEMORY_ALLOC_FAILED;
686 struct ct_sns_req *ct_req;
687 srb_t *sp;
688 struct ct_sns_pkt *ct_sns;
690 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
691 if (!sp)
692 goto done;
694 sp->type = SRB_CT_PTHRU_CMD;
695 sp->name = "rff_id";
696 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
698 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
699 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
700 GFP_KERNEL);
701 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
702 if (!sp->u.iocb_cmd.u.ctarg.req) {
703 ql_log(ql_log_warn, vha, 0xd041,
704 "%s: Failed to allocate ct_sns request.\n",
705 __func__);
706 goto done_free_sp;
709 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
710 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
711 GFP_KERNEL);
712 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
713 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
714 ql_log(ql_log_warn, vha, 0xd042,
715 "%s: Failed to allocate ct_sns request.\n",
716 __func__);
717 goto done_free_sp;
719 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
720 memset(ct_sns, 0, sizeof(*ct_sns));
721 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
723 /* Prepare CT request */
724 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
726 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
727 ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
728 ct_req->req.rff_id.fc4_feature = fc4feature;
729 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
731 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
732 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
733 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
734 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
735 sp->done = qla2x00_async_sns_sp_done;
737 ql_dbg(ql_dbg_disc, vha, 0xffff,
738 "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
739 sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
741 rval = qla2x00_start_sp(sp);
742 if (rval != QLA_SUCCESS) {
743 ql_dbg(ql_dbg_disc, vha, 0x2047,
744 "RFF_ID issue IOCB failed (%d).\n", rval);
745 goto done_free_sp;
748 return rval;
750 done_free_sp:
751 sp->free(sp);
752 done:
753 return rval;
757 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
758 * @vha: HA context
760 * Returns 0 on success.
763 qla2x00_rnn_id(scsi_qla_host_t *vha)
765 struct qla_hw_data *ha = vha->hw;
767 if (IS_QLA2100(ha) || IS_QLA2200(ha))
768 return qla2x00_sns_rnn_id(vha);
770 return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
773 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
774 u8 *node_name)
776 int rval = QLA_MEMORY_ALLOC_FAILED;
777 struct ct_sns_req *ct_req;
778 srb_t *sp;
779 struct ct_sns_pkt *ct_sns;
781 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
782 if (!sp)
783 goto done;
785 sp->type = SRB_CT_PTHRU_CMD;
786 sp->name = "rnid";
787 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
789 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
790 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
791 GFP_KERNEL);
792 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
793 if (!sp->u.iocb_cmd.u.ctarg.req) {
794 ql_log(ql_log_warn, vha, 0xd041,
795 "%s: Failed to allocate ct_sns request.\n",
796 __func__);
797 goto done_free_sp;
800 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
801 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
802 GFP_KERNEL);
803 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
804 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
805 ql_log(ql_log_warn, vha, 0xd042,
806 "%s: Failed to allocate ct_sns request.\n",
807 __func__);
808 goto done_free_sp;
810 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
811 memset(ct_sns, 0, sizeof(*ct_sns));
812 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
814 /* Prepare CT request */
815 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
817 /* Prepare CT arguments -- port_id, node_name */
818 ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id);
819 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
821 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
822 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
823 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
825 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
826 sp->done = qla2x00_async_sns_sp_done;
828 ql_dbg(ql_dbg_disc, vha, 0xffff,
829 "Async-%s - hdl=%x portid %06x\n",
830 sp->name, sp->handle, d_id->b24);
832 rval = qla2x00_start_sp(sp);
833 if (rval != QLA_SUCCESS) {
834 ql_dbg(ql_dbg_disc, vha, 0x204d,
835 "RNN_ID issue IOCB failed (%d).\n", rval);
836 goto done_free_sp;
839 return rval;
841 done_free_sp:
842 sp->free(sp);
843 done:
844 return rval;
847 void
848 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
850 struct qla_hw_data *ha = vha->hw;
852 if (IS_QLAFX00(ha))
853 snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
854 ha->mr.fw_version, qla2x00_version_str);
855 else
856 snprintf(snn, size,
857 "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
858 ha->fw_major_version, ha->fw_minor_version,
859 ha->fw_subminor_version, qla2x00_version_str);
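/*
 * The resulting symbolic node name looks something like
 * "QLE2562 FW:v8.07.00 DVR:v10.01.00.21-k" (model and version strings
 * here are only illustrative); it is what RSNN_NN registers with the
 * name server below.
 */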
863 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
864 * @vha: HA context
866 * Returns 0 on success.
869 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
871 struct qla_hw_data *ha = vha->hw;
873 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
874 ql_dbg(ql_dbg_disc, vha, 0x2050,
875 "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
876 return (QLA_SUCCESS);
879 return qla_async_rsnn_nn(vha);
882 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
884 int rval = QLA_MEMORY_ALLOC_FAILED;
885 struct ct_sns_req *ct_req;
886 srb_t *sp;
887 struct ct_sns_pkt *ct_sns;
889 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
890 if (!sp)
891 goto done;
893 sp->type = SRB_CT_PTHRU_CMD;
894 sp->name = "rsnn_nn";
895 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
897 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
898 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
899 GFP_KERNEL);
900 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
901 if (!sp->u.iocb_cmd.u.ctarg.req) {
902 ql_log(ql_log_warn, vha, 0xd041,
903 "%s: Failed to allocate ct_sns request.\n",
904 __func__);
905 goto done_free_sp;
908 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
909 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
910 GFP_KERNEL);
911 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
912 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
913 ql_log(ql_log_warn, vha, 0xd042,
914 "%s: Failed to allocate ct_sns request.\n",
915 __func__);
916 goto done_free_sp;
918 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
919 memset(ct_sns, 0, sizeof(*ct_sns));
920 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
922 /* Prepare CT request */
923 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
925 /* Prepare CT arguments -- node_name, symbolic node_name, size */
926 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
928 /* Prepare the Symbolic Node Name */
929 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
930 sizeof(ct_req->req.rsnn_nn.sym_node_name));
931 ct_req->req.rsnn_nn.name_len =
932 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
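/*
 * The RSNN_NN payload is the 16-byte CT header, the 8-byte node name
 * and a one-byte symbolic-name length followed by the name itself,
 * hence the 24 + 1 + name_len request size below.
 */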
935 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
936 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
937 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
939 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
940 sp->done = qla2x00_async_sns_sp_done;
942 ql_dbg(ql_dbg_disc, vha, 0xffff,
943 "Async-%s - hdl=%x.\n",
944 sp->name, sp->handle);
946 rval = qla2x00_start_sp(sp);
947 if (rval != QLA_SUCCESS) {
948 ql_dbg(ql_dbg_disc, vha, 0x2043,
949 "RFT_ID issue IOCB failed (%d).\n", rval);
950 goto done_free_sp;
953 return rval;
955 done_free_sp:
956 sp->free(sp);
957 done:
958 return rval;
962 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
963 * @vha: HA context
964 * @cmd: GS command
965 * @scmd_len: Subcommand length
966 * @data_size: response size in bytes
968 * Returns a pointer to the @ha's sns_cmd.
970 static inline struct sns_cmd_pkt *
971 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
972 uint16_t data_size)
974 uint16_t wc;
975 struct sns_cmd_pkt *sns_cmd;
976 struct qla_hw_data *ha = vha->hw;
978 sns_cmd = ha->sns_cmd;
979 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
980 wc = data_size / 2; /* Size in 16bit words. */
981 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
982 put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
983 sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
984 sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
985 wc = (data_size - 16) / 4; /* Size in 32bit words. */
986 sns_cmd->p.cmd.size = cpu_to_le16(wc);
988 vha->qla_stats.control_requests++;
990 return (sns_cmd);
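/*
 * Two size fields are filled in above, in different units:
 * buffer_length counts the whole data buffer in 16-bit words
 * (data_size / 2), while cmd.size counts the payload after the 16-byte
 * header in 32-bit words ((data_size - 16) / 4).  For a 636-byte data
 * buffer, for instance, that is 318 and 155 respectively.
 */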
994 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
995 * @vha: HA context
996 * @fcport: fcport entry to be updated
998 * This command uses the old Execute SNS Command mailbox routine.
1000 * Returns 0 on success.
1002 static int
1003 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1005 int rval = QLA_SUCCESS;
1006 struct qla_hw_data *ha = vha->hw;
1007 struct sns_cmd_pkt *sns_cmd;
1009 /* Issue GA_NXT. */
1010 /* Prepare SNS command request. */
1011 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1012 GA_NXT_SNS_DATA_SIZE);
1014 /* Prepare SNS command arguments -- port_id. */
1015 sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1016 sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1017 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1019 /* Execute SNS command. */
1020 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1021 sizeof(struct sns_cmd_pkt));
1022 if (rval != QLA_SUCCESS) {
1023 /*EMPTY*/
1024 ql_dbg(ql_dbg_disc, vha, 0x205f,
1025 "GA_NXT Send SNS failed (%d).\n", rval);
1026 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
1027 sns_cmd->p.gan_data[9] != 0x02) {
1028 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1029 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1030 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1031 sns_cmd->p.gan_data, 16);
1032 rval = QLA_FUNCTION_FAILED;
1033 } else {
1034 /* Populate fc_port_t entry. */
1035 fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1036 fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1037 fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1039 memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1040 memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
1042 if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1043 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1044 fcport->d_id.b.domain = 0xf0;
1046 ql_dbg(ql_dbg_disc, vha, 0x2061,
1047 "GA_NXT entry - nn %8phN pn %8phN "
1048 "port_id=%02x%02x%02x.\n",
1049 fcport->node_name, fcport->port_name,
1050 fcport->d_id.b.domain, fcport->d_id.b.area,
1051 fcport->d_id.b.al_pa);
1054 return (rval);
1058 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1059 * @vha: HA context
1060 * @list: switch info entries to populate
1062 * This command uses the old Execute SNS Command mailbox routine.
1064 * NOTE: Non-Nx_Ports are not requested.
1066 * Returns 0 on success.
1068 static int
1069 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
1071 int rval;
1072 struct qla_hw_data *ha = vha->hw;
1073 uint16_t i;
1074 uint8_t *entry;
1075 struct sns_cmd_pkt *sns_cmd;
1076 uint16_t gid_pt_sns_data_size;
1078 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1080 /* Issue GID_PT. */
1081 /* Prepare SNS command request. */
1082 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1083 gid_pt_sns_data_size);
1085 /* Prepare SNS command arguments -- port_type. */
1086 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1088 /* Execute SNS command. */
1089 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1090 sizeof(struct sns_cmd_pkt));
1091 if (rval != QLA_SUCCESS) {
1092 /*EMPTY*/
1093 ql_dbg(ql_dbg_disc, vha, 0x206d,
1094 "GID_PT Send SNS failed (%d).\n", rval);
1095 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
1096 sns_cmd->p.gid_data[9] != 0x02) {
1097 ql_dbg(ql_dbg_disc, vha, 0x202f,
1098 "GID_PT failed, rejected request, gid_rsp:\n");
1099 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1100 sns_cmd->p.gid_data, 16);
1101 rval = QLA_FUNCTION_FAILED;
1102 } else {
1103 /* Set port IDs in switch info list. */
1104 for (i = 0; i < ha->max_fibre_devices; i++) {
1105 entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1106 list[i].d_id.b.domain = entry[1];
1107 list[i].d_id.b.area = entry[2];
1108 list[i].d_id.b.al_pa = entry[3];
1110 /* Last one exit. */
1111 if (entry[0] & BIT_7) {
1112 list[i].d_id.b.rsvd_1 = entry[0];
1113 break;
1118 * If we've used all available slots, then the switch is
1119 * reporting back more devices than we can handle with this
1120 * single call. Return a failed status, and let GA_NXT handle
1121 * the overload.
1123 if (i == ha->max_fibre_devices)
1124 rval = QLA_FUNCTION_FAILED;
1127 return (rval);
1131 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1132 * @vha: HA context
1133 * @list: switch info entries to populate
1135 * This command uses the old Execute SNS Command mailbox routine.
1137 * Returns 0 on success.
1139 static int
1140 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1142 int rval = QLA_SUCCESS;
1143 struct qla_hw_data *ha = vha->hw;
1144 uint16_t i;
1145 struct sns_cmd_pkt *sns_cmd;
1147 for (i = 0; i < ha->max_fibre_devices; i++) {
1148 /* Issue GPN_ID */
1149 /* Prepare SNS command request. */
1150 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1151 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
1153 /* Prepare SNS command arguments -- port_id. */
1154 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1155 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1156 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1158 /* Execute SNS command. */
1159 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1160 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1161 if (rval != QLA_SUCCESS) {
1162 /*EMPTY*/
1163 ql_dbg(ql_dbg_disc, vha, 0x2032,
1164 "GPN_ID Send SNS failed (%d).\n", rval);
1165 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1166 sns_cmd->p.gpn_data[9] != 0x02) {
1167 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1168 "GPN_ID failed, rejected request, gpn_rsp:\n");
1169 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1170 sns_cmd->p.gpn_data, 16);
1171 rval = QLA_FUNCTION_FAILED;
1172 } else {
1173 /* Save portname */
1174 memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
1175 WWN_SIZE);
1178 /* Last device exit. */
1179 if (list[i].d_id.b.rsvd_1 != 0)
1180 break;
1183 return (rval);
1187 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
1188 * @vha: HA context
1189 * @list: switch info entries to populate
1191 * This command uses the old Execute SNS Command mailbox routine.
1193 * Returns 0 on success.
1195 static int
1196 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
1198 int rval = QLA_SUCCESS;
1199 struct qla_hw_data *ha = vha->hw;
1200 uint16_t i;
1201 struct sns_cmd_pkt *sns_cmd;
1203 for (i = 0; i < ha->max_fibre_devices; i++) {
1204 /* Issue GNN_ID */
1205 /* Prepare SNS command request. */
1206 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1207 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
1209 /* Prepare SNS command arguments -- port_id. */
1210 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1211 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1212 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1214 /* Execute SNS command. */
1215 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1216 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1217 if (rval != QLA_SUCCESS) {
1218 /*EMPTY*/
1219 ql_dbg(ql_dbg_disc, vha, 0x203f,
1220 "GNN_ID Send SNS failed (%d).\n", rval);
1221 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1222 sns_cmd->p.gnn_data[9] != 0x02) {
1223 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1224 "GNN_ID failed, rejected request, gnn_rsp:\n");
1225 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1226 sns_cmd->p.gnn_data, 16);
1227 rval = QLA_FUNCTION_FAILED;
1228 } else {
1229 /* Save nodename */
1230 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
1231 WWN_SIZE);
1233 ql_dbg(ql_dbg_disc, vha, 0x206e,
1234 "GID_PT entry - nn %8phN pn %8phN "
1235 "port_id=%02x%02x%02x.\n",
1236 list[i].node_name, list[i].port_name,
1237 list[i].d_id.b.domain, list[i].d_id.b.area,
1238 list[i].d_id.b.al_pa);
1241 /* Last device exit. */
1242 if (list[i].d_id.b.rsvd_1 != 0)
1243 break;
1246 return (rval);
1250 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
1251 * @vha: HA context
1253 * This command uses the old Execute SNS Command mailbox routine.
1255 * Returns 0 on success.
1257 static int
1258 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1260 int rval;
1261 struct qla_hw_data *ha = vha->hw;
1262 struct sns_cmd_pkt *sns_cmd;
1264 /* Issue RFT_ID. */
1265 /* Prepare SNS command request. */
1266 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1267 RFT_ID_SNS_DATA_SIZE);
1269 /* Prepare SNS command arguments -- port_id, FC-4 types */
1270 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1271 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1272 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1274 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1276 /* Execute SNS command. */
1277 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1278 sizeof(struct sns_cmd_pkt));
1279 if (rval != QLA_SUCCESS) {
1280 /*EMPTY*/
1281 ql_dbg(ql_dbg_disc, vha, 0x2060,
1282 "RFT_ID Send SNS failed (%d).\n", rval);
1283 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1284 sns_cmd->p.rft_data[9] != 0x02) {
1285 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1286 "RFT_ID failed, rejected request rft_rsp:\n");
1287 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1288 sns_cmd->p.rft_data, 16);
1289 rval = QLA_FUNCTION_FAILED;
1290 } else {
1291 ql_dbg(ql_dbg_disc, vha, 0x2073,
1292 "RFT_ID exiting normally.\n");
1295 return (rval);
1299 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
1300 * @vha: HA context
1302 * This command uses the old Execute SNS Command mailbox routine.
1304 * Returns 0 on success.
1306 static int
1307 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1309 int rval;
1310 struct qla_hw_data *ha = vha->hw;
1311 struct sns_cmd_pkt *sns_cmd;
1313 /* Issue RNN_ID. */
1314 /* Prepare SNS command request. */
1315 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1316 RNN_ID_SNS_DATA_SIZE);
1318 /* Prepare SNS command arguments -- port_id, nodename. */
1319 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1320 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1321 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1323 sns_cmd->p.cmd.param[4] = vha->node_name[7];
1324 sns_cmd->p.cmd.param[5] = vha->node_name[6];
1325 sns_cmd->p.cmd.param[6] = vha->node_name[5];
1326 sns_cmd->p.cmd.param[7] = vha->node_name[4];
1327 sns_cmd->p.cmd.param[8] = vha->node_name[3];
1328 sns_cmd->p.cmd.param[9] = vha->node_name[2];
1329 sns_cmd->p.cmd.param[10] = vha->node_name[1];
1330 sns_cmd->p.cmd.param[11] = vha->node_name[0];
1332 /* Execute SNS command. */
1333 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1334 sizeof(struct sns_cmd_pkt));
1335 if (rval != QLA_SUCCESS) {
1336 /*EMPTY*/
1337 ql_dbg(ql_dbg_disc, vha, 0x204a,
1338 "RNN_ID Send SNS failed (%d).\n", rval);
1339 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1340 sns_cmd->p.rnn_data[9] != 0x02) {
1341 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1342 "RNN_ID failed, rejected request, rnn_rsp:\n");
1343 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1344 sns_cmd->p.rnn_data, 16);
1345 rval = QLA_FUNCTION_FAILED;
1346 } else {
1347 ql_dbg(ql_dbg_disc, vha, 0x204c,
1348 "RNN_ID exiting normally.\n");
1351 return (rval);
1355 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1356 * @vha: HA context
1358 * Returns 0 on success.
1361 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1363 int ret, rval;
1364 uint16_t mb[MAILBOX_REGISTER_COUNT];
1365 struct qla_hw_data *ha = vha->hw;
1367 ret = QLA_SUCCESS;
1368 if (vha->flags.management_server_logged_in)
1369 return ret;
1371 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1372 0xfa, mb, BIT_1);
1373 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1374 if (rval == QLA_MEMORY_ALLOC_FAILED)
1375 ql_dbg(ql_dbg_disc, vha, 0x2085,
1376 "Failed management_server login: loopid=%x "
1377 "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1378 else
1379 ql_dbg(ql_dbg_disc, vha, 0x2024,
1380 "Failed management_server login: loopid=%x "
1381 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1382 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
1383 mb[7]);
1384 ret = QLA_FUNCTION_FAILED;
1385 } else
1386 vha->flags.management_server_logged_in = 1;
1388 return ret;
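/*
 * The fabric_login() above targets D_ID ff-ff-fa, the well-known
 * address of the fabric management server, using the pre-assigned
 * mgmt_svr_loop_id handle; the FDMI registrations (RHBA/RPA) below
 * ride on this login.
 */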
1392 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1393 * @vha: HA context
1394 * @req_size: request size in bytes
1395 * @rsp_size: response size in bytes
1397 * Returns a pointer to the @ha's ms_iocb.
1399 void *
1400 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1401 uint32_t rsp_size)
1403 ms_iocb_entry_t *ms_pkt;
1404 struct qla_hw_data *ha = vha->hw;
1406 ms_pkt = ha->ms_iocb;
1407 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1409 ms_pkt->entry_type = MS_IOCB_TYPE;
1410 ms_pkt->entry_count = 1;
1411 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1412 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
1413 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1414 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1415 ms_pkt->total_dsd_count = cpu_to_le16(2);
1416 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1417 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1419 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
1420 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1422 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
1423 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
1425 return ms_pkt;
1429 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1430 * @vha: HA context
1431 * @req_size: request size in bytes
1432 * @rsp_size: response size in bytes
1434 * Returns a pointer to the @ha's ms_iocb.
1436 void *
1437 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1438 uint32_t rsp_size)
1440 struct ct_entry_24xx *ct_pkt;
1441 struct qla_hw_data *ha = vha->hw;
1443 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1444 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1446 ct_pkt->entry_type = CT_IOCB_TYPE;
1447 ct_pkt->entry_count = 1;
1448 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1449 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1450 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1451 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1452 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1453 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1455 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
1456 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1458 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
1459 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
1460 ct_pkt->vp_index = vha->vp_idx;
1462 return ct_pkt;
1465 static void
1466 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1468 struct qla_hw_data *ha = vha->hw;
1469 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1470 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1472 if (IS_FWI2_CAPABLE(ha)) {
1473 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1474 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1475 } else {
1476 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1477 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1482 * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for FDMI query.
1483 * @p: CT request buffer
1484 * @cmd: GS command
1485 * @rsp_size: response size in bytes
1487 * Returns a pointer to the initialized @ct_req.
1489 static inline struct ct_sns_req *
1490 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1491 uint16_t rsp_size)
1493 memset(p, 0, sizeof(struct ct_sns_pkt));
1495 p->p.req.header.revision = 0x01;
1496 p->p.req.header.gs_type = 0xFA;
1497 p->p.req.header.gs_subtype = 0x10;
1498 p->p.req.command = cpu_to_be16(cmd);
1499 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
1501 return &p->p.req;
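/*
 * Same CT preamble as qla2x00_prep_ct_req() except for the addressing:
 * GS type 0xFA with subtype 0x10 selects the management service's
 * fabric device management interface (FDMI) rather than the name
 * server, which is where the RHBA/RPA registrations below are sent.
 */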
1505 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
1506 * @vha: HA context
1508 * Returns 0 on success.
1510 static int
1511 qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1513 int rval, alen;
1514 uint32_t size, sn;
1516 ms_iocb_entry_t *ms_pkt;
1517 struct ct_sns_req *ct_req;
1518 struct ct_sns_rsp *ct_rsp;
1519 void *entries;
1520 struct ct_fdmi_hba_attr *eiter;
1521 struct qla_hw_data *ha = vha->hw;
1523 /* Issue RHBA */
1524 /* Prepare common MS IOCB */
1525 /* Request size adjusted after CT preparation */
1526 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1528 /* Prepare CT request */
1529 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
1530 ct_rsp = &ha->ct_sns->p.rsp;
1532 /* Prepare FDMI command arguments -- attribute block, attributes. */
1533 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
1534 ct_req->req.rhba.entry_count = cpu_to_be32(1);
1535 memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
1536 size = 2 * WWN_SIZE + 4 + 4;
1538 /* Attributes */
1539 ct_req->req.rhba.attrs.count =
1540 cpu_to_be32(FDMI_HBA_ATTR_COUNT);
1541 entries = &ct_req->req;
1543 /* Nodename. */
1544 eiter = entries + size;
1545 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1546 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1547 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1548 size += 4 + WWN_SIZE;
1550 ql_dbg(ql_dbg_disc, vha, 0x2025,
1551 "NodeName = %8phN.\n", eiter->a.node_name);
1553 /* Manufacturer. */
1554 eiter = entries + size;
1555 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1556 alen = strlen(QLA2XXX_MANUFACTURER);
1557 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1558 "%s", "QLogic Corporation");
1559 alen += 4 - (alen & 3);
1560 eiter->len = cpu_to_be16(4 + alen);
1561 size += 4 + alen;
1563 ql_dbg(ql_dbg_disc, vha, 0x2026,
1564 "Manufacturer = %s.\n", eiter->a.manufacturer);
1566 /* Serial number. */
1567 eiter = entries + size;
1568 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1569 if (IS_FWI2_CAPABLE(ha))
1570 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1571 sizeof(eiter->a.serial_num));
1572 else {
1573 sn = ((ha->serial0 & 0x1f) << 16) |
1574 (ha->serial2 << 8) | ha->serial1;
1575 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1576 "%c%05d", 'A' + sn / 100000, sn % 100000);
1578 alen = strlen(eiter->a.serial_num);
1579 alen += 4 - (alen & 3);
1580 eiter->len = cpu_to_be16(4 + alen);
1581 size += 4 + alen;
1583 ql_dbg(ql_dbg_disc, vha, 0x2027,
1584 "Serial no. = %s.\n", eiter->a.serial_num);
1586 /* Model name. */
1587 eiter = entries + size;
1588 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1589 snprintf(eiter->a.model, sizeof(eiter->a.model),
1590 "%s", ha->model_number);
1591 alen = strlen(eiter->a.model);
1592 alen += 4 - (alen & 3);
1593 eiter->len = cpu_to_be16(4 + alen);
1594 size += 4 + alen;
1596 ql_dbg(ql_dbg_disc, vha, 0x2028,
1597 "Model Name = %s.\n", eiter->a.model);
1599 /* Model description. */
1600 eiter = entries + size;
1601 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1602 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
1603 "%s", ha->model_desc);
1604 alen = strlen(eiter->a.model_desc);
1605 alen += 4 - (alen & 3);
1606 eiter->len = cpu_to_be16(4 + alen);
1607 size += 4 + alen;
1609 ql_dbg(ql_dbg_disc, vha, 0x2029,
1610 "Model Desc = %s.\n", eiter->a.model_desc);
1612 /* Hardware version. */
1613 eiter = entries + size;
1614 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1615 if (!IS_FWI2_CAPABLE(ha)) {
1616 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1617 "HW:%s", ha->adapter_id);
1618 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
1619 sizeof(eiter->a.hw_version))) {
1621 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
1622 sizeof(eiter->a.hw_version))) {
1624 } else {
1625 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1626 "HW:%s", ha->adapter_id);
1628 alen = strlen(eiter->a.hw_version);
1629 alen += 4 - (alen & 3);
1630 eiter->len = cpu_to_be16(4 + alen);
1631 size += 4 + alen;
1633 ql_dbg(ql_dbg_disc, vha, 0x202a,
1634 "Hardware ver = %s.\n", eiter->a.hw_version);
1636 /* Driver version. */
1637 eiter = entries + size;
1638 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1639 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
1640 "%s", qla2x00_version_str);
1641 alen = strlen(eiter->a.driver_version);
1642 alen += 4 - (alen & 3);
1643 eiter->len = cpu_to_be16(4 + alen);
1644 size += 4 + alen;
1646 ql_dbg(ql_dbg_disc, vha, 0x202b,
1647 "Driver ver = %s.\n", eiter->a.driver_version);
1649 /* Option ROM version. */
1650 eiter = entries + size;
1651 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1652 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
1653 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1654 alen = strlen(eiter->a.orom_version);
1655 alen += 4 - (alen & 3);
1656 eiter->len = cpu_to_be16(4 + alen);
1657 size += 4 + alen;
1659 ql_dbg(ql_dbg_disc, vha , 0x202c,
1660 "Optrom vers = %s.\n", eiter->a.orom_version);
1662 /* Firmware version */
1663 eiter = entries + size;
1664 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1665 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1666 sizeof(eiter->a.fw_version));
1667 alen = strlen(eiter->a.fw_version);
1668 alen += 4 - (alen & 3);
1669 eiter->len = cpu_to_be16(4 + alen);
1670 size += 4 + alen;
1672 ql_dbg(ql_dbg_disc, vha, 0x202d,
1673 "Firmware vers = %s.\n", eiter->a.fw_version);
1675 /* Update MS request size. */
1676 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1678 ql_dbg(ql_dbg_disc, vha, 0x202e,
1679 "RHBA identifier = %8phN size=%d.\n",
1680 ct_req->req.rhba.hba_identifier, size);
1681 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1682 entries, size);
1684 /* Execute MS IOCB */
1685 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1686 sizeof(ms_iocb_entry_t));
1687 if (rval != QLA_SUCCESS) {
1688 /*EMPTY*/
1689 ql_dbg(ql_dbg_disc, vha, 0x2030,
1690 "RHBA issue IOCB failed (%d).\n", rval);
1691 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1692 QLA_SUCCESS) {
1693 rval = QLA_FUNCTION_FAILED;
1694 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1695 ct_rsp->header.explanation_code ==
1696 CT_EXPL_ALREADY_REGISTERED) {
1697 ql_dbg(ql_dbg_disc, vha, 0x2034,
1698 "HBA already registered.\n");
1699 rval = QLA_ALREADY_REGISTERED;
1700 } else {
1701 ql_dbg(ql_dbg_disc, vha, 0x20ad,
1702 "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
1703 ct_rsp->header.reason_code,
1704 ct_rsp->header.explanation_code);
1706 } else {
1707 ql_dbg(ql_dbg_disc, vha, 0x2035,
1708 "RHBA exiting normally.\n");
1711 return rval;
1715 * qla2x00_fdmi_rpa() - perform RPA registration
1716 * @vha: HA context
1718 * Returns 0 on success.
1720 static int
1721 qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1723 int rval, alen;
1724 uint32_t size;
1725 struct qla_hw_data *ha = vha->hw;
1726 ms_iocb_entry_t *ms_pkt;
1727 struct ct_sns_req *ct_req;
1728 struct ct_sns_rsp *ct_rsp;
1729 void *entries;
1730 struct ct_fdmi_port_attr *eiter;
1731 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1732 struct new_utsname *p_sysid = NULL;
1734 /* Issue RPA */
1735 /* Prepare common MS IOCB */
1736 /* Request size adjusted after CT preparation */
1737 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
1739 /* Prepare CT request */
1740 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
1741 RPA_RSP_SIZE);
1742 ct_rsp = &ha->ct_sns->p.rsp;
1744 /* Prepare FDMI command arguments -- attribute block, attributes. */
1745 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
1746 size = WWN_SIZE + 4;
1748 /* Attributes */
1749 ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1750 entries = &ct_req->req;
1752 /* FC4 types. */
1753 eiter = entries + size;
1754 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1755 eiter->len = cpu_to_be16(4 + 32);
1756 eiter->a.fc4_types[2] = 0x01;
1757 size += 4 + 32;
1759 ql_dbg(ql_dbg_disc, vha, 0x2039,
1760 "FC4_TYPES=%02x %02x.\n",
1761 eiter->a.fc4_types[2],
1762 eiter->a.fc4_types[1]);
1764 /* Supported speed. */
1765 eiter = entries + size;
1766 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1767 eiter->len = cpu_to_be16(4 + 4);
1768 if (IS_CNA_CAPABLE(ha))
1769 eiter->a.sup_speed = cpu_to_be32(
1770 FDMI_PORT_SPEED_10GB);
1771 else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1772 eiter->a.sup_speed = cpu_to_be32(
1773 FDMI_PORT_SPEED_32GB|
1774 FDMI_PORT_SPEED_16GB|
1775 FDMI_PORT_SPEED_8GB);
1776 else if (IS_QLA2031(ha))
1777 eiter->a.sup_speed = cpu_to_be32(
1778 FDMI_PORT_SPEED_16GB|
1779 FDMI_PORT_SPEED_8GB|
1780 FDMI_PORT_SPEED_4GB);
1781 else if (IS_QLA25XX(ha))
1782 eiter->a.sup_speed = cpu_to_be32(
1783 FDMI_PORT_SPEED_8GB|
1784 FDMI_PORT_SPEED_4GB|
1785 FDMI_PORT_SPEED_2GB|
1786 FDMI_PORT_SPEED_1GB);
1787 else if (IS_QLA24XX_TYPE(ha))
1788 eiter->a.sup_speed = cpu_to_be32(
1789 FDMI_PORT_SPEED_4GB|
1790 FDMI_PORT_SPEED_2GB|
1791 FDMI_PORT_SPEED_1GB);
1792 else if (IS_QLA23XX(ha))
1793 eiter->a.sup_speed = cpu_to_be32(
1794 FDMI_PORT_SPEED_2GB|
1795 FDMI_PORT_SPEED_1GB);
1796 else
1797 eiter->a.sup_speed = cpu_to_be32(
1798 FDMI_PORT_SPEED_1GB);
1799 size += 4 + 4;
1801 ql_dbg(ql_dbg_disc, vha, 0x203a,
1802 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1804 /* Current speed. */
1805 eiter = entries + size;
1806 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1807 eiter->len = cpu_to_be16(4 + 4);
1808 switch (ha->link_data_rate) {
1809 case PORT_SPEED_1GB:
1810 eiter->a.cur_speed =
1811 cpu_to_be32(FDMI_PORT_SPEED_1GB);
1812 break;
1813 case PORT_SPEED_2GB:
1814 eiter->a.cur_speed =
1815 cpu_to_be32(FDMI_PORT_SPEED_2GB);
1816 break;
1817 case PORT_SPEED_4GB:
1818 eiter->a.cur_speed =
1819 cpu_to_be32(FDMI_PORT_SPEED_4GB);
1820 break;
1821 case PORT_SPEED_8GB:
1822 eiter->a.cur_speed =
1823 cpu_to_be32(FDMI_PORT_SPEED_8GB);
1824 break;
1825 case PORT_SPEED_10GB:
1826 eiter->a.cur_speed =
1827 cpu_to_be32(FDMI_PORT_SPEED_10GB);
1828 break;
1829 case PORT_SPEED_16GB:
1830 eiter->a.cur_speed =
1831 cpu_to_be32(FDMI_PORT_SPEED_16GB);
1832 break;
1833 case PORT_SPEED_32GB:
1834 eiter->a.cur_speed =
1835 cpu_to_be32(FDMI_PORT_SPEED_32GB);
1836 break;
1837 default:
1838 eiter->a.cur_speed =
1839 cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
1840 break;
1842 size += 4 + 4;
1844 ql_dbg(ql_dbg_disc, vha, 0x203b,
1845 "Current_Speed=%x.\n", eiter->a.cur_speed);
1847 /* Max frame size. */
1848 eiter = entries + size;
1849 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1850 eiter->len = cpu_to_be16(4 + 4);
1851 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
1852 le16_to_cpu(icb24->frame_payload_size) :
1853 le16_to_cpu(ha->init_cb->frame_payload_size);
1854 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
1855 size += 4 + 4;
1857 ql_dbg(ql_dbg_disc, vha, 0x203c,
1858 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1860 /* OS device name. */
1861 eiter = entries + size;
1862 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1863 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1864 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1865 alen = strlen(eiter->a.os_dev_name);
1866 alen += 4 - (alen & 3);
1867 eiter->len = cpu_to_be16(4 + alen);
1868 size += 4 + alen;
1870 ql_dbg(ql_dbg_disc, vha, 0x204b,
1871 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
1873 /* Hostname. */
1874 eiter = entries + size;
1875 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1876 p_sysid = utsname();
1877 if (p_sysid) {
1878 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1879 "%s", p_sysid->nodename);
1880 } else {
1881 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1882 "%s", fc_host_system_hostname(vha->host));
1884 alen = strlen(eiter->a.host_name);
1885 alen += 4 - (alen & 3);
1886 eiter->len = cpu_to_be16(4 + alen);
1887 size += 4 + alen;
1889 ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
1891 /* Update MS request size. */
1892 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1894 ql_dbg(ql_dbg_disc, vha, 0x203e,
1895 "RPA portname %016llx, size = %d.\n",
1896 wwn_to_u64(ct_req->req.rpa.port_name), size);
1897 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1898 entries, size);
1900 /* Execute MS IOCB */
1901 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1902 sizeof(ms_iocb_entry_t));
1903 if (rval != QLA_SUCCESS) {
1904 /*EMPTY*/
1905 ql_dbg(ql_dbg_disc, vha, 0x2040,
1906 "RPA issue IOCB failed (%d).\n", rval);
1907 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1908 QLA_SUCCESS) {
1909 rval = QLA_FUNCTION_FAILED;
1910 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1911 ct_rsp->header.explanation_code ==
1912 CT_EXPL_ALREADY_REGISTERED) {
1913 ql_dbg(ql_dbg_disc, vha, 0x20cd,
1914 "RPA already registered.\n");
1915 rval = QLA_ALREADY_REGISTERED;
1918 } else {
1919 ql_dbg(ql_dbg_disc, vha, 0x2041,
1920 "RPA exiting normally.\n");
1923 return rval;
1927 * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
1928 * @vha: HA context
1930 * Returns 0 on success.
1932 static int
1933 qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
1935 int rval, alen;
1936 uint32_t size, sn;
1937 ms_iocb_entry_t *ms_pkt;
1938 struct ct_sns_req *ct_req;
1939 struct ct_sns_rsp *ct_rsp;
1940 void *entries;
1941 struct ct_fdmiv2_hba_attr *eiter;
1942 struct qla_hw_data *ha = vha->hw;
1943 struct new_utsname *p_sysid = NULL;
1945 /* Issue RHBA */
1946 /* Prepare common MS IOCB */
1947 /* Request size adjusted after CT preparation */
1948 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1950 /* Prepare CT request */
1951 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
1952 RHBA_RSP_SIZE);
1953 ct_rsp = &ha->ct_sns->p.rsp;
1955 /* Prepare FDMI command arguments -- attribute block, attributes. */
1956 memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
1957 ct_req->req.rhba2.entry_count = cpu_to_be32(1);
1958 memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
1959 size = 2 * WWN_SIZE + 4 + 4;
1961 /* Attributes */
1962 ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
1963 entries = &ct_req->req;
1965 /* Nodename. */
1966 eiter = entries + size;
1967 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1968 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1969 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1970 size += 4 + WWN_SIZE;
1972 ql_dbg(ql_dbg_disc, vha, 0x207d,
1973 "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
1975 /* Manufacturer. */
1976 eiter = entries + size;
1977 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1978 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1979 "%s", "QLogic Corporation");
1980 eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
1981 alen = strlen(eiter->a.manufacturer);
1982 alen += 4 - (alen & 3);
1983 eiter->len = cpu_to_be16(4 + alen);
1984 size += 4 + alen;
1986 ql_dbg(ql_dbg_disc, vha, 0x20a5,
1987 "Manufacturer = %s.\n", eiter->a.manufacturer);
1989 /* Serial number. */
1990 eiter = entries + size;
1991 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1992 if (IS_FWI2_CAPABLE(ha))
1993 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1994 sizeof(eiter->a.serial_num));
1995 else {
1996 sn = ((ha->serial0 & 0x1f) << 16) |
1997 (ha->serial2 << 8) | ha->serial1;
1998 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1999 "%c%05d", 'A' + sn / 100000, sn % 100000);
2001 alen = strlen(eiter->a.serial_num);
2002 alen += 4 - (alen & 3);
2003 eiter->len = cpu_to_be16(4 + alen);
2004 size += 4 + alen;
2006 ql_dbg(ql_dbg_disc, vha, 0x20a6,
2007 "Serial no. = %s.\n", eiter->a.serial_num);
2009 /* Model name. */
2010 eiter = entries + size;
2011 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
2012 snprintf(eiter->a.model, sizeof(eiter->a.model),
2013 "%s", ha->model_number);
2014 alen = strlen(eiter->a.model);
2015 alen += 4 - (alen & 3);
2016 eiter->len = cpu_to_be16(4 + alen);
2017 size += 4 + alen;
2019 ql_dbg(ql_dbg_disc, vha, 0x20a7,
2020 "Model Name = %s.\n", eiter->a.model);
2022 /* Model description. */
2023 eiter = entries + size;
2024 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
2025 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
2026 "%s", ha->model_desc);
2027 alen = strlen(eiter->a.model_desc);
2028 alen += 4 - (alen & 3);
2029 eiter->len = cpu_to_be16(4 + alen);
2030 size += 4 + alen;
2032 ql_dbg(ql_dbg_disc, vha, 0x20a8,
2033 "Model Desc = %s.\n", eiter->a.model_desc);
2035 /* Hardware version. */
2036 eiter = entries + size;
2037 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
2038 if (!IS_FWI2_CAPABLE(ha)) {
2039 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2040 "HW:%s", ha->adapter_id);
2041 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
2042 sizeof(eiter->a.hw_version))) {
2044 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
2045 sizeof(eiter->a.hw_version))) {
2047 } else {
2048 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2049 "HW:%s", ha->adapter_id);
2051 alen = strlen(eiter->a.hw_version);
2052 alen += 4 - (alen & 3);
2053 eiter->len = cpu_to_be16(4 + alen);
2054 size += 4 + alen;
2056 ql_dbg(ql_dbg_disc, vha, 0x20a9,
2057 "Hardware ver = %s.\n", eiter->a.hw_version);
2059 /* Driver version. */
2060 eiter = entries + size;
2061 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
2062 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
2063 "%s", qla2x00_version_str);
2064 alen = strlen(eiter->a.driver_version);
2065 alen += 4 - (alen & 3);
2066 eiter->len = cpu_to_be16(4 + alen);
2067 size += 4 + alen;
2069 ql_dbg(ql_dbg_disc, vha, 0x20aa,
2070 "Driver ver = %s.\n", eiter->a.driver_version);
2072 /* Option ROM version. */
2073 eiter = entries + size;
2074 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
2075 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
2076 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2077 alen = strlen(eiter->a.orom_version);
2078 alen += 4 - (alen & 3);
2079 eiter->len = cpu_to_be16(4 + alen);
2080 size += 4 + alen;
2082 ql_dbg(ql_dbg_disc, vha, 0x20ab,
2083 "Optrom version = %s.\n",
2084 eiter->a.orom_version);
2086 /* Firmware version */
2087 eiter = entries + size;
2088 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
2089 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
2090 sizeof(eiter->a.fw_version));
2091 alen = strlen(eiter->a.fw_version);
2092 alen += 4 - (alen & 3);
2093 eiter->len = cpu_to_be16(4 + alen);
2094 size += 4 + alen;
2096 ql_dbg(ql_dbg_disc, vha, 0x20ac,
2097 "Firmware vers = %s.\n", eiter->a.fw_version);
2099 /* OS Name and Version */
2100 eiter = entries + size;
2101 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
2102 p_sysid = utsname();
2103 if (p_sysid) {
2104 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2105 "%s %s %s",
2106 p_sysid->sysname, p_sysid->release, p_sysid->version);
2107 } else {
2108 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2109 "%s %s", "Linux", fc_host_system_hostname(vha->host));
2111 alen = strlen(eiter->a.os_version);
2112 alen += 4 - (alen & 3);
2113 eiter->len = cpu_to_be16(4 + alen);
2114 size += 4 + alen;
2116 ql_dbg(ql_dbg_disc, vha, 0x20ae,
2117 "OS Name and Version = %s.\n", eiter->a.os_version);
2119 /* MAX CT Payload Length */
2120 eiter = entries + size;
2121 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
2122 eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size);
2124 eiter->len = cpu_to_be16(4 + 4);
2125 size += 4 + 4;
2127 ql_dbg(ql_dbg_disc, vha, 0x20af,
2128 "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
2130 /* Node Symbolic Name */
2131 eiter = entries + size;
2132 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
2133 qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
2134 sizeof(eiter->a.sym_name));
2135 alen = strlen(eiter->a.sym_name);
2136 alen += 4 - (alen & 3);
2137 eiter->len = cpu_to_be16(4 + alen);
2138 size += 4 + alen;
2140 ql_dbg(ql_dbg_disc, vha, 0x20b0,
2141 "Symbolic Name = %s.\n", eiter->a.sym_name);
2143 /* Vendor Id */
2144 eiter = entries + size;
2145 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
2146 eiter->a.vendor_id = cpu_to_be32(0x1077);
2147 eiter->len = cpu_to_be16(4 + 4);
2148 size += 4 + 4;
2150 ql_dbg(ql_dbg_disc, vha, 0x20b1,
2151 "Vendor Id = %x.\n", eiter->a.vendor_id);
2153 /* Num Ports */
2154 eiter = entries + size;
2155 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
2156 eiter->a.num_ports = cpu_to_be32(1);
2157 eiter->len = cpu_to_be16(4 + 4);
2158 size += 4 + 4;
2160 ql_dbg(ql_dbg_disc, vha, 0x20b2,
2161 "Port Num = %x.\n", eiter->a.num_ports);
2163 /* Fabric Name */
2164 eiter = entries + size;
2165 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
2166 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2167 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2168 size += 4 + WWN_SIZE;
2170 ql_dbg(ql_dbg_disc, vha, 0x20b3,
2171 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2173 /* BIOS Version */
2174 eiter = entries + size;
2175 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
2176 snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
2177 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2178 alen = strlen(eiter->a.bios_name);
2179 alen += 4 - (alen & 3);
2180 eiter->len = cpu_to_be16(4 + alen);
2181 size += 4 + alen;
2183 ql_dbg(ql_dbg_disc, vha, 0x20b4,
2184 "BIOS Name = %s\n", eiter->a.bios_name);
2186 /* Vendor Identifier */
2187 eiter = entries + size;
2188 eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
2189 snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
2190 "%s", "QLGC");
2191 alen = strlen(eiter->a.vendor_identifier);
2192 alen += 4 - (alen & 3);
2193 eiter->len = cpu_to_be16(4 + alen);
2194 size += 4 + alen;
2196 ql_dbg(ql_dbg_disc, vha, 0x201b,
2197 "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
2199 /* Update MS request size. */
2200 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2202 ql_dbg(ql_dbg_disc, vha, 0x20b5,
2203 "RHBA identifier = %016llx.\n",
2204 wwn_to_u64(ct_req->req.rhba2.hba_identifier));
2205 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
2206 entries, size);
2208 /* Execute MS IOCB */
2209 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2210 sizeof(ms_iocb_entry_t));
2211 if (rval != QLA_SUCCESS) {
2212 /*EMPTY*/
2213 ql_dbg(ql_dbg_disc, vha, 0x20b7,
2214 "RHBA issue IOCB failed (%d).\n", rval);
2215 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
2216 QLA_SUCCESS) {
2217 rval = QLA_FUNCTION_FAILED;
2219 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2220 ct_rsp->header.explanation_code ==
2221 CT_EXPL_ALREADY_REGISTERED) {
2222 ql_dbg(ql_dbg_disc, vha, 0x20b8,
2223 "HBA already registered.\n");
2224 rval = QLA_ALREADY_REGISTERED;
2225 } else {
2226 ql_dbg(ql_dbg_disc, vha, 0x2016,
2227 "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2228 ct_rsp->header.reason_code,
2229 ct_rsp->header.explanation_code);
2231 } else {
2232 ql_dbg(ql_dbg_disc, vha, 0x20b9,
2233 "RHBA FDMI V2 exiting normally.\n");
2236 return rval;
2239 /**
2240 * qla2x00_fdmi_dhba() - issue an FDMI De-register HBA (DHBA) request.
2241 * @vha: HA context
2242 *
2243 * Returns 0 on success.
2244 */
2245 static int
2246 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2248 int rval;
2249 struct qla_hw_data *ha = vha->hw;
2250 ms_iocb_entry_t *ms_pkt;
2251 struct ct_sns_req *ct_req;
2252 struct ct_sns_rsp *ct_rsp;
2254 /* Issue DHBA */
2255 /* Prepare common MS IOCB */
2256 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2257 DHBA_RSP_SIZE);
2259 /* Prepare CT request */
2260 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2261 ct_rsp = &ha->ct_sns->p.rsp;
2263 /* Prepare FDMI command arguments -- portname. */
2264 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2266 ql_dbg(ql_dbg_disc, vha, 0x2036,
2267 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2269 /* Execute MS IOCB */
2270 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2271 sizeof(ms_iocb_entry_t));
2272 if (rval != QLA_SUCCESS) {
2273 /*EMPTY*/
2274 ql_dbg(ql_dbg_disc, vha, 0x2037,
2275 "DHBA issue IOCB failed (%d).\n", rval);
2276 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2277 QLA_SUCCESS) {
2278 rval = QLA_FUNCTION_FAILED;
2279 } else {
2280 ql_dbg(ql_dbg_disc, vha, 0x2038,
2281 "DHBA exiting normally.\n");
2284 return rval;
2287 /**
2288 * qla2x00_fdmiv2_rpa() - perform RPA (Register Port Attributes) FDMI v2 registration
2289 * @vha: HA context
2290 *
2291 * Returns 0 on success.
2292 */
2293 static int
2294 qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
2296 int rval, alen;
2297 uint32_t size;
2298 struct qla_hw_data *ha = vha->hw;
2299 ms_iocb_entry_t *ms_pkt;
2300 struct ct_sns_req *ct_req;
2301 struct ct_sns_rsp *ct_rsp;
2302 void *entries;
2303 struct ct_fdmiv2_port_attr *eiter;
2304 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
2305 struct new_utsname *p_sysid = NULL;
2307 /* Issue RPA */
2308 /* Prepare common MS IOCB */
2309 /* Request size adjusted after CT preparation */
2310 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
2312 /* Prepare CT request */
2313 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
2314 ct_rsp = &ha->ct_sns->p.rsp;
2316 /* Prepare FDMI command arguments -- attribute block, attributes. */
2317 memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
2318 size = WWN_SIZE + 4;
2320 /* Attributes */
2321 ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
2322 entries = &ct_req->req;
2324 /* FC4 types. */
2325 eiter = entries + size;
2326 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
2327 eiter->len = cpu_to_be16(4 + 32);
2328 eiter->a.fc4_types[2] = 0x01;
2329 size += 4 + 32;
2331 ql_dbg(ql_dbg_disc, vha, 0x20ba,
2332 "FC4_TYPES=%02x %02x.\n",
2333 eiter->a.fc4_types[2],
2334 eiter->a.fc4_types[1]);
2336 if (vha->flags.nvme_enabled) {
2337 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
2338 ql_dbg(ql_dbg_disc, vha, 0x211f,
2339 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2340 eiter->a.fc4_types[6]);
2343 /* Supported speed. */
2344 eiter = entries + size;
2345 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
2346 eiter->len = cpu_to_be16(4 + 4);
2347 if (IS_CNA_CAPABLE(ha))
2348 eiter->a.sup_speed = cpu_to_be32(
2349 FDMI_PORT_SPEED_10GB);
2350 else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
2351 eiter->a.sup_speed = cpu_to_be32(
2352 FDMI_PORT_SPEED_32GB|
2353 FDMI_PORT_SPEED_16GB|
2354 FDMI_PORT_SPEED_8GB);
2355 else if (IS_QLA2031(ha))
2356 eiter->a.sup_speed = cpu_to_be32(
2357 FDMI_PORT_SPEED_16GB|
2358 FDMI_PORT_SPEED_8GB|
2359 FDMI_PORT_SPEED_4GB);
2360 else if (IS_QLA25XX(ha))
2361 eiter->a.sup_speed = cpu_to_be32(
2362 FDMI_PORT_SPEED_8GB|
2363 FDMI_PORT_SPEED_4GB|
2364 FDMI_PORT_SPEED_2GB|
2365 FDMI_PORT_SPEED_1GB);
2366 else if (IS_QLA24XX_TYPE(ha))
2367 eiter->a.sup_speed = cpu_to_be32(
2368 FDMI_PORT_SPEED_4GB|
2369 FDMI_PORT_SPEED_2GB|
2370 FDMI_PORT_SPEED_1GB);
2371 else if (IS_QLA23XX(ha))
2372 eiter->a.sup_speed = cpu_to_be32(
2373 FDMI_PORT_SPEED_2GB|
2374 FDMI_PORT_SPEED_1GB);
2375 else
2376 eiter->a.sup_speed = cpu_to_be32(
2377 FDMI_PORT_SPEED_1GB);
2378 size += 4 + 4;
2380 ql_dbg(ql_dbg_disc, vha, 0x20bb,
2381 "Supported Port Speed = %x.\n", eiter->a.sup_speed);
2383 /* Current speed. */
2384 eiter = entries + size;
2385 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
2386 eiter->len = cpu_to_be16(4 + 4);
2387 switch (ha->link_data_rate) {
2388 case PORT_SPEED_1GB:
2389 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
2390 break;
2391 case PORT_SPEED_2GB:
2392 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
2393 break;
2394 case PORT_SPEED_4GB:
2395 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
2396 break;
2397 case PORT_SPEED_8GB:
2398 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
2399 break;
2400 case PORT_SPEED_10GB:
2401 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
2402 break;
2403 case PORT_SPEED_16GB:
2404 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
2405 break;
2406 case PORT_SPEED_32GB:
2407 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
2408 break;
2409 default:
2410 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
2411 break;
2413 size += 4 + 4;
2415 ql_dbg(ql_dbg_disc, vha, 0x2017,
2416 "Current_Speed = %x.\n", eiter->a.cur_speed);
2418 /* Max frame size. */
2419 eiter = entries + size;
2420 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
2421 eiter->len = cpu_to_be16(4 + 4);
2422 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
2423 le16_to_cpu(icb24->frame_payload_size) :
2424 le16_to_cpu(ha->init_cb->frame_payload_size);
2425 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
2426 size += 4 + 4;
2428 ql_dbg(ql_dbg_disc, vha, 0x20bc,
2429 "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
2431 /* OS device name. */
2432 eiter = entries + size;
2433 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
2434 alen = strlen(QLA2XXX_DRIVER_NAME);
2435 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
2436 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
2437 alen += 4 - (alen & 3);
2438 eiter->len = cpu_to_be16(4 + alen);
2439 size += 4 + alen;
2441 ql_dbg(ql_dbg_disc, vha, 0x20be,
2442 "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
2444 /* Hostname. */
2445 eiter = entries + size;
2446 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
2447 p_sysid = utsname();
2448 if (p_sysid) {
2449 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2450 "%s", p_sysid->nodename);
2451 } else {
2452 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2453 "%s", fc_host_system_hostname(vha->host));
2455 alen = strlen(eiter->a.host_name);
2456 alen += 4 - (alen & 3);
2457 eiter->len = cpu_to_be16(4 + alen);
2458 size += 4 + alen;
2460 ql_dbg(ql_dbg_disc, vha, 0x201a,
2461 "HostName=%s.\n", eiter->a.host_name);
2463 /* Node Name */
2464 eiter = entries + size;
2465 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
2466 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
2467 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2468 size += 4 + WWN_SIZE;
2470 ql_dbg(ql_dbg_disc, vha, 0x20c0,
2471 "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
2473 /* Port Name */
2474 eiter = entries + size;
2475 eiter->type = cpu_to_be16(FDMI_PORT_NAME);
2476 memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
2477 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2478 size += 4 + WWN_SIZE;
2480 ql_dbg(ql_dbg_disc, vha, 0x20c1,
2481 "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
2483 /* Port Symbolic Name */
2484 eiter = entries + size;
2485 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
2486 qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
2487 sizeof(eiter->a.port_sym_name));
2488 alen = strlen(eiter->a.port_sym_name);
2489 alen += 4 - (alen & 3);
2490 eiter->len = cpu_to_be16(4 + alen);
2491 size += 4 + alen;
2493 ql_dbg(ql_dbg_disc, vha, 0x20c2,
2494 "port symbolic name = %s\n", eiter->a.port_sym_name);
2496 /* Port Type */
2497 eiter = entries + size;
2498 eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
2499 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
2500 eiter->len = cpu_to_be16(4 + 4);
2501 size += 4 + 4;
2503 ql_dbg(ql_dbg_disc, vha, 0x20c3,
2504 "Port Type = %x.\n", eiter->a.port_type);
2506 /* Class of Service */
2507 eiter = entries + size;
2508 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
2509 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
2510 eiter->len = cpu_to_be16(4 + 4);
2511 size += 4 + 4;
2513 ql_dbg(ql_dbg_disc, vha, 0x20c4,
2514 "Supported COS = %08x\n", eiter->a.port_supported_cos);
2516 /* Port Fabric Name */
2517 eiter = entries + size;
2518 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2519 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2520 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2521 size += 4 + WWN_SIZE;
2523 ql_dbg(ql_dbg_disc, vha, 0x20c5,
2524 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2526 /* FC4_type */
2527 eiter = entries + size;
2528 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
2529 eiter->a.port_fc4_type[0] = 0;
2530 eiter->a.port_fc4_type[1] = 0;
2531 eiter->a.port_fc4_type[2] = 1;
2532 eiter->a.port_fc4_type[3] = 0;
2533 eiter->len = cpu_to_be16(4 + 32);
2534 size += 4 + 32;
2536 ql_dbg(ql_dbg_disc, vha, 0x20c6,
2537 "Port Active FC4 Type = %02x %02x.\n",
2538 eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
2540 if (vha->flags.nvme_enabled) {
2541 eiter->a.port_fc4_type[4] = 0;
2542 eiter->a.port_fc4_type[5] = 0;
2543 eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
2544 ql_dbg(ql_dbg_disc, vha, 0x2120,
2545 "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2546 eiter->a.port_fc4_type[6]);
2549 /* Port State */
2550 eiter = entries + size;
2551 eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2552 eiter->a.port_state = cpu_to_be32(1);
2553 eiter->len = cpu_to_be16(4 + 4);
2554 size += 4 + 4;
2556 ql_dbg(ql_dbg_disc, vha, 0x20c7,
2557 "Port State = %x.\n", eiter->a.port_state);
2559 /* Number of Ports */
2560 eiter = entries + size;
2561 eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2562 eiter->a.num_ports = cpu_to_be32(1);
2563 eiter->len = cpu_to_be16(4 + 4);
2564 size += 4 + 4;
2566 ql_dbg(ql_dbg_disc, vha, 0x20c8,
2567 "Number of ports = %x.\n", eiter->a.num_ports);
2569 /* Port Id */
2570 eiter = entries + size;
2571 eiter->type = cpu_to_be16(FDMI_PORT_ID);
2572 eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2573 eiter->len = cpu_to_be16(4 + 4);
2574 size += 4 + 4;
2576 ql_dbg(ql_dbg_disc, vha, 0x201c,
2577 "Port Id = %x.\n", eiter->a.port_id);
2579 /* Update MS request size. */
2580 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2582 ql_dbg(ql_dbg_disc, vha, 0x2018,
2583 "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
2584 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
2585 entries, size);
2587 /* Execute MS IOCB */
2588 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2589 sizeof(ms_iocb_entry_t));
2590 if (rval != QLA_SUCCESS) {
2591 /*EMPTY*/
2592 ql_dbg(ql_dbg_disc, vha, 0x20cb,
2593 "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
2594 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
2595 QLA_SUCCESS) {
2596 rval = QLA_FUNCTION_FAILED;
2597 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2598 ct_rsp->header.explanation_code ==
2599 CT_EXPL_ALREADY_REGISTERED) {
2600 ql_dbg(ql_dbg_disc, vha, 0x20ce,
2601 "RPA FDMI v2 already registered\n");
2602 rval = QLA_ALREADY_REGISTERED;
2603 } else {
2604 ql_dbg(ql_dbg_disc, vha, 0x2020,
2605 "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2606 ct_rsp->header.reason_code,
2607 ct_rsp->header.explanation_code);
2609 } else {
2610 ql_dbg(ql_dbg_disc, vha, 0x20cc,
2611 "RPA FDMI V2 exiting normally.\n");
2614 return rval;
2617 /**
2618 * qla2x00_fdmi_register() - register FDMI HBA and port attributes with the fabric.
2619 * @vha: HA context
2620 *
2621 * Returns 0 on success.
2622 */
2623 int
2624 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2626 int rval = QLA_FUNCTION_FAILED;
2627 struct qla_hw_data *ha = vha->hw;
2629 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2630 IS_QLAFX00(ha))
2631 return QLA_FUNCTION_FAILED;
2633 rval = qla2x00_mgmt_svr_login(vha);
2634 if (rval)
2635 return rval;
2637 rval = qla2x00_fdmiv2_rhba(vha);
2638 if (rval) {
2639 if (rval != QLA_ALREADY_REGISTERED)
2640 goto try_fdmi;
2642 rval = qla2x00_fdmi_dhba(vha);
2643 if (rval)
2644 goto try_fdmi;
2646 rval = qla2x00_fdmiv2_rhba(vha);
2647 if (rval)
2648 goto try_fdmi;
2650 rval = qla2x00_fdmiv2_rpa(vha);
2651 if (rval)
2652 goto try_fdmi;
2654 goto out;
2656 try_fdmi:
2657 rval = qla2x00_fdmi_rhba(vha);
2658 if (rval) {
2659 if (rval != QLA_ALREADY_REGISTERED)
2660 return rval;
2662 rval = qla2x00_fdmi_dhba(vha);
2663 if (rval)
2664 return rval;
2666 rval = qla2x00_fdmi_rhba(vha);
2667 if (rval)
2668 return rval;
2670 rval = qla2x00_fdmi_rpa(vha);
2671 out:
2672 return rval;
2676 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2677 * @vha: HA context
2678 * @list: switch info entries to populate
2680 * Returns 0 on success.
2683 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2685 int rval = QLA_SUCCESS;
2686 uint16_t i;
2687 struct qla_hw_data *ha = vha->hw;
2688 ms_iocb_entry_t *ms_pkt;
2689 struct ct_sns_req *ct_req;
2690 struct ct_sns_rsp *ct_rsp;
2691 struct ct_arg arg;
2693 if (!IS_IIDMA_CAPABLE(ha))
2694 return QLA_FUNCTION_FAILED;
2696 arg.iocb = ha->ms_iocb;
2697 arg.req_dma = ha->ct_sns_dma;
2698 arg.rsp_dma = ha->ct_sns_dma;
2699 arg.req_size = GFPN_ID_REQ_SIZE;
2700 arg.rsp_size = GFPN_ID_RSP_SIZE;
2701 arg.nport_handle = NPH_SNS;
2703 for (i = 0; i < ha->max_fibre_devices; i++) {
2704 /* Issue GFPN_ID */
2705 /* Prepare common MS IOCB */
2706 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2708 /* Prepare CT request */
2709 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2710 GFPN_ID_RSP_SIZE);
2711 ct_rsp = &ha->ct_sns->p.rsp;
2713 /* Prepare CT arguments -- port_id */
2714 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2716 /* Execute MS IOCB */
2717 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2718 sizeof(ms_iocb_entry_t));
2719 if (rval != QLA_SUCCESS) {
2720 /*EMPTY*/
2721 ql_dbg(ql_dbg_disc, vha, 0x2023,
2722 "GFPN_ID issue IOCB failed (%d).\n", rval);
2723 break;
2724 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2725 "GFPN_ID") != QLA_SUCCESS) {
2726 rval = QLA_FUNCTION_FAILED;
2727 break;
2728 } else {
2729 /* Save fabric portname */
2730 memcpy(list[i].fabric_port_name,
2731 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
2734 /* Last device exit. */
2735 if (list[i].d_id.b.rsvd_1 != 0)
2736 break;
2739 return (rval);
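/**
 * qla24xx_prep_ct_fm_req() - Prepare a CT request header addressed to the
 * fabric management server (GS type 0xFA, subtype 0x01).
 * @p: CT request buffer
 * @cmd: GS command
 * @rsp_size: response size in bytes
 *
 * Returns a pointer to the initialized CT request.
 */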
2743 static inline struct ct_sns_req *
2744 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2745 uint16_t rsp_size)
2747 memset(p, 0, sizeof(struct ct_sns_pkt));
2749 p->p.req.header.revision = 0x01;
2750 p->p.req.header.gs_type = 0xFA;
2751 p->p.req.header.gs_subtype = 0x01;
2752 p->p.req.command = cpu_to_be16(cmd);
2753 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
2755 return &p->p.req;
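/**
 * qla2x00_port_speed_capability() - Map a GPSC speed bit to a PORT_SPEED_*
 * value.
 * @speed: speed bit (BIT_7..BIT_15) reported by the fabric
 *
 * Returns the matching PORT_SPEED_* constant, or PORT_SPEED_UNKNOWN.
 */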
2758 static uint16_t
2759 qla2x00_port_speed_capability(uint16_t speed)
2761 switch (speed) {
2762 case BIT_15:
2763 return PORT_SPEED_1GB;
2764 case BIT_14:
2765 return PORT_SPEED_2GB;
2766 case BIT_13:
2767 return PORT_SPEED_4GB;
2768 case BIT_12:
2769 return PORT_SPEED_10GB;
2770 case BIT_11:
2771 return PORT_SPEED_8GB;
2772 case BIT_10:
2773 return PORT_SPEED_16GB;
2774 case BIT_8:
2775 return PORT_SPEED_32GB;
2776 case BIT_7:
2777 return PORT_SPEED_64GB;
2778 default:
2779 return PORT_SPEED_UNKNOWN;
2784 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2785 * @vha: HA context
2786 * @list: switch info entries to populate
2788 * Returns 0 on success.
2791 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2793 int rval;
2794 uint16_t i;
2795 struct qla_hw_data *ha = vha->hw;
2796 ms_iocb_entry_t *ms_pkt;
2797 struct ct_sns_req *ct_req;
2798 struct ct_sns_rsp *ct_rsp;
2799 struct ct_arg arg;
2801 if (!IS_IIDMA_CAPABLE(ha))
2802 return QLA_FUNCTION_FAILED;
2803 if (!ha->flags.gpsc_supported)
2804 return QLA_FUNCTION_FAILED;
2806 rval = qla2x00_mgmt_svr_login(vha);
2807 if (rval)
2808 return rval;
2810 arg.iocb = ha->ms_iocb;
2811 arg.req_dma = ha->ct_sns_dma;
2812 arg.rsp_dma = ha->ct_sns_dma;
2813 arg.req_size = GPSC_REQ_SIZE;
2814 arg.rsp_size = GPSC_RSP_SIZE;
2815 arg.nport_handle = vha->mgmt_svr_loop_id;
2817 for (i = 0; i < ha->max_fibre_devices; i++) {
2818 /* Issue GPSC */
2819 /* Prepare common MS IOCB */
2820 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2822 /* Prepare CT request */
2823 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2824 GPSC_RSP_SIZE);
2825 ct_rsp = &ha->ct_sns->p.rsp;
2827 /* Prepare CT arguments -- port_name */
2828 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2829 WWN_SIZE);
2831 /* Execute MS IOCB */
2832 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2833 sizeof(ms_iocb_entry_t));
2834 if (rval != QLA_SUCCESS) {
2835 /*EMPTY*/
2836 ql_dbg(ql_dbg_disc, vha, 0x2059,
2837 "GPSC issue IOCB failed (%d).\n", rval);
2838 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2839 "GPSC")) != QLA_SUCCESS) {
2840 /* FM command unsupported? */
2841 if (rval == QLA_INVALID_COMMAND &&
2842 (ct_rsp->header.reason_code ==
2843 CT_REASON_INVALID_COMMAND_CODE ||
2844 ct_rsp->header.reason_code ==
2845 CT_REASON_COMMAND_UNSUPPORTED)) {
2846 ql_dbg(ql_dbg_disc, vha, 0x205a,
2847 "GPSC command unsupported, disabling "
2848 "query.\n");
2849 ha->flags.gpsc_supported = 0;
2850 rval = QLA_FUNCTION_FAILED;
2851 break;
2853 rval = QLA_FUNCTION_FAILED;
2854 } else {
2855 list[i].fp_speed = qla2x00_port_speed_capability(
2856 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2857 ql_dbg(ql_dbg_disc, vha, 0x205b,
2858 "GPSC ext entry - fpn "
2859 "%8phN speeds=%04x speed=%04x.\n",
2860 list[i].fabric_port_name,
2861 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2862 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2865 /* Last device exit. */
2866 if (list[i].d_id.b.rsvd_1 != 0)
2867 break;
2870 return (rval);
2874 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2876 * @vha: HA context
2877 * @list: switch info entries to populate
2880 void
2881 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2883 int rval;
2884 uint16_t i;
2886 ms_iocb_entry_t *ms_pkt;
2887 struct ct_sns_req *ct_req;
2888 struct ct_sns_rsp *ct_rsp;
2889 struct qla_hw_data *ha = vha->hw;
2890 uint8_t fcp_scsi_features = 0, nvme_features = 0;
2891 struct ct_arg arg;
2893 for (i = 0; i < ha->max_fibre_devices; i++) {
2894 /* Set the default FC4 type to UNKNOWN so that, by default,
2895 * this port is processed. */
2896 list[i].fc4_type = FC4_TYPE_UNKNOWN;
2898 /* Do not attempt GFF_ID if we are not FWI_2 capable */
2899 if (!IS_FWI2_CAPABLE(ha))
2900 continue;
2902 arg.iocb = ha->ms_iocb;
2903 arg.req_dma = ha->ct_sns_dma;
2904 arg.rsp_dma = ha->ct_sns_dma;
2905 arg.req_size = GFF_ID_REQ_SIZE;
2906 arg.rsp_size = GFF_ID_RSP_SIZE;
2907 arg.nport_handle = NPH_SNS;
2909 /* Prepare common MS IOCB */
2910 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2912 /* Prepare CT request */
2913 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2914 GFF_ID_RSP_SIZE);
2915 ct_rsp = &ha->ct_sns->p.rsp;
2917 /* Prepare CT arguments -- port_id */
2918 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2920 /* Execute MS IOCB */
2921 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2922 sizeof(ms_iocb_entry_t));
2924 if (rval != QLA_SUCCESS) {
2925 ql_dbg(ql_dbg_disc, vha, 0x205c,
2926 "GFF_ID issue IOCB failed (%d).\n", rval);
2927 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2928 "GFF_ID") != QLA_SUCCESS) {
2929 ql_dbg(ql_dbg_disc, vha, 0x205d,
2930 "GFF_ID IOCB status had a failure status code.\n");
2931 } else {
2932 fcp_scsi_features =
2933 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2934 fcp_scsi_features &= 0x0f;
2936 if (fcp_scsi_features) {
2937 list[i].fc4_type = FS_FC4TYPE_FCP;
2938 list[i].fc4_features = fcp_scsi_features;
2941 nvme_features =
2942 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2943 nvme_features &= 0xf;
2945 if (nvme_features) {
2946 list[i].fc4_type |= FS_FC4TYPE_NVME;
2947 list[i].fc4_features = nvme_features;
2951 /* Last device exit. */
2952 if (list[i].d_id.b.rsvd_1 != 0)
2953 break;
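/*
 * Queue a QLA_EVT_GPSC work item so the GPSC query for @fcport runs in
 * process context; the fcport is marked FCF_ASYNC_ACTIVE.
 */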
2957 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
2959 struct qla_work_evt *e;
2961 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
2962 if (!e)
2963 return QLA_FUNCTION_FAILED;
2965 e->u.fcport.fcport = fcport;
2966 fcport->flags |= FCF_ASYNC_ACTIVE;
2967 return qla2x00_post_work(vha, e);
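/*
 * Handle GPSC completion: ignore stale results (pending delete or changed
 * login/RSCN generation), otherwise queue iIDMA work for the port.
 */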
2970 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
2972 struct fc_port *fcport = ea->fcport;
2974 ql_dbg(ql_dbg_disc, vha, 0x20d8,
2975 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2976 __func__, fcport->port_name, fcport->disc_state,
2977 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
2978 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id);
2980 if (fcport->disc_state == DSC_DELETE_PEND)
2981 return;
2983 if (ea->sp->gen2 != fcport->login_gen) {
2984 /* target side must have changed it. */
2985 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2986 "%s %8phC generation changed\n",
2987 __func__, fcport->port_name);
2988 return;
2989 } else if (ea->sp->gen1 != fcport->rscn_gen) {
2990 return;
2993 qla_post_iidma_work(vha, fcport);
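/*
 * Completion handler for the async GPSC command: record the reported port
 * speed capability, disable further GPSC queries if the fabric rejects the
 * command, then pass the result to qla24xx_handle_gpsc_event().
 */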
2996 static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
2998 struct scsi_qla_host *vha = sp->vha;
2999 struct qla_hw_data *ha = vha->hw;
3000 fc_port_t *fcport = sp->fcport;
3001 struct ct_sns_rsp *ct_rsp;
3002 struct event_arg ea;
3004 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3006 ql_dbg(ql_dbg_disc, vha, 0x2053,
3007 "Async done-%s res %x, WWPN %8phC \n",
3008 sp->name, res, fcport->port_name);
3010 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3012 if (res == QLA_FUNCTION_TIMEOUT)
3013 goto done;
3015 if (res == (DID_ERROR << 16)) {
3016 /* entry status error */
3017 goto done;
3018 } else if (res) {
3019 if ((ct_rsp->header.reason_code ==
3020 CT_REASON_INVALID_COMMAND_CODE) ||
3021 (ct_rsp->header.reason_code ==
3022 CT_REASON_COMMAND_UNSUPPORTED)) {
3023 ql_dbg(ql_dbg_disc, vha, 0x2019,
3024 "GPSC command unsupported, disabling query.\n");
3025 ha->flags.gpsc_supported = 0;
3026 goto done;
3028 } else {
3029 fcport->fp_speed = qla2x00_port_speed_capability(
3030 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3032 ql_dbg(ql_dbg_disc, vha, 0x2054,
3033 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
3034 sp->name, fcport->fabric_port_name,
3035 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
3036 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3038 memset(&ea, 0, sizeof(ea));
3039 ea.rc = res;
3040 ea.fcport = fcport;
3041 ea.sp = sp;
3042 qla24xx_handle_gpsc_event(vha, &ea);
3044 done:
3045 sp->free(sp);
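/*
 * Issue an asynchronous GPSC (Get Port Speed Capabilities) query to the
 * management server for @fcport using its CT descriptor buffers.
 */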
3048 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
3050 int rval = QLA_FUNCTION_FAILED;
3051 struct ct_sns_req *ct_req;
3052 srb_t *sp;
3054 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3055 return rval;
3057 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3058 if (!sp)
3059 goto done;
3061 sp->type = SRB_CT_PTHRU_CMD;
3062 sp->name = "gpsc";
3063 sp->gen1 = fcport->rscn_gen;
3064 sp->gen2 = fcport->login_gen;
3066 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3068 /* CT_IU preamble */
3069 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
3070 GPSC_RSP_SIZE);
3072 /* GPSC req */
3073 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
3074 WWN_SIZE);
3076 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3077 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3078 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3079 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3080 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
3081 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
3082 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
3084 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3085 sp->done = qla24xx_async_gpsc_sp_done;
3087 ql_dbg(ql_dbg_disc, vha, 0x205e,
3088 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
3089 sp->name, fcport->port_name, sp->handle,
3090 fcport->loop_id, fcport->d_id.b.domain,
3091 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3093 rval = qla2x00_start_sp(sp);
3094 if (rval != QLA_SUCCESS)
3095 goto done_free_sp;
3096 return rval;
3098 done_free_sp:
3099 sp->free(sp);
3100 fcport->flags &= ~FCF_ASYNC_SENT;
3101 done:
3102 fcport->flags &= ~FCF_ASYNC_ACTIVE;
3103 return rval;
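/*
 * Queue a QLA_EVT_GPNID work item for @id, unless the host is unloading or
 * the vport is being deleted.
 */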
3106 int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
3108 struct qla_work_evt *e;
3110 if (test_bit(UNLOADING, &vha->dpc_flags) ||
3111 (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)))
3112 return 0;
3114 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
3115 if (!e)
3116 return QLA_FUNCTION_FAILED;
3118 e->u.gpnid.id = *id;
3119 return qla2x00_post_work(vha, e);
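/*
 * Free the DMA buffers attached to a CT pass-through or ELS srb, then free
 * the srb itself.
 */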
3122 void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
3124 struct srb_iocb *c = &sp->u.iocb_cmd;
3126 switch (sp->type) {
3127 case SRB_ELS_DCMD:
3128 qla2x00_els_dcmd2_free(vha, &c->u.els_plogi);
3129 break;
3130 case SRB_CT_PTHRU_CMD:
3131 default:
3132 if (sp->u.iocb_cmd.u.ctarg.req) {
3133 dma_free_coherent(&vha->hw->pdev->dev,
3134 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3135 sp->u.iocb_cmd.u.ctarg.req,
3136 sp->u.iocb_cmd.u.ctarg.req_dma);
3137 sp->u.iocb_cmd.u.ctarg.req = NULL;
3140 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3141 dma_free_coherent(&vha->hw->pdev->dev,
3142 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3143 sp->u.iocb_cmd.u.ctarg.rsp,
3144 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3145 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3147 break;
3150 sp->free(sp);
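/*
 * Handle GPN_ID completion: on error schedule deletion of the session with
 * the matching port ID; on success resolve Nport ID conflicts and
 * revalidate, login or create the fcport owning the returned WWPN.
 */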
3153 void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3155 fc_port_t *fcport, *conflict, *t;
3156 u16 data[2];
3158 ql_dbg(ql_dbg_disc, vha, 0xffff,
3159 "%s %d port_id: %06x\n",
3160 __func__, __LINE__, ea->id.b24);
3162 if (ea->rc) {
3163 /* cable is disconnected */
3164 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3165 if (fcport->d_id.b24 == ea->id.b24)
3166 fcport->scan_state = QLA_FCPORT_SCAN;
3168 qlt_schedule_sess_for_deletion(fcport);
3170 } else {
3171 /* cable is connected */
3172 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
3173 if (fcport) {
3174 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3175 list) {
3176 if ((conflict->d_id.b24 == ea->id.b24) &&
3177 (fcport != conflict))
3178 /*
3179 * Two fcports have conflicting Nport IDs, or an
3180 * existing fcport's Nport ID conflicts with the
3181 * new fcport.
3182 */
3184 conflict->scan_state = QLA_FCPORT_SCAN;
3186 qlt_schedule_sess_for_deletion(conflict);
3189 fcport->scan_needed = 0;
3190 fcport->rscn_gen++;
3191 fcport->scan_state = QLA_FCPORT_FOUND;
3192 fcport->flags |= FCF_FABRIC_DEVICE;
3193 if (fcport->login_retry == 0) {
3194 fcport->login_retry =
3195 vha->hw->login_retry_count;
3196 ql_dbg(ql_dbg_disc, vha, 0xffff,
3197 "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
3198 fcport->port_name, fcport->loop_id,
3199 fcport->login_retry);
3201 switch (fcport->disc_state) {
3202 case DSC_LOGIN_COMPLETE:
3203 /* recheck session is still intact. */
3204 ql_dbg(ql_dbg_disc, vha, 0x210d,
3205 "%s %d %8phC revalidate session with ADISC\n",
3206 __func__, __LINE__, fcport->port_name);
3207 data[0] = data[1] = 0;
3208 qla2x00_post_async_adisc_work(vha, fcport,
3209 data);
3210 break;
3211 case DSC_DELETED:
3212 ql_dbg(ql_dbg_disc, vha, 0x210d,
3213 "%s %d %8phC login\n", __func__, __LINE__,
3214 fcport->port_name);
3215 fcport->d_id = ea->id;
3216 qla24xx_fcport_handle_login(vha, fcport);
3217 break;
3218 case DSC_DELETE_PEND:
3219 fcport->d_id = ea->id;
3220 break;
3221 default:
3222 fcport->d_id = ea->id;
3223 break;
3225 } else {
3226 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3227 list) {
3228 if (conflict->d_id.b24 == ea->id.b24) {
3229 /* Two fcports have conflicting Nport IDs, or an
3230 * existing fcport's Nport ID conflicts with the
3231 * new fcport.
3232 */
3233 ql_dbg(ql_dbg_disc, vha, 0xffff,
3234 "%s %d %8phC DS %d\n",
3235 __func__, __LINE__,
3236 conflict->port_name,
3237 conflict->disc_state);
3239 conflict->scan_state = QLA_FCPORT_SCAN;
3240 qlt_schedule_sess_for_deletion(conflict);
3244 /* create new fcport */
3245 ql_dbg(ql_dbg_disc, vha, 0x2065,
3246 "%s %d %8phC post new sess\n",
3247 __func__, __LINE__, ea->port_name);
3248 qla24xx_post_newsess_work(vha, &ea->id,
3249 ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
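/*
 * Completion handler for async GPN_ID: remove the srb from the gpnid list,
 * retry on timeout or a newer RSCN, hand the result to
 * qla24xx_handle_gpnid_event() and queue unmap work to free the buffers.
 */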
3254 static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
3256 struct scsi_qla_host *vha = sp->vha;
3257 struct ct_sns_req *ct_req =
3258 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3259 struct ct_sns_rsp *ct_rsp =
3260 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3261 struct event_arg ea;
3262 struct qla_work_evt *e;
3263 unsigned long flags;
3265 if (res)
3266 ql_dbg(ql_dbg_disc, vha, 0x2066,
3267 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3268 sp->name, res, sp->gen1, &ct_req->req.port_id.port_id,
3269 ct_rsp->rsp.gpn_id.port_name);
3270 else
3271 ql_dbg(ql_dbg_disc, vha, 0x2066,
3272 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3273 sp->name, sp->gen1, &ct_req->req.port_id.port_id,
3274 ct_rsp->rsp.gpn_id.port_name);
3276 memset(&ea, 0, sizeof(ea));
3277 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
3278 ea.sp = sp;
3279 ea.id = be_to_port_id(ct_req->req.port_id.port_id);
3280 ea.rc = res;
3282 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3283 list_del(&sp->elem);
3284 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3286 if (res) {
3287 if (res == QLA_FUNCTION_TIMEOUT) {
3288 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3289 sp->free(sp);
3290 return;
3292 } else if (sp->gen1) {
3293 /* There was another RSCN for this Nport ID */
3294 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3295 sp->free(sp);
3296 return;
3299 qla24xx_handle_gpnid_event(vha, &ea);
3301 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3302 if (!e) {
3303 /* Ignore the kernel warning this may trigger; freeing here avoids a memory leak. */
3304 dma_free_coherent(&vha->hw->pdev->dev,
3305 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3306 sp->u.iocb_cmd.u.ctarg.req,
3307 sp->u.iocb_cmd.u.ctarg.req_dma);
3308 sp->u.iocb_cmd.u.ctarg.req = NULL;
3310 dma_free_coherent(&vha->hw->pdev->dev,
3311 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3312 sp->u.iocb_cmd.u.ctarg.rsp,
3313 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3314 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3316 sp->free(sp);
3317 return;
3320 e->u.iosb.sp = sp;
3321 qla2x00_post_work(vha, e);
3324 /* Get WWPN with Nport ID. */
3325 int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3327 int rval = QLA_FUNCTION_FAILED;
3328 struct ct_sns_req *ct_req;
3329 srb_t *sp, *tsp;
3330 struct ct_sns_pkt *ct_sns;
3331 unsigned long flags;
3333 if (!vha->flags.online)
3334 goto done;
3336 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3337 if (!sp)
3338 goto done;
3340 sp->type = SRB_CT_PTHRU_CMD;
3341 sp->name = "gpnid";
3342 sp->u.iocb_cmd.u.ctarg.id = *id;
3343 sp->gen1 = 0;
3344 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3346 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3347 list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3348 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3349 tsp->gen1++;
3350 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3351 sp->free(sp);
3352 goto done;
3355 list_add_tail(&sp->elem, &vha->gpnid_list);
3356 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3358 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3359 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3360 GFP_KERNEL);
3361 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3362 if (!sp->u.iocb_cmd.u.ctarg.req) {
3363 ql_log(ql_log_warn, vha, 0xd041,
3364 "Failed to allocate ct_sns request.\n");
3365 goto done_free_sp;
3368 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3369 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3370 GFP_KERNEL);
3371 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
3372 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3373 ql_log(ql_log_warn, vha, 0xd042,
3374 "Failed to allocate ct_sns request.\n");
3375 goto done_free_sp;
3378 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
3379 memset(ct_sns, 0, sizeof(*ct_sns));
3381 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3382 /* CT_IU preamble */
3383 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
3385 /* GPN_ID req */
3386 ct_req->req.port_id.port_id = port_id_to_be_id(*id);
3388 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
3389 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
3390 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3392 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3393 sp->done = qla2x00_async_gpnid_sp_done;
3395 ql_dbg(ql_dbg_disc, vha, 0x2067,
3396 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3397 sp->handle, &ct_req->req.port_id.port_id);
3399 rval = qla2x00_start_sp(sp);
3400 if (rval != QLA_SUCCESS)
3401 goto done_free_sp;
3403 return rval;
3405 done_free_sp:
3406 spin_lock_irqsave(&vha->hw->vport_slock, flags);
3407 list_del(&sp->elem);
3408 spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
3410 if (sp->u.iocb_cmd.u.ctarg.req) {
3411 dma_free_coherent(&vha->hw->pdev->dev,
3412 sizeof(struct ct_sns_pkt),
3413 sp->u.iocb_cmd.u.ctarg.req,
3414 sp->u.iocb_cmd.u.ctarg.req_dma);
3415 sp->u.iocb_cmd.u.ctarg.req = NULL;
3417 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3418 dma_free_coherent(&vha->hw->pdev->dev,
3419 sizeof(struct ct_sns_pkt),
3420 sp->u.iocb_cmd.u.ctarg.rsp,
3421 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3422 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3425 sp->free(sp);
3426 done:
3427 return rval;
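/* GFF_ID completion handling: continue discovery by queueing GNL work. */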
3430 void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3432 fc_port_t *fcport = ea->fcport;
3434 qla24xx_post_gnl_work(vha, fcport);
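/*
 * Completion handler for async GFF_ID: extract the FCP SCSI and NVMe FC-4
 * feature nibbles from the response and update the fcport's FC-4 type.
 */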
3437 void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
3439 struct scsi_qla_host *vha = sp->vha;
3440 fc_port_t *fcport = sp->fcport;
3441 struct ct_sns_rsp *ct_rsp;
3442 struct event_arg ea;
3443 uint8_t fc4_scsi_feat;
3444 uint8_t fc4_nvme_feat;
3446 ql_dbg(ql_dbg_disc, vha, 0x2133,
3447 "Async done-%s res %x ID %x. %8phC\n",
3448 sp->name, res, fcport->d_id.b24, fcport->port_name);
3450 fcport->flags &= ~FCF_ASYNC_SENT;
3451 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3452 fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3453 fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
3455 /*
3456 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3457 * The format of the FC-4 Features object, as defined by the FC-4,
3458 * shall be an array of 4-bit values, one for each type code value.
3459 */
3460 if (!res) {
3461 if (fc4_scsi_feat & 0xf) {
3462 /* w1 b00:03 */
3463 fcport->fc4_type = FS_FC4TYPE_FCP;
3464 fcport->fc4_features = fc4_scsi_feat & 0xf;
3467 if (fc4_nvme_feat & 0xf) {
3468 /* w5 [00:03]/28h */
3469 fcport->fc4_type |= FS_FC4TYPE_NVME;
3470 fcport->fc4_features = fc4_nvme_feat & 0xf;
3474 memset(&ea, 0, sizeof(ea));
3475 ea.sp = sp;
3476 ea.fcport = sp->fcport;
3477 ea.rc = res;
3479 qla24xx_handle_gffid_event(vha, &ea);
3480 sp->free(sp);
3483 /* Get FC4 Feature with Nport ID. */
3484 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
3486 int rval = QLA_FUNCTION_FAILED;
3487 struct ct_sns_req *ct_req;
3488 srb_t *sp;
3490 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3491 return rval;
3493 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3494 if (!sp)
3495 return rval;
3497 fcport->flags |= FCF_ASYNC_SENT;
3498 sp->type = SRB_CT_PTHRU_CMD;
3499 sp->name = "gffid";
3500 sp->gen1 = fcport->rscn_gen;
3501 sp->gen2 = fcport->login_gen;
3503 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3504 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3506 /* CT_IU preamble */
3507 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
3508 GFF_ID_RSP_SIZE);
3510 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3511 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3512 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
3514 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3515 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3516 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3517 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3518 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3519 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3520 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3522 sp->done = qla24xx_async_gffid_sp_done;
3524 ql_dbg(ql_dbg_disc, vha, 0x2132,
3525 "Async-%s hdl=%x %8phC.\n", sp->name,
3526 sp->handle, fcport->port_name);
3528 rval = qla2x00_start_sp(sp);
3529 if (rval != QLA_SUCCESS)
3530 goto done_free_sp;
3532 return rval;
3533 done_free_sp:
3534 sp->free(sp);
3535 fcport->flags &= ~FCF_ASYNC_SENT;
3536 return rval;
3539 /* GPN_FT + GNN_FT*/
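/* Return 1 if @wwn matches the port name of one of this host's vports. */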
3540 static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3542 struct qla_hw_data *ha = vha->hw;
3543 scsi_qla_host_t *vp;
3544 unsigned long flags;
3545 u64 twwn;
3546 int rc = 0;
3548 if (!ha->num_vhosts)
3549 return 0;
3551 spin_lock_irqsave(&ha->vport_slock, flags);
3552 list_for_each_entry(vp, &ha->vp_list, list) {
3553 twwn = wwn_to_u64(vp->port_name);
3554 if (wwn == twwn) {
3555 rc = 1;
3556 break;
3559 spin_unlock_irqrestore(&ha->vport_slock, flags);
3561 return rc;
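/*
 * Post-process a completed GPN_FT/GNN_FT fabric scan: drop duplicate switch
 * entries, mark ports that are still present, create sessions for new ports
 * and schedule logout/login for ports that disappeared or changed.
 */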
3564 void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3566 fc_port_t *fcport;
3567 u32 i, rc;
3568 bool found;
3569 struct fab_scan_rp *rp, *trp;
3570 unsigned long flags;
3571 u8 recheck = 0;
3572 u16 dup = 0, dup_cnt = 0;
3574 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3575 "%s enter\n", __func__);
3577 if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3578 ql_dbg(ql_dbg_disc, vha, 0xffff,
3579 "%s scan stop due to chip reset %x/%x\n",
3580 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
3581 goto out;
3584 rc = sp->rc;
3585 if (rc) {
3586 vha->scan.scan_retry++;
3587 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3588 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3589 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3590 goto out;
3591 } else {
3592 ql_dbg(ql_dbg_disc, vha, 0xffff,
3593 "%s: Fabric scan failed for %d retries.\n",
3594 __func__, vha->scan.scan_retry);
3595 /*
3596 * Unable to scan any rports; the logout loop below
3597 * will unregister all sessions.
3598 */
3599 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3600 if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
3601 fcport->scan_state = QLA_FCPORT_SCAN;
3602 fcport->logout_on_delete = 0;
3605 goto login_logout;
3608 vha->scan.scan_retry = 0;
3610 list_for_each_entry(fcport, &vha->vp_fcports, list)
3611 fcport->scan_state = QLA_FCPORT_SCAN;
3613 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3614 u64 wwn;
3615 int k;
3617 rp = &vha->scan.l[i];
3618 found = false;
3620 wwn = wwn_to_u64(rp->port_name);
3621 if (wwn == 0)
3622 continue;
3624 /* Remove duplicate NPORT ID entries from switch data base */
3625 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
3626 trp = &vha->scan.l[k];
3627 if (rp->id.b24 == trp->id.b24) {
3628 dup = 1;
3629 dup_cnt++;
3630 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
3631 vha, 0xffff,
3632 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
3633 rp->id.b24, rp->port_name, trp->port_name);
3634 memset(trp, 0, sizeof(*trp));
3638 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3639 continue;
3641 /* Bypass reserved domain fields. */
3642 if ((rp->id.b.domain & 0xf0) == 0xf0)
3643 continue;
3645 /* Bypass virtual ports of the same host. */
3646 if (qla2x00_is_a_vp(vha, wwn))
3647 continue;
3649 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3650 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3651 continue;
3652 fcport->scan_state = QLA_FCPORT_FOUND;
3653 found = true;
3654 /*
3655 * If device was not a fabric device before.
3656 */
3657 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3658 qla2x00_clear_loop_id(fcport);
3659 fcport->flags |= FCF_FABRIC_DEVICE;
3660 } else if (fcport->d_id.b24 != rp->id.b24 ||
3661 fcport->scan_needed) {
3662 qlt_schedule_sess_for_deletion(fcport);
3664 fcport->d_id.b24 = rp->id.b24;
3665 fcport->scan_needed = 0;
3666 break;
3669 if (!found) {
3670 ql_dbg(ql_dbg_disc, vha, 0xffff,
3671 "%s %d %8phC post new sess\n",
3672 __func__, __LINE__, rp->port_name);
3673 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3674 rp->node_name, NULL, rp->fc4type);
3678 if (dup) {
3679 ql_log(ql_log_warn, vha, 0xffff,
3680 "Detected %d duplicate NPORT ID(s) from switch data base\n",
3681 dup_cnt);
3684 login_logout:
3685 /*
3686 * Logout all previous fabric devices marked lost, except FCP2 devices.
3687 */
3688 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3689 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3690 fcport->scan_needed = 0;
3691 continue;
3694 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3695 fcport->scan_needed = 0;
3696 if ((qla_dual_mode_enabled(vha) ||
3697 qla_ini_mode_enabled(vha)) &&
3698 atomic_read(&fcport->state) == FCS_ONLINE) {
3699 if (fcport->loop_id != FC_NO_LOOP_ID) {
3700 if (fcport->flags & FCF_FCP2_DEVICE)
3701 fcport->logout_on_delete = 0;
3703 ql_dbg(ql_dbg_disc, vha, 0x20f0,
3704 "%s %d %8phC post del sess\n",
3705 __func__, __LINE__,
3706 fcport->port_name);
3708 qlt_schedule_sess_for_deletion(fcport);
3709 continue;
3712 } else {
3713 if (fcport->scan_needed ||
3714 fcport->disc_state != DSC_LOGIN_COMPLETE) {
3715 if (fcport->login_retry == 0) {
3716 fcport->login_retry =
3717 vha->hw->login_retry_count;
3718 ql_dbg(ql_dbg_disc, vha, 0x20a3,
3719 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
3720 fcport->port_name, fcport->loop_id,
3721 fcport->login_retry);
3723 fcport->scan_needed = 0;
3724 qla24xx_fcport_handle_login(vha, fcport);
3729 recheck = 1;
3730 out:
3731 qla24xx_sp_unmap(vha, sp);
3732 spin_lock_irqsave(&vha->work_lock, flags);
3733 vha->scan.scan_flags &= ~SF_SCANNING;
3734 spin_unlock_irqrestore(&vha->work_lock, flags);
3736 if (recheck) {
3737 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3738 if (fcport->scan_needed) {
3739 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3740 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3741 break;
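/* Queue a GPNFT_DONE or GNNFT_DONE work item carrying @sp. */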
3747 static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
3748 srb_t *sp, int cmd)
3750 struct qla_work_evt *e;
3752 if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
3753 return QLA_PARAMETER_ERROR;
3755 e = qla2x00_alloc_work(vha, cmd);
3756 if (!e)
3757 return QLA_FUNCTION_FAILED;
3759 e->u.iosb.sp = sp;
3761 return qla2x00_post_work(vha, e);
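/* Queue a QLA_EVT_GPNFT work item to rescan the fabric for NVMe ports. */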
3764 static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
3765 srb_t *sp, int cmd)
3767 struct qla_work_evt *e;
3769 if (cmd != QLA_EVT_GPNFT)
3770 return QLA_PARAMETER_ERROR;
3772 e = qla2x00_alloc_work(vha, cmd);
3773 if (!e)
3774 return QLA_FUNCTION_FAILED;
3776 e->u.gpnft.fc4_type = FC4_TYPE_NVME;
3777 e->u.gpnft.sp = sp;
3779 return qla2x00_post_work(vha, e);
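/*
 * Walk a GPN_FT/GNN_FT response and fill the host scan list: record port ID
 * and WWPN for FCP entries, merge NVMe-capable ports into existing slots or
 * claim a free slot for NVMe-only ports, and attach node names from GNN_FT.
 */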
3782 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
3783 struct srb *sp)
3785 struct qla_hw_data *ha = vha->hw;
3786 int num_fibre_dev = ha->max_fibre_devices;
3787 struct ct_sns_req *ct_req =
3788 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3789 struct ct_sns_gpnft_rsp *ct_rsp =
3790 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3791 struct ct_sns_gpn_ft_data *d;
3792 struct fab_scan_rp *rp;
3793 u16 cmd = be16_to_cpu(ct_req->command);
3794 u8 fc4_type = sp->gen2;
3795 int i, j, k;
3796 port_id_t id;
3797 u8 found;
3798 u64 wwn;
3800 j = 0;
3801 for (i = 0; i < num_fibre_dev; i++) {
3802 d = &ct_rsp->entries[i];
3804 id.b.rsvd_1 = 0;
3805 id.b.domain = d->port_id[0];
3806 id.b.area = d->port_id[1];
3807 id.b.al_pa = d->port_id[2];
3808 wwn = wwn_to_u64(d->port_name);
3810 if (id.b24 == 0 || wwn == 0)
3811 continue;
3813 if (fc4_type == FC4_TYPE_FCP_SCSI) {
3814 if (cmd == GPN_FT_CMD) {
3815 rp = &vha->scan.l[j];
3816 rp->id = id;
3817 memcpy(rp->port_name, d->port_name, 8);
3818 j++;
3819 rp->fc4type = FS_FC4TYPE_FCP;
3820 } else {
3821 for (k = 0; k < num_fibre_dev; k++) {
3822 rp = &vha->scan.l[k];
3823 if (id.b24 == rp->id.b24) {
3824 memcpy(rp->node_name,
3825 d->port_name, 8);
3826 break;
3830 } else {
3831 /* Search if the fibre device supports FC4_TYPE_NVME */
3832 if (cmd == GPN_FT_CMD) {
3833 found = 0;
3835 for (k = 0; k < num_fibre_dev; k++) {
3836 rp = &vha->scan.l[k];
3837 if (!memcmp(rp->port_name,
3838 d->port_name, 8)) {
3839 /*
3840 * Supports FC-NVMe & FCP
3841 */
3842 rp->fc4type |= FS_FC4TYPE_NVME;
3843 found = 1;
3844 break;
3848 /* We found new FC-NVMe only port */
3849 if (!found) {
3850 for (k = 0; k < num_fibre_dev; k++) {
3851 rp = &vha->scan.l[k];
3852 if (wwn_to_u64(rp->port_name)) {
3853 continue;
3854 } else {
3855 rp->id = id;
3856 memcpy(rp->port_name,
3857 d->port_name, 8);
3858 rp->fc4type =
3859 FS_FC4TYPE_NVME;
3860 break;
3864 } else {
3865 for (k = 0; k < num_fibre_dev; k++) {
3866 rp = &vha->scan.l[k];
3867 if (id.b24 == rp->id.b24) {
3868 memcpy(rp->node_name,
3869 d->port_name, 8);
3870 break;
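/*
 * Completion handler for async GPN_FT/GNN_FT: runs in interrupt context, so
 * the response is parsed here and the heavier follow-up (NVMe rescan or
 * scan-done processing) is deferred to work items; failures retry the scan.
 */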
3878 static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
3880 struct scsi_qla_host *vha = sp->vha;
3881 struct ct_sns_req *ct_req =
3882 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3883 u16 cmd = be16_to_cpu(ct_req->command);
3884 u8 fc4_type = sp->gen2;
3885 unsigned long flags;
3886 int rc;
3888 /* gen2 field is holding the fc4type */
3889 ql_dbg(ql_dbg_disc, vha, 0xffff,
3890 "Async done-%s res %x FC4Type %x\n",
3891 sp->name, res, sp->gen2);
3893 del_timer(&sp->u.iocb_cmd.timer);
3894 sp->rc = res;
3895 if (res) {
3896 unsigned long flags;
3897 const char *name = sp->name;
3899 /*
3900 * We are in an interrupt context; queue up this
3901 * sp for GNNFT_DONE work. This will allow all
3902 * the resources to get freed up.
3903 */
3904 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3905 QLA_EVT_GNNFT_DONE);
3906 if (rc) {
3907 /* Cleanup here to prevent memory leak */
3908 qla24xx_sp_unmap(vha, sp);
3910 spin_lock_irqsave(&vha->work_lock, flags);
3911 vha->scan.scan_flags &= ~SF_SCANNING;
3912 vha->scan.scan_retry++;
3913 spin_unlock_irqrestore(&vha->work_lock, flags);
3915 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3916 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3917 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3918 qla2xxx_wake_dpc(vha);
3919 } else {
3920 ql_dbg(ql_dbg_disc, vha, 0xffff,
3921 "Async done-%s rescan failed on all retries.\n",
3922 name);
}
}
3925 return;
}

3928 qla2x00_find_free_fcp_nvme_slot(vha, sp);
3930 if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
3931 cmd == GNN_FT_CMD) {
3932 spin_lock_irqsave(&vha->work_lock, flags);
3933 vha->scan.scan_flags &= ~SF_SCANNING;
3934 spin_unlock_irqrestore(&vha->work_lock, flags);
3936 sp->rc = res;
3937 rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
3938 if (rc) {
3939 qla24xx_sp_unmap(vha, sp);
3940 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3941 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
3943 return;
}

3946 if (cmd == GPN_FT_CMD) {
3947 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3948 QLA_EVT_GPNFT_DONE);
3949 } else {
3950 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3951 QLA_EVT_GNNFT_DONE);
}
3954 if (rc) {
3955 qla24xx_sp_unmap(vha, sp);
3956 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3957 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3958 return;
}
}

/*
3963 * Get WWNN list for fc4_type
 *
3965 * It is assumed the same SRB is re-used from GPNFT to avoid
3966 * mem free & re-alloc
 */
3968 static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
3969 u8 fc4_type)
{
3971 int rval = QLA_FUNCTION_FAILED;
3972 struct ct_sns_req *ct_req;
3973 struct ct_sns_pkt *ct_sns;
3974 unsigned long flags;
3976 if (!vha->flags.online) {
3977 spin_lock_irqsave(&vha->work_lock, flags);
3978 vha->scan.scan_flags &= ~SF_SCANNING;
3979 spin_unlock_irqrestore(&vha->work_lock, flags);
3980 goto done_free_sp;
}

3983 if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
3984 ql_log(ql_log_warn, vha, 0xffff,
3985 "%s: req %p rsp %p are not setup\n",
3986 __func__, sp->u.iocb_cmd.u.ctarg.req,
3987 sp->u.iocb_cmd.u.ctarg.rsp);
3988 spin_lock_irqsave(&vha->work_lock, flags);
3989 vha->scan.scan_flags &= ~SF_SCANNING;
3990 spin_unlock_irqrestore(&vha->work_lock, flags);
3991 WARN_ON(1);
3992 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3993 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3994 goto done_free_sp;
}

3997 ql_dbg(ql_dbg_disc, vha, 0xfffff,
3998 "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
3999 __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
4000 sp->u.iocb_cmd.u.ctarg.req_size);
4002 sp->type = SRB_CT_PTHRU_CMD;
4003 sp->name = "gnnft";
4004 sp->gen1 = vha->hw->base_qpair->chip_reset;
4005 sp->gen2 = fc4_type;
4007 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4008 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4010 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4011 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4013 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4014 /* CT_IU preamble */
4015 ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
4016 sp->u.iocb_cmd.u.ctarg.rsp_size);
4018 /* GNN_FT req (reuses the gpn_ft request layout) */
4019 ct_req->req.gpn_ft.port_type = fc4_type;
4021 sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
4022 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4024 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4026 ql_dbg(ql_dbg_disc, vha, 0xffff,
4027 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4028 sp->handle, ct_req->req.gpn_ft.port_type);
4030 rval = qla2x00_start_sp(sp);
4031 if (rval != QLA_SUCCESS) {
4032 goto done_free_sp;
}

4035 return rval;
4037 done_free_sp:
4038 if (sp->u.iocb_cmd.u.ctarg.req) {
4039 dma_free_coherent(&vha->hw->pdev->dev,
4040 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4041 sp->u.iocb_cmd.u.ctarg.req,
4042 sp->u.iocb_cmd.u.ctarg.req_dma);
4043 sp->u.iocb_cmd.u.ctarg.req = NULL;
}
4045 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4046 dma_free_coherent(&vha->hw->pdev->dev,
4047 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4048 sp->u.iocb_cmd.u.ctarg.rsp,
4049 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4050 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}

4053 sp->free(sp);
4055 spin_lock_irqsave(&vha->work_lock, flags);
4056 vha->scan.scan_flags &= ~SF_SCANNING;
4057 if (vha->scan.scan_flags == 0) {
4058 ql_dbg(ql_dbg_disc, vha, 0xffff,
4059 "%s: schedule\n", __func__);
4060 vha->scan.scan_flags |= SF_QUEUED;
4061 schedule_delayed_work(&vha->scan.scan_work, 5);
}
4063 spin_unlock_irqrestore(&vha->work_lock, flags);
4066 return rval;
4067 } /* GNNFT */
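
/**
 * qla24xx_async_gpnft_done() - GPN_FT scan step completed, start GNN_FT.
 * @vha: HA context
 * @sp: SRB reused from the completed GPN_FT command
 *
 * Reuses @sp to issue the follow-up GNN_FT query for the same FC-4 type.
 */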
4069 void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
4071 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4072 "%s enter\n", __func__);
4073 qla24xx_async_gnnft(vha, sp, sp->gen2);
}
4076 /* Get WWPN list for certain fc4_type */
4077 int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
{
4079 int rval = QLA_FUNCTION_FAILED;
4080 struct ct_sns_req *ct_req;
4081 struct ct_sns_pkt *ct_sns;
4082 u32 rspsz;
4083 unsigned long flags;
4085 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4086 "%s enter\n", __func__);
4088 if (!vha->flags.online)
4089 return rval;
4091 spin_lock_irqsave(&vha->work_lock, flags);
4092 if (vha->scan.scan_flags & SF_SCANNING) {
4093 spin_unlock_irqrestore(&vha->work_lock, flags);
4094 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4095 "%s: scan active\n", __func__);
4096 return rval;
}
4098 vha->scan.scan_flags |= SF_SCANNING;
4099 spin_unlock_irqrestore(&vha->work_lock, flags);
4101 if (fc4_type == FC4_TYPE_FCP_SCSI) {
4102 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4103 "%s: Performing FCP Scan\n", __func__);
4105 if (sp)
4106 sp->free(sp); /* should not happen */
4108 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
4109 if (!sp) {
4110 spin_lock_irqsave(&vha->work_lock, flags);
4111 vha->scan.scan_flags &= ~SF_SCANNING;
4112 spin_unlock_irqrestore(&vha->work_lock, flags);
4113 return rval;
}

4116 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
4117 sizeof(struct ct_sns_pkt),
4118 &sp->u.iocb_cmd.u.ctarg.req_dma,
4119 GFP_KERNEL);
4120 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
4121 if (!sp->u.iocb_cmd.u.ctarg.req) {
4122 ql_log(ql_log_warn, vha, 0xffff,
4123 "Failed to allocate ct_sns request.\n");
4124 spin_lock_irqsave(&vha->work_lock, flags);
4125 vha->scan.scan_flags &= ~SF_SCANNING;
4126 spin_unlock_irqrestore(&vha->work_lock, flags);
4127 qla2x00_rel_sp(sp);
4128 return rval;
}
4130 sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
4132 rspsz = sizeof(struct ct_sns_gpnft_rsp) +
4133 ((vha->hw->max_fibre_devices - 1) *
4134 sizeof(struct ct_sns_gpn_ft_data));
4136 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
4137 rspsz,
4138 &sp->u.iocb_cmd.u.ctarg.rsp_dma,
4139 GFP_KERNEL);
4140 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
4141 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4142 ql_log(ql_log_warn, vha, 0xffff,
4143 "Failed to allocate ct_sns request.\n");
4144 spin_lock_irqsave(&vha->work_lock, flags);
4145 vha->scan.scan_flags &= ~SF_SCANNING;
4146 spin_unlock_irqrestore(&vha->work_lock, flags);
4147 dma_free_coherent(&vha->hw->pdev->dev,
4148 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4149 sp->u.iocb_cmd.u.ctarg.req,
4150 sp->u.iocb_cmd.u.ctarg.req_dma);
4151 sp->u.iocb_cmd.u.ctarg.req = NULL;
4152 qla2x00_rel_sp(sp);
4153 return rval;
}
4155 sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
4157 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4158 "%s scan list size %d\n", __func__, vha->scan.size);
4160 memset(vha->scan.l, 0, vha->scan.size);
4161 } else if (!sp) {
4162 ql_dbg(ql_dbg_disc, vha, 0xffff,
4163 "NVME scan did not provide SP\n");
4164 return rval;
}

4167 sp->type = SRB_CT_PTHRU_CMD;
4168 sp->name = "gpnft";
4169 sp->gen1 = vha->hw->base_qpair->chip_reset;
4170 sp->gen2 = fc4_type;
4172 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4173 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4175 rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
4176 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4177 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4179 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4180 /* CT_IU preamble */
4181 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
4183 /* GPN_FT req */
4184 ct_req->req.gpn_ft.port_type = fc4_type;
4186 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4188 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4190 ql_dbg(ql_dbg_disc, vha, 0xffff,
4191 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4192 sp->handle, ct_req->req.gpn_ft.port_type);
4194 rval = qla2x00_start_sp(sp);
4195 if (rval != QLA_SUCCESS) {
4196 goto done_free_sp;
}

4199 return rval;
4201 done_free_sp:
4202 if (sp->u.iocb_cmd.u.ctarg.req) {
4203 dma_free_coherent(&vha->hw->pdev->dev,
4204 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4205 sp->u.iocb_cmd.u.ctarg.req,
4206 sp->u.iocb_cmd.u.ctarg.req_dma);
4207 sp->u.iocb_cmd.u.ctarg.req = NULL;
}
4209 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4210 dma_free_coherent(&vha->hw->pdev->dev,
4211 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4212 sp->u.iocb_cmd.u.ctarg.rsp,
4213 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4214 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}

4217 sp->free(sp);
4219 spin_lock_irqsave(&vha->work_lock, flags);
4220 vha->scan.scan_flags &= ~SF_SCANNING;
4221 if (vha->scan.scan_flags == 0) {
4222 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4223 "%s: Scan scheduled.\n", __func__);
4224 vha->scan.scan_flags |= SF_QUEUED;
4225 schedule_delayed_work(&vha->scan.scan_work, 5);
}
4227 spin_unlock_irqrestore(&vha->work_lock, flags);
4230 return rval;
}
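
/**
 * qla_scan_work_fn() - Delayed work to retry a deferred fabric scan.
 * @work: scan_work member of the queued fab_scan
 *
 * Sets LOCAL_LOOP_UPDATE and LOOP_RESYNC_NEEDED, wakes the DPC thread to
 * restart the scan, then clears SF_QUEUED.
 */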
4233 void qla_scan_work_fn(struct work_struct *work)
{
4235 struct fab_scan *s = container_of(to_delayed_work(work),
4236 struct fab_scan, scan_work);
4237 struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
4238 scan);
4239 unsigned long flags;
4241 ql_dbg(ql_dbg_disc, vha, 0xffff,
4242 "%s: schedule loop resync\n", __func__);
4243 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4244 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4245 qla2xxx_wake_dpc(vha);
4246 spin_lock_irqsave(&vha->work_lock, flags);
4247 vha->scan.scan_flags &= ~SF_QUEUED;
4248 spin_unlock_irqrestore(&vha->work_lock, flags);
}

4251 /* GNN_ID */
4252 void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
4254 qla24xx_post_gnl_work(vha, ea->fcport);
}
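
/**
 * qla2x00_async_gnnid_sp_done() - Completion handler for async GNN_ID.
 * @sp: SRB of the completed CT passthrough command
 * @res: completion status
 *
 * Copies the returned node name into the fcport and hands the result to
 * qla24xx_handle_gnnid_event().
 */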
4257 static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
{
4259 struct scsi_qla_host *vha = sp->vha;
4260 fc_port_t *fcport = sp->fcport;
4261 u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
4262 struct event_arg ea;
4263 u64 wwnn;
4265 fcport->flags &= ~FCF_ASYNC_SENT;
4266 wwnn = wwn_to_u64(node_name);
4267 if (wwnn)
4268 memcpy(fcport->node_name, node_name, WWN_SIZE);
4270 memset(&ea, 0, sizeof(ea));
4271 ea.fcport = fcport;
4272 ea.sp = sp;
4273 ea.rc = res;
4275 ql_dbg(ql_dbg_disc, vha, 0x204f,
4276 "Async done-%s res %x, WWPN %8phC %8phC\n",
4277 sp->name, res, fcport->port_name, fcport->node_name);
4279 qla24xx_handle_gnnid_event(vha, &ea);
4281 sp->free(sp);
}
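
/**
 * qla24xx_async_gnnid() - Issue an async GNN_ID (Get Node Name) query.
 * @vha: HA context
 * @fcport: port whose node name is requested from the name server
 *
 * Returns QLA_SUCCESS if the command was started, QLA_FUNCTION_FAILED
 * otherwise.
 */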
4284 int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
{
4286 int rval = QLA_FUNCTION_FAILED;
4287 struct ct_sns_req *ct_req;
4288 srb_t *sp;
4290 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4291 return rval;
4293 fcport->disc_state = DSC_GNN_ID;
4294 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4295 if (!sp)
4296 goto done;
4298 fcport->flags |= FCF_ASYNC_SENT;
4299 sp->type = SRB_CT_PTHRU_CMD;
4300 sp->name = "gnnid";
4301 sp->gen1 = fcport->rscn_gen;
4302 sp->gen2 = fcport->login_gen;
4304 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4305 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4307 /* CT_IU preamble */
4308 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
4309 GNN_ID_RSP_SIZE);
4311 /* GNN_ID req */
4312 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4315 /* req & rsp use the same buffer */
4316 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4317 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4318 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4319 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4320 sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
4321 sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
4322 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4324 sp->done = qla2x00_async_gnnid_sp_done;
4326 ql_dbg(ql_dbg_disc, vha, 0xffff,
4327 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4328 sp->name, fcport->port_name,
4329 sp->handle, fcport->loop_id, fcport->d_id.b24);
4331 rval = qla2x00_start_sp(sp);
4332 if (rval != QLA_SUCCESS)
4333 goto done_free_sp;
4334 return rval;
4336 done_free_sp:
4337 sp->free(sp);
4338 fcport->flags &= ~FCF_ASYNC_SENT;
4339 done:
4340 return rval;
}
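
/**
 * qla24xx_post_gnnid_work() - Queue a GNN_ID query as driver work.
 * @vha: HA context
 * @fcport: port to query
 *
 * Returns 0 if the loop is not ready or the driver is unloading, otherwise
 * the result of posting the QLA_EVT_GNNID work event.
 */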
4343 int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
4345 struct qla_work_evt *e;
4346 int ls;
4348 ls = atomic_read(&vha->loop_state);
4349 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4350 test_bit(UNLOADING, &vha->dpc_flags))
4351 return 0;
4353 e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
4354 if (!e)
4355 return QLA_FUNCTION_FAILED;
4357 e->u.fcport.fcport = fcport;
4358 return qla2x00_post_work(vha, e);
}
4361 /* GFPN_ID */
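
/**
 * qla24xx_handle_gfpnid_event() - Process a completed GFPN_ID query.
 * @vha: HA context
 * @ea: event argument carrying the fcport and originating SRB
 *
 * If the port is not being deleted and its login and RSCN generations are
 * unchanged, follow up with a GPSC (port speed) query.
 */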
4362 void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
4364 fc_port_t *fcport = ea->fcport;
4366 ql_dbg(ql_dbg_disc, vha, 0xffff,
4367 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
4368 __func__, fcport->port_name, fcport->disc_state,
4369 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
4370 fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
4372 if (fcport->disc_state == DSC_DELETE_PEND)
4373 return;
4375 if (ea->sp->gen2 != fcport->login_gen) {
4376 /* target side must have changed it. */
4377 ql_dbg(ql_dbg_disc, vha, 0x20d3,
4378 "%s %8phC generation changed\n",
4379 __func__, fcport->port_name);
4380 return;
4381 } else if (ea->sp->gen1 != fcport->rscn_gen) {
4382 return;
}

4385 qla24xx_post_gpsc_work(vha, fcport);
}
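
/**
 * qla2x00_async_gfpnid_sp_done() - Completion handler for async GFPN_ID.
 * @sp: SRB of the completed CT passthrough command
 * @res: completion status
 *
 * Copies the returned fabric port name into the fcport and hands the
 * result to qla24xx_handle_gfpnid_event().
 */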
4388 static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
{
4390 struct scsi_qla_host *vha = sp->vha;
4391 fc_port_t *fcport = sp->fcport;
4392 u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
4393 struct event_arg ea;
4394 u64 wwn;
4396 wwn = wwn_to_u64(fpn);
4397 if (wwn)
4398 memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
4400 memset(&ea, 0, sizeof(ea));
4401 ea.fcport = fcport;
4402 ea.sp = sp;
4403 ea.rc = res;
4405 ql_dbg(ql_dbg_disc, vha, 0x204f,
4406 "Async done-%s res %x, WWPN %8phC %8phC\n",
4407 sp->name, res, fcport->port_name, fcport->fabric_port_name);
4409 qla24xx_handle_gfpnid_event(vha, &ea);
4411 sp->free(sp);
}
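
/**
 * qla24xx_async_gfpnid() - Issue an async GFPN_ID (Get Fabric Port Name) query.
 * @vha: HA context
 * @fcport: port whose fabric port name is requested
 *
 * Returns QLA_SUCCESS if the command was started, QLA_FUNCTION_FAILED
 * otherwise.
 */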
4414 int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
{
4416 int rval = QLA_FUNCTION_FAILED;
4417 struct ct_sns_req *ct_req;
4418 srb_t *sp;
4420 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4421 return rval;
4423 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4424 if (!sp)
4425 goto done;
4427 sp->type = SRB_CT_PTHRU_CMD;
4428 sp->name = "gfpnid";
4429 sp->gen1 = fcport->rscn_gen;
4430 sp->gen2 = fcport->login_gen;
4432 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4433 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4435 /* CT_IU preamble */
4436 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
4437 GFPN_ID_RSP_SIZE);
4439 /* GFPN_ID req */
4440 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4443 /* req & rsp use the same buffer */
4444 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4445 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4446 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4447 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4448 sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
4449 sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
4450 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4452 sp->done = qla2x00_async_gfpnid_sp_done;
4454 ql_dbg(ql_dbg_disc, vha, 0xffff,
4455 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4456 sp->name, fcport->port_name,
4457 sp->handle, fcport->loop_id, fcport->d_id.b24);
4459 rval = qla2x00_start_sp(sp);
4460 if (rval != QLA_SUCCESS)
4461 goto done_free_sp;
4463 return rval;
4465 done_free_sp:
4466 sp->free(sp);
4467 fcport->flags &= ~FCF_ASYNC_SENT;
4468 done:
4469 return rval;
}
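
/**
 * qla24xx_post_gfpnid_work() - Queue a GFPN_ID query as driver work.
 * @vha: HA context
 * @fcport: port to query
 *
 * Returns 0 if the loop is not ready or the driver is unloading, otherwise
 * the result of posting the QLA_EVT_GFPNID work event.
 */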
4472 int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
4474 struct qla_work_evt *e;
4475 int ls;
4477 ls = atomic_read(&vha->loop_state);
4478 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4479 test_bit(UNLOADING, &vha->dpc_flags))
4480 return 0;
4482 e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
4483 if (!e)
4484 return QLA_FUNCTION_FAILED;
4486 e->u.fcport.fcport = fcport;
4487 return qla2x00_post_work(vha, e);
}