drivers/scsi/qla2xxx/qla_gs.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
9 #include <linux/utsname.h>
11 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
12 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
15 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
16 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
17 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
18 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
19 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
20 static int qla_async_rsnn_nn(scsi_qla_host_t *);
22 /**
23 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
24 * @vha: HA context
25 * @arg: CT arguments
27 * Returns a pointer to the @vha's ms_iocb.
29 void *
30 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
32 struct qla_hw_data *ha = vha->hw;
33 ms_iocb_entry_t *ms_pkt;
35 ms_pkt = (ms_iocb_entry_t *)arg->iocb;
36 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
38 ms_pkt->entry_type = MS_IOCB_TYPE;
39 ms_pkt->entry_count = 1;
40 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
41 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
42 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
43 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
44 ms_pkt->total_dsd_count = cpu_to_le16(2);
45 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
46 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
48 ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma));
49 ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma));
50 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
52 ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
53 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
54 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
56 vha->qla_stats.control_requests++;
58 return (ms_pkt);
61 /**
62 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
63 * @vha: HA context
64 * @arg: CT arguments
66 * Returns a pointer to the @ha's ms_iocb.
68 void *
69 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
71 struct qla_hw_data *ha = vha->hw;
72 struct ct_entry_24xx *ct_pkt;
74 ct_pkt = (struct ct_entry_24xx *)arg->iocb;
75 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
77 ct_pkt->entry_type = CT_IOCB_TYPE;
78 ct_pkt->entry_count = 1;
79 ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
80 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
81 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
82 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
83 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
84 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
86 ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma));
87 ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma));
88 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
90 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
91 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
92 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
93 ct_pkt->vp_index = vha->vp_idx;
95 vha->qla_stats.control_requests++;
97 return (ct_pkt);
101 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
102 * @p: CT request buffer
103 * @cmd: GS command
104 * @rsp_size: response size in bytes
106 * Returns a pointer to the initialized @ct_req.
108 static inline struct ct_sns_req *
109 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
111 memset(p, 0, sizeof(struct ct_sns_pkt));
113 p->p.req.header.revision = 0x01;
114 p->p.req.header.gs_type = 0xFC;
115 p->p.req.header.gs_subtype = 0x02;
116 p->p.req.command = cpu_to_be16(cmd);
117 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
119 return &p->p.req;
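/*
 * Illustrative example (not part of the driver): the CT header built above
 * addresses the directory service (gs_type 0xFC), name server subtype
 * (gs_subtype 0x02), and max_rsp_size is expressed in 4-byte words
 * excluding the 16-byte CT header.  Assuming a response buffer of
 * 636 bytes, as a GA_NXT-sized query might use:
 *
 *	max_rsp_size = (636 - 16) / 4 = 155 words
 */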
123 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
124 struct ct_sns_rsp *ct_rsp, const char *routine)
126 int rval;
127 uint16_t comp_status;
128 struct qla_hw_data *ha = vha->hw;
129 bool lid_is_sns = false;
131 rval = QLA_FUNCTION_FAILED;
132 if (ms_pkt->entry_status != 0) {
133 ql_dbg(ql_dbg_disc, vha, 0x2031,
134 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
135 routine, ms_pkt->entry_status, vha->d_id.b.domain,
136 vha->d_id.b.area, vha->d_id.b.al_pa);
137 } else {
138 if (IS_FWI2_CAPABLE(ha))
139 comp_status = le16_to_cpu(
140 ((struct ct_entry_24xx *)ms_pkt)->comp_status);
141 else
142 comp_status = le16_to_cpu(ms_pkt->status);
143 switch (comp_status) {
144 case CS_COMPLETE:
145 case CS_DATA_UNDERRUN:
146 case CS_DATA_OVERRUN: /* Overrun? */
147 if (ct_rsp->header.response !=
148 cpu_to_be16(CT_ACCEPT_RESPONSE)) {
149 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
150 "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
151 routine, vha->d_id.b.domain,
152 vha->d_id.b.area, vha->d_id.b.al_pa,
153 comp_status, ct_rsp->header.response);
154 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
155 0x2078, (uint8_t *)&ct_rsp->header,
156 sizeof(struct ct_rsp_hdr));
157 rval = QLA_INVALID_COMMAND;
158 } else
159 rval = QLA_SUCCESS;
160 break;
161 case CS_PORT_LOGGED_OUT:
162 if (IS_FWI2_CAPABLE(ha)) {
163 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
164 NPH_SNS)
165 lid_is_sns = true;
166 } else {
167 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
168 SIMPLE_NAME_SERVER)
169 lid_is_sns = true;
171 if (lid_is_sns) {
172 ql_dbg(ql_dbg_async, vha, 0x502b,
173 "%s failed, Name server has logged out",
174 routine);
175 rval = QLA_NOT_LOGGED_IN;
176 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
177 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
179 break;
180 case CS_TIMEOUT:
181 rval = QLA_FUNCTION_TIMEOUT;
182 /* fall through */
183 default:
184 ql_dbg(ql_dbg_disc, vha, 0x2033,
185 "%s failed, completion status (%x) on port_id: "
186 "%02x%02x%02x.\n", routine, comp_status,
187 vha->d_id.b.domain, vha->d_id.b.area,
188 vha->d_id.b.al_pa);
189 break;
192 return rval;
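/*
 * Note (illustrative): CT_ACCEPT_RESPONSE is the FC-GS accept command code
 * 0x8002; the raw-byte checks in the legacy SNS helpers later in this file
 * (data[8] == 0x80 && data[9] == 0x02) test for the same accept response
 * without going through a CT response structure.
 */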
196 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
197 * @vha: HA context
198 * @fcport: fcport entry to be updated
200 * Returns 0 on success.
203 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
205 int rval;
207 ms_iocb_entry_t *ms_pkt;
208 struct ct_sns_req *ct_req;
209 struct ct_sns_rsp *ct_rsp;
210 struct qla_hw_data *ha = vha->hw;
211 struct ct_arg arg;
213 if (IS_QLA2100(ha) || IS_QLA2200(ha))
214 return qla2x00_sns_ga_nxt(vha, fcport);
216 arg.iocb = ha->ms_iocb;
217 arg.req_dma = ha->ct_sns_dma;
218 arg.rsp_dma = ha->ct_sns_dma;
219 arg.req_size = GA_NXT_REQ_SIZE;
220 arg.rsp_size = GA_NXT_RSP_SIZE;
221 arg.nport_handle = NPH_SNS;
223 /* Issue GA_NXT */
224 /* Prepare common MS IOCB */
225 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
227 /* Prepare CT request */
228 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
229 GA_NXT_RSP_SIZE);
230 ct_rsp = &ha->ct_sns->p.rsp;
232 /* Prepare CT arguments -- port_id */
233 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
234 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
235 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
237 /* Execute MS IOCB */
238 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
239 sizeof(ms_iocb_entry_t));
240 if (rval != QLA_SUCCESS) {
241 /*EMPTY*/
242 ql_dbg(ql_dbg_disc, vha, 0x2062,
243 "GA_NXT issue IOCB failed (%d).\n", rval);
244 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
245 QLA_SUCCESS) {
246 rval = QLA_FUNCTION_FAILED;
247 } else {
248 /* Populate fc_port_t entry. */
249 fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0];
250 fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1];
251 fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2];
253 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
254 WWN_SIZE);
255 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
256 WWN_SIZE);
258 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
259 FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
261 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
262 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
263 fcport->d_id.b.domain = 0xf0;
265 ql_dbg(ql_dbg_disc, vha, 0x2063,
266 "GA_NXT entry - nn %8phN pn %8phN "
267 "port_id=%02x%02x%02x.\n",
268 fcport->node_name, fcport->port_name,
269 fcport->d_id.b.domain, fcport->d_id.b.area,
270 fcport->d_id.b.al_pa);
273 return (rval);
276 static inline int
277 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
279 return vha->hw->max_fibre_devices * 4 + 16;
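/*
 * Sizing example (illustrative): each GID_PT entry is 4 bytes (control
 * byte + 3-byte port ID) and the CT header is 16 bytes, so with, say,
 * 512 supported fabric devices the response buffer is
 *
 *	512 * 4 + 16 = 2064 bytes
 */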
283 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
284 * @vha: HA context
285 * @list: switch info entries to populate
287 * NOTE: Non-Nx_Ports are not requested.
289 * Returns 0 on success.
292 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
294 int rval;
295 uint16_t i;
297 ms_iocb_entry_t *ms_pkt;
298 struct ct_sns_req *ct_req;
299 struct ct_sns_rsp *ct_rsp;
301 struct ct_sns_gid_pt_data *gid_data;
302 struct qla_hw_data *ha = vha->hw;
303 uint16_t gid_pt_rsp_size;
304 struct ct_arg arg;
306 if (IS_QLA2100(ha) || IS_QLA2200(ha))
307 return qla2x00_sns_gid_pt(vha, list);
309 gid_data = NULL;
310 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
312 arg.iocb = ha->ms_iocb;
313 arg.req_dma = ha->ct_sns_dma;
314 arg.rsp_dma = ha->ct_sns_dma;
315 arg.req_size = GID_PT_REQ_SIZE;
316 arg.rsp_size = gid_pt_rsp_size;
317 arg.nport_handle = NPH_SNS;
319 /* Issue GID_PT */
320 /* Prepare common MS IOCB */
321 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
323 /* Prepare CT request */
324 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
325 ct_rsp = &ha->ct_sns->p.rsp;
327 /* Prepare CT arguments -- port_type */
328 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
330 /* Execute MS IOCB */
331 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
332 sizeof(ms_iocb_entry_t));
333 if (rval != QLA_SUCCESS) {
334 /*EMPTY*/
335 ql_dbg(ql_dbg_disc, vha, 0x2055,
336 "GID_PT issue IOCB failed (%d).\n", rval);
337 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
338 QLA_SUCCESS) {
339 rval = QLA_FUNCTION_FAILED;
340 } else {
341 /* Set port IDs in switch info list. */
342 for (i = 0; i < ha->max_fibre_devices; i++) {
343 gid_data = &ct_rsp->rsp.gid_pt.entries[i];
344 list[i].d_id.b.domain = gid_data->port_id[0];
345 list[i].d_id.b.area = gid_data->port_id[1];
346 list[i].d_id.b.al_pa = gid_data->port_id[2];
347 memset(list[i].fabric_port_name, 0, WWN_SIZE);
348 list[i].fp_speed = PORT_SPEED_UNKNOWN;
350 /* Last one exit. */
351 if (gid_data->control_byte & BIT_7) {
352 list[i].d_id.b.rsvd_1 = gid_data->control_byte;
353 break;
358 * If we've used all available slots, then the switch is
359 * reporting back more devices than we can handle with this
360 * single call. Return a failed status, and let GA_NXT handle
361 * the overload.
363 if (i == ha->max_fibre_devices)
364 rval = QLA_FUNCTION_FAILED;
367 return (rval);
371 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
372 * @vha: HA context
373 * @list: switch info entries to populate
375 * Returns 0 on success.
378 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
380 int rval = QLA_SUCCESS;
381 uint16_t i;
383 ms_iocb_entry_t *ms_pkt;
384 struct ct_sns_req *ct_req;
385 struct ct_sns_rsp *ct_rsp;
386 struct qla_hw_data *ha = vha->hw;
387 struct ct_arg arg;
389 if (IS_QLA2100(ha) || IS_QLA2200(ha))
390 return qla2x00_sns_gpn_id(vha, list);
392 arg.iocb = ha->ms_iocb;
393 arg.req_dma = ha->ct_sns_dma;
394 arg.rsp_dma = ha->ct_sns_dma;
395 arg.req_size = GPN_ID_REQ_SIZE;
396 arg.rsp_size = GPN_ID_RSP_SIZE;
397 arg.nport_handle = NPH_SNS;
399 for (i = 0; i < ha->max_fibre_devices; i++) {
400 /* Issue GPN_ID */
401 /* Prepare common MS IOCB */
402 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
404 /* Prepare CT request */
405 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
406 GPN_ID_RSP_SIZE);
407 ct_rsp = &ha->ct_sns->p.rsp;
409 /* Prepare CT arguments -- port_id */
410 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
411 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
412 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
414 /* Execute MS IOCB */
415 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
416 sizeof(ms_iocb_entry_t));
417 if (rval != QLA_SUCCESS) {
418 /*EMPTY*/
419 ql_dbg(ql_dbg_disc, vha, 0x2056,
420 "GPN_ID issue IOCB failed (%d).\n", rval);
421 break;
422 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
423 "GPN_ID") != QLA_SUCCESS) {
424 rval = QLA_FUNCTION_FAILED;
425 break;
426 } else {
427 /* Save portname */
428 memcpy(list[i].port_name,
429 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
432 /* Last device exit. */
433 if (list[i].d_id.b.rsvd_1 != 0)
434 break;
437 return (rval);
441 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
442 * @vha: HA context
443 * @list: switch info entries to populate
445 * Returns 0 on success.
448 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
450 int rval = QLA_SUCCESS;
451 uint16_t i;
452 struct qla_hw_data *ha = vha->hw;
453 ms_iocb_entry_t *ms_pkt;
454 struct ct_sns_req *ct_req;
455 struct ct_sns_rsp *ct_rsp;
456 struct ct_arg arg;
458 if (IS_QLA2100(ha) || IS_QLA2200(ha))
459 return qla2x00_sns_gnn_id(vha, list);
461 arg.iocb = ha->ms_iocb;
462 arg.req_dma = ha->ct_sns_dma;
463 arg.rsp_dma = ha->ct_sns_dma;
464 arg.req_size = GNN_ID_REQ_SIZE;
465 arg.rsp_size = GNN_ID_RSP_SIZE;
466 arg.nport_handle = NPH_SNS;
468 for (i = 0; i < ha->max_fibre_devices; i++) {
469 /* Issue GNN_ID */
470 /* Prepare common MS IOCB */
471 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
473 /* Prepare CT request */
474 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
475 GNN_ID_RSP_SIZE);
476 ct_rsp = &ha->ct_sns->p.rsp;
478 /* Prepare CT arguments -- port_id */
479 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
480 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
481 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
483 /* Execute MS IOCB */
484 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
485 sizeof(ms_iocb_entry_t));
486 if (rval != QLA_SUCCESS) {
487 /*EMPTY*/
488 ql_dbg(ql_dbg_disc, vha, 0x2057,
489 "GNN_ID issue IOCB failed (%d).\n", rval);
490 break;
491 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
492 "GNN_ID") != QLA_SUCCESS) {
493 rval = QLA_FUNCTION_FAILED;
494 break;
495 } else {
496 /* Save nodename */
497 memcpy(list[i].node_name,
498 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
500 ql_dbg(ql_dbg_disc, vha, 0x2058,
501 "GID_PT entry - nn %8phN pn %8phN "
502 "portid=%02x%02x%02x.\n",
503 list[i].node_name, list[i].port_name,
504 list[i].d_id.b.domain, list[i].d_id.b.area,
505 list[i].d_id.b.al_pa);
508 /* Last device exit. */
509 if (list[i].d_id.b.rsvd_1 != 0)
510 break;
513 return (rval);
516 static void qla2x00_async_sns_sp_done(void *s, int rc)
518 struct srb *sp = s;
519 struct scsi_qla_host *vha = sp->vha;
520 struct ct_sns_pkt *ct_sns;
521 struct qla_work_evt *e;
523 sp->rc = rc;
524 if (rc == QLA_SUCCESS) {
525 ql_dbg(ql_dbg_disc, vha, 0x204f,
526 "Async done-%s exiting normally.\n",
527 sp->name);
528 } else if (rc == QLA_FUNCTION_TIMEOUT) {
529 ql_dbg(ql_dbg_disc, vha, 0x204f,
530 "Async done-%s timeout\n", sp->name);
531 } else {
532 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
533 memset(ct_sns, 0, sizeof(*ct_sns));
534 sp->retry_count++;
535 if (sp->retry_count > 3)
536 goto err;
538 ql_dbg(ql_dbg_disc, vha, 0x204f,
539 "Async done-%s fail rc %x. Retry count %d\n",
540 sp->name, rc, sp->retry_count);
542 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
543 if (!e)
544 goto err2;
546 del_timer(&sp->u.iocb_cmd.timer);
547 e->u.iosb.sp = sp;
548 qla2x00_post_work(vha, e);
549 return;
552 err:
553 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
554 err2:
555 if (!e) {
556 /* Freeing coherent DMA here may trigger a kernel warning; ignore it, otherwise these buffers would leak. */
557 if (sp->u.iocb_cmd.u.ctarg.req) {
558 dma_free_coherent(&vha->hw->pdev->dev,
559 sizeof(struct ct_sns_pkt),
560 sp->u.iocb_cmd.u.ctarg.req,
561 sp->u.iocb_cmd.u.ctarg.req_dma);
562 sp->u.iocb_cmd.u.ctarg.req = NULL;
565 if (sp->u.iocb_cmd.u.ctarg.rsp) {
566 dma_free_coherent(&vha->hw->pdev->dev,
567 sizeof(struct ct_sns_pkt),
568 sp->u.iocb_cmd.u.ctarg.rsp,
569 sp->u.iocb_cmd.u.ctarg.rsp_dma);
570 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
573 sp->free(sp);
575 return;
578 e->u.iosb.sp = sp;
579 qla2x00_post_work(vha, e);
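/*
 * Flow sketch (illustrative): on failure the completion above retries the
 * same srb up to three times by queueing a QLA_EVT_SP_RETRY work item; once
 * retries are exhausted (or no retry event can be allocated) the ct_sns
 * request/response DMA buffers are released, either via a QLA_EVT_UNMAP
 * work item or directly here as a last resort.
 */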
583 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
584 * @vha: HA context
586 * Returns 0 on success.
589 qla2x00_rft_id(scsi_qla_host_t *vha)
591 struct qla_hw_data *ha = vha->hw;
593 if (IS_QLA2100(ha) || IS_QLA2200(ha))
594 return qla2x00_sns_rft_id(vha);
596 return qla_async_rftid(vha, &vha->d_id);
599 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
601 int rval = QLA_MEMORY_ALLOC_FAILED;
602 struct ct_sns_req *ct_req;
603 srb_t *sp;
604 struct ct_sns_pkt *ct_sns;
606 if (!vha->flags.online)
607 goto done;
609 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
610 if (!sp)
611 goto done;
613 sp->type = SRB_CT_PTHRU_CMD;
614 sp->name = "rft_id";
615 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
617 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
618 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
619 GFP_KERNEL);
620 if (!sp->u.iocb_cmd.u.ctarg.req) {
621 ql_log(ql_log_warn, vha, 0xd041,
622 "%s: Failed to allocate ct_sns request.\n",
623 __func__);
624 goto done_free_sp;
627 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
628 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
629 GFP_KERNEL);
630 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
631 ql_log(ql_log_warn, vha, 0xd042,
632 "%s: Failed to allocate ct_sns request.\n",
633 __func__);
634 goto done_free_sp;
636 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
637 memset(ct_sns, 0, sizeof(*ct_sns));
638 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
640 /* Prepare CT request */
641 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
643 /* Prepare CT arguments -- port_id, FC-4 types */
644 ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
645 ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
646 ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
647 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
649 if (vha->flags.nvme_enabled)
650 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
652 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
653 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
654 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
655 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
656 sp->done = qla2x00_async_sns_sp_done;
658 rval = qla2x00_start_sp(sp);
659 if (rval != QLA_SUCCESS) {
660 ql_dbg(ql_dbg_disc, vha, 0x2043,
661 "RFT_ID issue IOCB failed (%d).\n", rval);
662 goto done_free_sp;
664 ql_dbg(ql_dbg_disc, vha, 0xffff,
665 "Async-%s - hdl=%x portid %06x.\n",
666 sp->name, sp->handle, d_id->b24);
667 return rval;
668 done_free_sp:
669 sp->free(sp);
670 done:
671 return rval;
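/*
 * Illustrative note on the RFT_ID FC-4 TYPEs bitmap used above (assuming
 * the standard FC-GS big-endian bit numbering): each byte covers eight
 * FC-4 type values, so
 *
 *	fc4_types[2] = 0x01	-> type 0x08 (FCP, SCSI)
 *	fc4_types[6] = 0x01	-> type 0x28 (NVMe over FC)
 */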
675 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
676 * @vha: HA context
677 * @type: not used
679 * Returns 0 on success.
682 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
684 struct qla_hw_data *ha = vha->hw;
686 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
687 ql_dbg(ql_dbg_disc, vha, 0x2046,
688 "RFF_ID call not supported on ISP2100/ISP2200.\n");
689 return (QLA_SUCCESS);
692 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
693 FC4_TYPE_FCP_SCSI);
696 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
697 u8 fc4feature, u8 fc4type)
699 int rval = QLA_MEMORY_ALLOC_FAILED;
700 struct ct_sns_req *ct_req;
701 srb_t *sp;
702 struct ct_sns_pkt *ct_sns;
704 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
705 if (!sp)
706 goto done;
708 sp->type = SRB_CT_PTHRU_CMD;
709 sp->name = "rff_id";
710 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
712 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
713 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
714 GFP_KERNEL);
715 if (!sp->u.iocb_cmd.u.ctarg.req) {
716 ql_log(ql_log_warn, vha, 0xd041,
717 "%s: Failed to allocate ct_sns request.\n",
718 __func__);
719 goto done_free_sp;
722 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
723 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
724 GFP_KERNEL);
725 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
726 ql_log(ql_log_warn, vha, 0xd042,
727 "%s: Failed to allocate ct_sns request.\n",
728 __func__);
729 goto done_free_sp;
731 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
732 memset(ct_sns, 0, sizeof(*ct_sns));
733 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
735 /* Prepare CT request */
736 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
738 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
739 ct_req->req.rff_id.port_id[0] = d_id->b.domain;
740 ct_req->req.rff_id.port_id[1] = d_id->b.area;
741 ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
742 ct_req->req.rff_id.fc4_feature = fc4feature;
743 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
745 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
746 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
747 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
748 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
749 sp->done = qla2x00_async_sns_sp_done;
751 rval = qla2x00_start_sp(sp);
752 if (rval != QLA_SUCCESS) {
753 ql_dbg(ql_dbg_disc, vha, 0x2047,
754 "RFF_ID issue IOCB failed (%d).\n", rval);
755 goto done_free_sp;
758 ql_dbg(ql_dbg_disc, vha, 0xffff,
759 "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
760 sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
761 return rval;
763 done_free_sp:
764 sp->free(sp);
765 done:
766 return rval;
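/*
 * Illustrative note (assumption based on FC-GS RFF_ID): the fc4_feature
 * byte registered above is a bit mask, bit 0 = target function and
 * bit 1 = initiator function, so an initiator-only port would register
 * 0x2 and a combined initiator/target port 0x3.
 */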
770 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
771 * @vha: HA context
773 * Returns 0 on success.
776 qla2x00_rnn_id(scsi_qla_host_t *vha)
778 struct qla_hw_data *ha = vha->hw;
780 if (IS_QLA2100(ha) || IS_QLA2200(ha))
781 return qla2x00_sns_rnn_id(vha);
783 return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
786 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
787 u8 *node_name)
789 int rval = QLA_MEMORY_ALLOC_FAILED;
790 struct ct_sns_req *ct_req;
791 srb_t *sp;
792 struct ct_sns_pkt *ct_sns;
794 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
795 if (!sp)
796 goto done;
798 sp->type = SRB_CT_PTHRU_CMD;
799 sp->name = "rnid";
800 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
802 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
803 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
804 GFP_KERNEL);
805 if (!sp->u.iocb_cmd.u.ctarg.req) {
806 ql_log(ql_log_warn, vha, 0xd041,
807 "%s: Failed to allocate ct_sns request.\n",
808 __func__);
809 goto done_free_sp;
812 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
813 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
814 GFP_KERNEL);
815 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
816 ql_log(ql_log_warn, vha, 0xd042,
817 "%s: Failed to allocate ct_sns request.\n",
818 __func__);
819 goto done_free_sp;
821 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
822 memset(ct_sns, 0, sizeof(*ct_sns));
823 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
825 /* Prepare CT request */
826 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
828 /* Prepare CT arguments -- port_id, node_name */
829 ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
830 ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
831 ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
832 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
834 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
835 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
836 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
838 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
839 sp->done = qla2x00_async_sns_sp_done;
841 rval = qla2x00_start_sp(sp);
842 if (rval != QLA_SUCCESS) {
843 ql_dbg(ql_dbg_disc, vha, 0x204d,
844 "RNN_ID issue IOCB failed (%d).\n", rval);
845 goto done_free_sp;
847 ql_dbg(ql_dbg_disc, vha, 0xffff,
848 "Async-%s - hdl=%x portid %06x\n",
849 sp->name, sp->handle, d_id->b24);
851 return rval;
853 done_free_sp:
854 sp->free(sp);
855 done:
856 return rval;
859 void
860 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
862 struct qla_hw_data *ha = vha->hw;
864 if (IS_QLAFX00(ha))
865 snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
866 ha->mr.fw_version, qla2x00_version_str);
867 else
868 snprintf(snn, size,
869 "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
870 ha->fw_major_version, ha->fw_minor_version,
871 ha->fw_subminor_version, qla2x00_version_str);
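/*
 * Example output (hypothetical values): for an HBA with model "QLE2562",
 * firmware 8.07.00 and driver version 10.00.00.00-k, the symbolic node
 * name produced above would read:
 *
 *	"QLE2562 FW:v8.07.00 DVR:v10.00.00.00-k"
 */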
875 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
876 * @vha: HA context
878 * Returns 0 on success.
881 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
883 struct qla_hw_data *ha = vha->hw;
885 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
886 ql_dbg(ql_dbg_disc, vha, 0x2050,
887 "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
888 return (QLA_SUCCESS);
891 return qla_async_rsnn_nn(vha);
894 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
896 int rval = QLA_MEMORY_ALLOC_FAILED;
897 struct ct_sns_req *ct_req;
898 srb_t *sp;
899 struct ct_sns_pkt *ct_sns;
901 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
902 if (!sp)
903 goto done;
905 sp->type = SRB_CT_PTHRU_CMD;
906 sp->name = "rsnn_nn";
907 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
909 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
910 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
911 GFP_KERNEL);
912 if (!sp->u.iocb_cmd.u.ctarg.req) {
913 ql_log(ql_log_warn, vha, 0xd041,
914 "%s: Failed to allocate ct_sns request.\n",
915 __func__);
916 goto done_free_sp;
919 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
920 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
921 GFP_KERNEL);
922 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
923 ql_log(ql_log_warn, vha, 0xd042,
924 "%s: Failed to allocate ct_sns request.\n",
925 __func__);
926 goto done_free_sp;
928 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
929 memset(ct_sns, 0, sizeof(*ct_sns));
930 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
932 /* Prepare CT request */
933 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
935 /* Prepare CT arguments -- node_name, symbolic node_name, size */
936 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
938 /* Prepare the Symbolic Node Name */
939 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
940 sizeof(ct_req->req.rsnn_nn.sym_node_name));
941 ct_req->req.rsnn_nn.name_len =
942 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
945 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
946 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
947 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
949 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
950 sp->done = qla2x00_async_sns_sp_done;
952 rval = qla2x00_start_sp(sp);
953 if (rval != QLA_SUCCESS) {
954 ql_dbg(ql_dbg_disc, vha, 0x2043,
955 "RFT_ID issue IOCB failed (%d).\n", rval);
956 goto done_free_sp;
958 ql_dbg(ql_dbg_disc, vha, 0xffff,
959 "Async-%s - hdl=%x.\n",
960 sp->name, sp->handle);
962 return rval;
964 done_free_sp:
965 sp->free(sp);
966 done:
967 return rval;
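/*
 * Request size sketch (illustrative): the RSNN_NN payload built above is a
 * 16-byte CT header + 8-byte node name + 1-byte name length + the symbolic
 * name itself, hence req_size = 24 + 1 + name_len; e.g. a 39-character
 * symbolic name gives 24 + 1 + 39 = 64 bytes.
 */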
971 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
972 * @vha: HA context
973 * @cmd: GS command
974 * @scmd_len: Subcommand length
975 * @data_size: response size in bytes
977 * Returns a pointer to the @ha's sns_cmd.
979 static inline struct sns_cmd_pkt *
980 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
981 uint16_t data_size)
983 uint16_t wc;
984 struct sns_cmd_pkt *sns_cmd;
985 struct qla_hw_data *ha = vha->hw;
987 sns_cmd = ha->sns_cmd;
988 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
989 wc = data_size / 2; /* Size in 16bit words. */
990 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
991 sns_cmd->p.cmd.buffer_address[0] = cpu_to_le32(LSD(ha->sns_cmd_dma));
992 sns_cmd->p.cmd.buffer_address[1] = cpu_to_le32(MSD(ha->sns_cmd_dma));
993 sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
994 sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
995 wc = (data_size - 16) / 4; /* Size in 32bit words. */
996 sns_cmd->p.cmd.size = cpu_to_le16(wc);
998 vha->qla_stats.control_requests++;
1000 return (sns_cmd);
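/*
 * Sizing sketch (illustrative): for a query with a hypothetical data_size
 * of 636 bytes the fields above become
 *
 *	buffer_length = 636 / 2        = 318 (16-bit words)
 *	size          = (636 - 16) / 4 = 155 (32-bit words)
 */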
1004 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
1005 * @vha: HA context
1006 * @fcport: fcport entry to be updated
1008 * This command uses the old Execute SNS Command mailbox routine.
1010 * Returns 0 on success.
1012 static int
1013 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1015 int rval = QLA_SUCCESS;
1016 struct qla_hw_data *ha = vha->hw;
1017 struct sns_cmd_pkt *sns_cmd;
1019 /* Issue GA_NXT. */
1020 /* Prepare SNS command request. */
1021 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1022 GA_NXT_SNS_DATA_SIZE);
1024 /* Prepare SNS command arguments -- port_id. */
1025 sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1026 sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1027 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1029 /* Execute SNS command. */
1030 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1031 sizeof(struct sns_cmd_pkt));
1032 if (rval != QLA_SUCCESS) {
1033 /*EMPTY*/
1034 ql_dbg(ql_dbg_disc, vha, 0x205f,
1035 "GA_NXT Send SNS failed (%d).\n", rval);
1036 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
1037 sns_cmd->p.gan_data[9] != 0x02) {
1038 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1039 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1040 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1041 sns_cmd->p.gan_data, 16);
1042 rval = QLA_FUNCTION_FAILED;
1043 } else {
1044 /* Populate fc_port_t entry. */
1045 fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1046 fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1047 fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1049 memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1050 memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
1052 if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1053 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1054 fcport->d_id.b.domain = 0xf0;
1056 ql_dbg(ql_dbg_disc, vha, 0x2061,
1057 "GA_NXT entry - nn %8phN pn %8phN "
1058 "port_id=%02x%02x%02x.\n",
1059 fcport->node_name, fcport->port_name,
1060 fcport->d_id.b.domain, fcport->d_id.b.area,
1061 fcport->d_id.b.al_pa);
1064 return (rval);
1068 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1069 * @vha: HA context
1070 * @list: switch info entries to populate
1072 * This command uses the old Execute SNS Command mailbox routine.
1074 * NOTE: Non-Nx_Ports are not requested.
1076 * Returns 0 on success.
1078 static int
1079 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
1081 int rval;
1082 struct qla_hw_data *ha = vha->hw;
1083 uint16_t i;
1084 uint8_t *entry;
1085 struct sns_cmd_pkt *sns_cmd;
1086 uint16_t gid_pt_sns_data_size;
1088 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1090 /* Issue GID_PT. */
1091 /* Prepare SNS command request. */
1092 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1093 gid_pt_sns_data_size);
1095 /* Prepare SNS command arguments -- port_type. */
1096 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1098 /* Execute SNS command. */
1099 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1100 sizeof(struct sns_cmd_pkt));
1101 if (rval != QLA_SUCCESS) {
1102 /*EMPTY*/
1103 ql_dbg(ql_dbg_disc, vha, 0x206d,
1104 "GID_PT Send SNS failed (%d).\n", rval);
1105 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
1106 sns_cmd->p.gid_data[9] != 0x02) {
1107 ql_dbg(ql_dbg_disc, vha, 0x202f,
1108 "GID_PT failed, rejected request, gid_rsp:\n");
1109 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1110 sns_cmd->p.gid_data, 16);
1111 rval = QLA_FUNCTION_FAILED;
1112 } else {
1113 /* Set port IDs in switch info list. */
1114 for (i = 0; i < ha->max_fibre_devices; i++) {
1115 entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1116 list[i].d_id.b.domain = entry[1];
1117 list[i].d_id.b.area = entry[2];
1118 list[i].d_id.b.al_pa = entry[3];
1120 /* Last one exit. */
1121 if (entry[0] & BIT_7) {
1122 list[i].d_id.b.rsvd_1 = entry[0];
1123 break;
1128 * If we've used all available slots, then the switch is
1129 * reporting back more devices than we can handle with this
1130 * single call. Return a failed status, and let GA_NXT handle
1131 * the overload.
1133 if (i == ha->max_fibre_devices)
1134 rval = QLA_FUNCTION_FAILED;
1137 return (rval);
1141 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1142 * @vha: HA context
1143 * @list: switch info entries to populate
1145 * This command uses the old Execute SNS Command mailbox routine.
1147 * Returns 0 on success.
1149 static int
1150 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1152 int rval = QLA_SUCCESS;
1153 struct qla_hw_data *ha = vha->hw;
1154 uint16_t i;
1155 struct sns_cmd_pkt *sns_cmd;
1157 for (i = 0; i < ha->max_fibre_devices; i++) {
1158 /* Issue GPN_ID */
1159 /* Prepare SNS command request. */
1160 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1161 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
1163 /* Prepare SNS command arguments -- port_id. */
1164 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1165 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1166 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1168 /* Execute SNS command. */
1169 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1170 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1171 if (rval != QLA_SUCCESS) {
1172 /*EMPTY*/
1173 ql_dbg(ql_dbg_disc, vha, 0x2032,
1174 "GPN_ID Send SNS failed (%d).\n", rval);
1175 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1176 sns_cmd->p.gpn_data[9] != 0x02) {
1177 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1178 "GPN_ID failed, rejected request, gpn_rsp:\n");
1179 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1180 sns_cmd->p.gpn_data, 16);
1181 rval = QLA_FUNCTION_FAILED;
1182 } else {
1183 /* Save portname */
1184 memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
1185 WWN_SIZE);
1188 /* Last device exit. */
1189 if (list[i].d_id.b.rsvd_1 != 0)
1190 break;
1193 return (rval);
1197 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
1198 * @vha: HA context
1199 * @list: switch info entries to populate
1201 * This command uses the old Execute SNS Command mailbox routine.
1203 * Returns 0 on success.
1205 static int
1206 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
1208 int rval = QLA_SUCCESS;
1209 struct qla_hw_data *ha = vha->hw;
1210 uint16_t i;
1211 struct sns_cmd_pkt *sns_cmd;
1213 for (i = 0; i < ha->max_fibre_devices; i++) {
1214 /* Issue GNN_ID */
1215 /* Prepare SNS command request. */
1216 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1217 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
1219 /* Prepare SNS command arguments -- port_id. */
1220 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1221 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1222 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1224 /* Execute SNS command. */
1225 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1226 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1227 if (rval != QLA_SUCCESS) {
1228 /*EMPTY*/
1229 ql_dbg(ql_dbg_disc, vha, 0x203f,
1230 "GNN_ID Send SNS failed (%d).\n", rval);
1231 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1232 sns_cmd->p.gnn_data[9] != 0x02) {
1233 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1234 "GNN_ID failed, rejected request, gnn_rsp:\n");
1235 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1236 sns_cmd->p.gnn_data, 16);
1237 rval = QLA_FUNCTION_FAILED;
1238 } else {
1239 /* Save nodename */
1240 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
1241 WWN_SIZE);
1243 ql_dbg(ql_dbg_disc, vha, 0x206e,
1244 "GID_PT entry - nn %8phN pn %8phN "
1245 "port_id=%02x%02x%02x.\n",
1246 list[i].node_name, list[i].port_name,
1247 list[i].d_id.b.domain, list[i].d_id.b.area,
1248 list[i].d_id.b.al_pa);
1251 /* Last device exit. */
1252 if (list[i].d_id.b.rsvd_1 != 0)
1253 break;
1256 return (rval);
1260 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
1261 * @vha: HA context
1263 * This command uses the old Execute SNS Command mailbox routine.
1265 * Returns 0 on success.
1267 static int
1268 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1270 int rval;
1271 struct qla_hw_data *ha = vha->hw;
1272 struct sns_cmd_pkt *sns_cmd;
1274 /* Issue RFT_ID. */
1275 /* Prepare SNS command request. */
1276 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1277 RFT_ID_SNS_DATA_SIZE);
1279 /* Prepare SNS command arguments -- port_id, FC-4 types */
1280 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1281 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1282 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1284 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1286 /* Execute SNS command. */
1287 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1288 sizeof(struct sns_cmd_pkt));
1289 if (rval != QLA_SUCCESS) {
1290 /*EMPTY*/
1291 ql_dbg(ql_dbg_disc, vha, 0x2060,
1292 "RFT_ID Send SNS failed (%d).\n", rval);
1293 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1294 sns_cmd->p.rft_data[9] != 0x02) {
1295 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1296 "RFT_ID failed, rejected request rft_rsp:\n");
1297 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1298 sns_cmd->p.rft_data, 16);
1299 rval = QLA_FUNCTION_FAILED;
1300 } else {
1301 ql_dbg(ql_dbg_disc, vha, 0x2073,
1302 "RFT_ID exiting normally.\n");
1305 return (rval);
1309 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
1310 * @vha: HA context
1312 * This command uses the old Execute SNS Command mailbox routine.
1314 * Returns 0 on success.
1316 static int
1317 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1319 int rval;
1320 struct qla_hw_data *ha = vha->hw;
1321 struct sns_cmd_pkt *sns_cmd;
1323 /* Issue RNN_ID. */
1324 /* Prepare SNS command request. */
1325 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1326 RNN_ID_SNS_DATA_SIZE);
1328 /* Prepare SNS command arguments -- port_id, nodename. */
1329 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1330 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1331 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1333 sns_cmd->p.cmd.param[4] = vha->node_name[7];
1334 sns_cmd->p.cmd.param[5] = vha->node_name[6];
1335 sns_cmd->p.cmd.param[6] = vha->node_name[5];
1336 sns_cmd->p.cmd.param[7] = vha->node_name[4];
1337 sns_cmd->p.cmd.param[8] = vha->node_name[3];
1338 sns_cmd->p.cmd.param[9] = vha->node_name[2];
1339 sns_cmd->p.cmd.param[10] = vha->node_name[1];
1340 sns_cmd->p.cmd.param[11] = vha->node_name[0];
1342 /* Execute SNS command. */
1343 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1344 sizeof(struct sns_cmd_pkt));
1345 if (rval != QLA_SUCCESS) {
1346 /*EMPTY*/
1347 ql_dbg(ql_dbg_disc, vha, 0x204a,
1348 "RNN_ID Send SNS failed (%d).\n", rval);
1349 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1350 sns_cmd->p.rnn_data[9] != 0x02) {
1351 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1352 "RNN_ID failed, rejected request, rnn_rsp:\n");
1353 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1354 sns_cmd->p.rnn_data, 16);
1355 rval = QLA_FUNCTION_FAILED;
1356 } else {
1357 ql_dbg(ql_dbg_disc, vha, 0x204c,
1358 "RNN_ID exiting normally.\n");
1361 return (rval);
1365 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1366 * @vha: HA context
1368 * Returns 0 on success.
1371 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1373 int ret, rval;
1374 uint16_t mb[MAILBOX_REGISTER_COUNT];
1375 struct qla_hw_data *ha = vha->hw;
1376 ret = QLA_SUCCESS;
1377 if (vha->flags.management_server_logged_in)
1378 return ret;
1380 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1381 0xfa, mb, BIT_1);
1382 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1383 if (rval == QLA_MEMORY_ALLOC_FAILED)
1384 ql_dbg(ql_dbg_disc, vha, 0x2085,
1385 "Failed management_server login: loopid=%x "
1386 "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1387 else
1388 ql_dbg(ql_dbg_disc, vha, 0x2024,
1389 "Failed management_server login: loopid=%x "
1390 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1391 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
1392 mb[7]);
1393 ret = QLA_FUNCTION_FAILED;
1394 } else
1395 vha->flags.management_server_logged_in = 1;
1397 return ret;
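/*
 * Note (illustrative): the fabric_login() above targets the well-known
 * management server address 0xFFFFFA (domain 0xff, area 0xff, al_pa 0xfa),
 * which is where the FDMI requests below are sent once the login succeeds.
 */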
1401 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1402 * @vha: HA context
1403 * @req_size: request size in bytes
1404 * @rsp_size: response size in bytes
1406 * Returns a pointer to the @ha's ms_iocb.
1408 void *
1409 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1410 uint32_t rsp_size)
1412 ms_iocb_entry_t *ms_pkt;
1413 struct qla_hw_data *ha = vha->hw;
1414 ms_pkt = ha->ms_iocb;
1415 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1417 ms_pkt->entry_type = MS_IOCB_TYPE;
1418 ms_pkt->entry_count = 1;
1419 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1420 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
1421 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1422 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1423 ms_pkt->total_dsd_count = cpu_to_le16(2);
1424 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1425 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1427 ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1428 ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1429 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
1431 ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1432 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1433 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
1435 return ms_pkt;
1439 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1440 * @vha: HA context
1441 * @req_size: request size in bytes
1442 * @rsp_size: response size in bytes
1444 * Returns a pointer to the @ha's ms_iocb.
1446 void *
1447 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1448 uint32_t rsp_size)
1450 struct ct_entry_24xx *ct_pkt;
1451 struct qla_hw_data *ha = vha->hw;
1453 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1454 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1456 ct_pkt->entry_type = CT_IOCB_TYPE;
1457 ct_pkt->entry_count = 1;
1458 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1459 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1460 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1461 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1462 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1463 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1465 ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1466 ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1467 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1469 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1470 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1471 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1472 ct_pkt->vp_index = vha->vp_idx;
1474 return ct_pkt;
1477 static inline ms_iocb_entry_t *
1478 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1480 struct qla_hw_data *ha = vha->hw;
1481 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1482 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1484 if (IS_FWI2_CAPABLE(ha)) {
1485 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1486 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1487 } else {
1488 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1489 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
1492 return ms_pkt;
1496 * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for FDMI query.
1497 * @p: CT request buffer
1498 * @cmd: GS command
1499 * @rsp_size: response size in bytes
1501 * Returns a pointer to the initialized @ct_req.
1503 static inline struct ct_sns_req *
1504 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1505 uint16_t rsp_size)
1507 memset(p, 0, sizeof(struct ct_sns_pkt));
1509 p->p.req.header.revision = 0x01;
1510 p->p.req.header.gs_type = 0xFA;
1511 p->p.req.header.gs_subtype = 0x10;
1512 p->p.req.command = cpu_to_be16(cmd);
1513 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
1515 return &p->p.req;
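/*
 * Illustrative contrast with qla2x00_prep_ct_req(): FDMI traffic is
 * addressed to the management service (gs_type 0xFA, gs_subtype 0x10 for
 * FDMI) rather than the directory/name server (gs_type 0xFC, gs_subtype
 * 0x02) used by the SNS queries earlier in this file.
 */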
1519 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
1520 * @vha: HA context
1522 * Returns 0 on success.
1524 static int
1525 qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1527 int rval, alen;
1528 uint32_t size, sn;
1530 ms_iocb_entry_t *ms_pkt;
1531 struct ct_sns_req *ct_req;
1532 struct ct_sns_rsp *ct_rsp;
1533 void *entries;
1534 struct ct_fdmi_hba_attr *eiter;
1535 struct qla_hw_data *ha = vha->hw;
1537 /* Issue RHBA */
1538 /* Prepare common MS IOCB */
1539 /* Request size adjusted after CT preparation */
1540 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1542 /* Prepare CT request */
1543 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
1544 ct_rsp = &ha->ct_sns->p.rsp;
1546 /* Prepare FDMI command arguments -- attribute block, attributes. */
1547 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
1548 ct_req->req.rhba.entry_count = cpu_to_be32(1);
1549 memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
1550 size = 2 * WWN_SIZE + 4 + 4;
1552 /* Attributes */
1553 ct_req->req.rhba.attrs.count =
1554 cpu_to_be32(FDMI_HBA_ATTR_COUNT);
1555 entries = ct_req->req.rhba.hba_identifier;
1557 /* Nodename. */
1558 eiter = entries + size;
1559 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1560 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1561 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1562 size += 4 + WWN_SIZE;
1564 ql_dbg(ql_dbg_disc, vha, 0x2025,
1565 "NodeName = %8phN.\n", eiter->a.node_name);
1567 /* Manufacturer. */
1568 eiter = entries + size;
1569 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1570 alen = strlen(QLA2XXX_MANUFACTURER);
1571 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1572 "%s", "QLogic Corporation");
1573 alen += 4 - (alen & 3);
1574 eiter->len = cpu_to_be16(4 + alen);
1575 size += 4 + alen;
1577 ql_dbg(ql_dbg_disc, vha, 0x2026,
1578 "Manufacturer = %s.\n", eiter->a.manufacturer);
1580 /* Serial number. */
1581 eiter = entries + size;
1582 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1583 if (IS_FWI2_CAPABLE(ha))
1584 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1585 sizeof(eiter->a.serial_num));
1586 else {
1587 sn = ((ha->serial0 & 0x1f) << 16) |
1588 (ha->serial2 << 8) | ha->serial1;
1589 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1590 "%c%05d", 'A' + sn / 100000, sn % 100000);
1592 alen = strlen(eiter->a.serial_num);
1593 alen += 4 - (alen & 3);
1594 eiter->len = cpu_to_be16(4 + alen);
1595 size += 4 + alen;
1597 ql_dbg(ql_dbg_disc, vha, 0x2027,
1598 "Serial no. = %s.\n", eiter->a.serial_num);
1600 /* Model name. */
1601 eiter = entries + size;
1602 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1603 snprintf(eiter->a.model, sizeof(eiter->a.model),
1604 "%s", ha->model_number);
1605 alen = strlen(eiter->a.model);
1606 alen += 4 - (alen & 3);
1607 eiter->len = cpu_to_be16(4 + alen);
1608 size += 4 + alen;
1610 ql_dbg(ql_dbg_disc, vha, 0x2028,
1611 "Model Name = %s.\n", eiter->a.model);
1613 /* Model description. */
1614 eiter = entries + size;
1615 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1616 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
1617 "%s", ha->model_desc);
1618 alen = strlen(eiter->a.model_desc);
1619 alen += 4 - (alen & 3);
1620 eiter->len = cpu_to_be16(4 + alen);
1621 size += 4 + alen;
1623 ql_dbg(ql_dbg_disc, vha, 0x2029,
1624 "Model Desc = %s.\n", eiter->a.model_desc);
1626 /* Hardware version. */
1627 eiter = entries + size;
1628 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1629 if (!IS_FWI2_CAPABLE(ha)) {
1630 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1631 "HW:%s", ha->adapter_id);
1632 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
1633 sizeof(eiter->a.hw_version))) {
1635 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
1636 sizeof(eiter->a.hw_version))) {
1638 } else {
1639 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1640 "HW:%s", ha->adapter_id);
1642 alen = strlen(eiter->a.hw_version);
1643 alen += 4 - (alen & 3);
1644 eiter->len = cpu_to_be16(4 + alen);
1645 size += 4 + alen;
1647 ql_dbg(ql_dbg_disc, vha, 0x202a,
1648 "Hardware ver = %s.\n", eiter->a.hw_version);
1650 /* Driver version. */
1651 eiter = entries + size;
1652 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1653 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
1654 "%s", qla2x00_version_str);
1655 alen = strlen(eiter->a.driver_version);
1656 alen += 4 - (alen & 3);
1657 eiter->len = cpu_to_be16(4 + alen);
1658 size += 4 + alen;
1660 ql_dbg(ql_dbg_disc, vha, 0x202b,
1661 "Driver ver = %s.\n", eiter->a.driver_version);
1663 /* Option ROM version. */
1664 eiter = entries + size;
1665 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1666 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
1667 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1668 alen = strlen(eiter->a.orom_version);
1669 alen += 4 - (alen & 3);
1670 eiter->len = cpu_to_be16(4 + alen);
1671 size += 4 + alen;
1673 ql_dbg(ql_dbg_disc, vha , 0x202c,
1674 "Optrom vers = %s.\n", eiter->a.orom_version);
1676 /* Firmware version */
1677 eiter = entries + size;
1678 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1679 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1680 sizeof(eiter->a.fw_version));
1681 alen = strlen(eiter->a.fw_version);
1682 alen += 4 - (alen & 3);
1683 eiter->len = cpu_to_be16(4 + alen);
1684 size += 4 + alen;
1686 ql_dbg(ql_dbg_disc, vha, 0x202d,
1687 "Firmware vers = %s.\n", eiter->a.fw_version);
1689 /* Update MS request size. */
1690 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1692 ql_dbg(ql_dbg_disc, vha, 0x202e,
1693 "RHBA identifier = %8phN size=%d.\n",
1694 ct_req->req.rhba.hba_identifier, size);
1695 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1696 entries, size);
1698 /* Execute MS IOCB */
1699 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1700 sizeof(ms_iocb_entry_t));
1701 if (rval != QLA_SUCCESS) {
1702 /*EMPTY*/
1703 ql_dbg(ql_dbg_disc, vha, 0x2030,
1704 "RHBA issue IOCB failed (%d).\n", rval);
1705 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1706 QLA_SUCCESS) {
1707 rval = QLA_FUNCTION_FAILED;
1708 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1709 ct_rsp->header.explanation_code ==
1710 CT_EXPL_ALREADY_REGISTERED) {
1711 ql_dbg(ql_dbg_disc, vha, 0x2034,
1712 "HBA already registered.\n");
1713 rval = QLA_ALREADY_REGISTERED;
1714 } else {
1715 ql_dbg(ql_dbg_disc, vha, 0x20ad,
1716 "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
1717 ct_rsp->header.reason_code,
1718 ct_rsp->header.explanation_code);
1720 } else {
1721 ql_dbg(ql_dbg_disc, vha, 0x2035,
1722 "RHBA exiting normally.\n");
1725 return rval;
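/*
 * Attribute packing sketch (illustrative): every FDMI attribute built above
 * is a 2-byte type + 2-byte length + value padded to a 4-byte boundary.
 * The recurring idiom
 *
 *	alen += 4 - (alen & 3);
 *
 * rounds a string length up to the next multiple of four and always leaves
 * at least one NUL pad byte, e.g. strlen("QLE2562") = 7 -> alen = 8, while
 * an already aligned length of 8 becomes 12.
 */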
1729 * qla2x00_fdmi_rpa() - perform RPA registration
1730 * @vha: HA context
1732 * Returns 0 on success.
1734 static int
1735 qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1737 int rval, alen;
1738 uint32_t size;
1739 struct qla_hw_data *ha = vha->hw;
1740 ms_iocb_entry_t *ms_pkt;
1741 struct ct_sns_req *ct_req;
1742 struct ct_sns_rsp *ct_rsp;
1743 void *entries;
1744 struct ct_fdmi_port_attr *eiter;
1745 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1746 struct new_utsname *p_sysid = NULL;
1748 /* Issue RPA */
1749 /* Prepare common MS IOCB */
1750 /* Request size adjusted after CT preparation */
1751 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
1753 /* Prepare CT request */
1754 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
1755 RPA_RSP_SIZE);
1756 ct_rsp = &ha->ct_sns->p.rsp;
1758 /* Prepare FDMI command arguments -- attribute block, attributes. */
1759 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
1760 size = WWN_SIZE + 4;
1762 /* Attributes */
1763 ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1764 entries = ct_req->req.rpa.port_name;
1766 /* FC4 types. */
1767 eiter = entries + size;
1768 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1769 eiter->len = cpu_to_be16(4 + 32);
1770 eiter->a.fc4_types[2] = 0x01;
1771 size += 4 + 32;
1773 ql_dbg(ql_dbg_disc, vha, 0x2039,
1774 "FC4_TYPES=%02x %02x.\n",
1775 eiter->a.fc4_types[2],
1776 eiter->a.fc4_types[1]);
1778 /* Supported speed. */
1779 eiter = entries + size;
1780 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1781 eiter->len = cpu_to_be16(4 + 4);
1782 if (IS_CNA_CAPABLE(ha))
1783 eiter->a.sup_speed = cpu_to_be32(
1784 FDMI_PORT_SPEED_10GB);
1785 else if (IS_QLA27XX(ha))
1786 eiter->a.sup_speed = cpu_to_be32(
1787 FDMI_PORT_SPEED_32GB|
1788 FDMI_PORT_SPEED_16GB|
1789 FDMI_PORT_SPEED_8GB);
1790 else if (IS_QLA2031(ha))
1791 eiter->a.sup_speed = cpu_to_be32(
1792 FDMI_PORT_SPEED_16GB|
1793 FDMI_PORT_SPEED_8GB|
1794 FDMI_PORT_SPEED_4GB);
1795 else if (IS_QLA25XX(ha))
1796 eiter->a.sup_speed = cpu_to_be32(
1797 FDMI_PORT_SPEED_8GB|
1798 FDMI_PORT_SPEED_4GB|
1799 FDMI_PORT_SPEED_2GB|
1800 FDMI_PORT_SPEED_1GB);
1801 else if (IS_QLA24XX_TYPE(ha))
1802 eiter->a.sup_speed = cpu_to_be32(
1803 FDMI_PORT_SPEED_4GB|
1804 FDMI_PORT_SPEED_2GB|
1805 FDMI_PORT_SPEED_1GB);
1806 else if (IS_QLA23XX(ha))
1807 eiter->a.sup_speed = cpu_to_be32(
1808 FDMI_PORT_SPEED_2GB|
1809 FDMI_PORT_SPEED_1GB);
1810 else
1811 eiter->a.sup_speed = cpu_to_be32(
1812 FDMI_PORT_SPEED_1GB);
1813 size += 4 + 4;
1815 ql_dbg(ql_dbg_disc, vha, 0x203a,
1816 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1818 /* Current speed. */
1819 eiter = entries + size;
1820 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1821 eiter->len = cpu_to_be16(4 + 4);
1822 switch (ha->link_data_rate) {
1823 case PORT_SPEED_1GB:
1824 eiter->a.cur_speed =
1825 cpu_to_be32(FDMI_PORT_SPEED_1GB);
1826 break;
1827 case PORT_SPEED_2GB:
1828 eiter->a.cur_speed =
1829 cpu_to_be32(FDMI_PORT_SPEED_2GB);
1830 break;
1831 case PORT_SPEED_4GB:
1832 eiter->a.cur_speed =
1833 cpu_to_be32(FDMI_PORT_SPEED_4GB);
1834 break;
1835 case PORT_SPEED_8GB:
1836 eiter->a.cur_speed =
1837 cpu_to_be32(FDMI_PORT_SPEED_8GB);
1838 break;
1839 case PORT_SPEED_10GB:
1840 eiter->a.cur_speed =
1841 cpu_to_be32(FDMI_PORT_SPEED_10GB);
1842 break;
1843 case PORT_SPEED_16GB:
1844 eiter->a.cur_speed =
1845 cpu_to_be32(FDMI_PORT_SPEED_16GB);
1846 break;
1847 case PORT_SPEED_32GB:
1848 eiter->a.cur_speed =
1849 cpu_to_be32(FDMI_PORT_SPEED_32GB);
1850 break;
1851 default:
1852 eiter->a.cur_speed =
1853 cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
1854 break;
1856 size += 4 + 4;
1858 ql_dbg(ql_dbg_disc, vha, 0x203b,
1859 "Current_Speed=%x.\n", eiter->a.cur_speed);
1861 /* Max frame size. */
1862 eiter = entries + size;
1863 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1864 eiter->len = cpu_to_be16(4 + 4);
1865 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
1866 le16_to_cpu(icb24->frame_payload_size) :
1867 le16_to_cpu(ha->init_cb->frame_payload_size);
1868 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
1869 size += 4 + 4;
1871 ql_dbg(ql_dbg_disc, vha, 0x203c,
1872 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1874 /* OS device name. */
1875 eiter = entries + size;
1876 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1877 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1878 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1879 alen = strlen(eiter->a.os_dev_name);
1880 alen += 4 - (alen & 3);
1881 eiter->len = cpu_to_be16(4 + alen);
1882 size += 4 + alen;
1884 ql_dbg(ql_dbg_disc, vha, 0x204b,
1885 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
1887 /* Hostname. */
1888 eiter = entries + size;
1889 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1890 p_sysid = utsname();
1891 if (p_sysid) {
1892 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1893 "%s", p_sysid->nodename);
1894 } else {
1895 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1896 "%s", fc_host_system_hostname(vha->host));
1898 alen = strlen(eiter->a.host_name);
1899 alen += 4 - (alen & 3);
1900 eiter->len = cpu_to_be16(4 + alen);
1901 size += 4 + alen;
1903 ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
1905 /* Update MS request size. */
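	/*
	 * The extra 16 bytes cover the basic CT_IU preamble that precedes
	 * the attribute payload counted in "size".
	 */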
1906 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1908 ql_dbg(ql_dbg_disc, vha, 0x203e,
1909 "RPA portname %016llx, size = %d.\n",
1910 wwn_to_u64(ct_req->req.rpa.port_name), size);
1911 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1912 entries, size);
1914 /* Execute MS IOCB */
1915 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1916 sizeof(ms_iocb_entry_t));
1917 if (rval != QLA_SUCCESS) {
1918 /*EMPTY*/
1919 ql_dbg(ql_dbg_disc, vha, 0x2040,
1920 "RPA issue IOCB failed (%d).\n", rval);
1921 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1922 QLA_SUCCESS) {
1923 rval = QLA_FUNCTION_FAILED;
1924 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1925 ct_rsp->header.explanation_code ==
1926 CT_EXPL_ALREADY_REGISTERED) {
1927 ql_dbg(ql_dbg_disc, vha, 0x20cd,
1928 "RPA already registered.\n");
1929 rval = QLA_ALREADY_REGISTERED;
1932 } else {
1933 ql_dbg(ql_dbg_disc, vha, 0x2041,
1934 "RPA exiting normally.\n");
1937 return rval;
1941 * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
1942 * @vha: HA context
1944 * Returns 0 on success.
1946 static int
1947 qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
1949 int rval, alen;
1950 uint32_t size, sn;
1951 ms_iocb_entry_t *ms_pkt;
1952 struct ct_sns_req *ct_req;
1953 struct ct_sns_rsp *ct_rsp;
1954 void *entries;
1955 struct ct_fdmiv2_hba_attr *eiter;
1956 struct qla_hw_data *ha = vha->hw;
1957 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1958 struct new_utsname *p_sysid = NULL;
1960 /* Issue RHBA */
1961 /* Prepare common MS IOCB */
1962 /* Request size adjusted after CT preparation */
1963 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1965 /* Prepare CT request */
1966 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
1967 RHBA_RSP_SIZE);
1968 ct_rsp = &ha->ct_sns->p.rsp;
1970 /* Prepare FDMI command arguments -- attribute block, attributes. */
1971 memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
1972 ct_req->req.rhba2.entry_count = cpu_to_be32(1);
1973 memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
1974 size = 2 * WWN_SIZE + 4 + 4;
1976 /* Attributes */
1977 ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
1978 entries = ct_req->req.rhba2.hba_identifier;
1980 /* Nodename. */
1981 eiter = entries + size;
1982 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1983 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1984 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1985 size += 4 + WWN_SIZE;
1987 ql_dbg(ql_dbg_disc, vha, 0x207d,
1988 "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
1990 /* Manufacturer. */
1991 eiter = entries + size;
1992 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1993 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1994 "%s", "QLogic Corporation");
1995 eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
1996 alen = strlen(eiter->a.manufacturer);
1997 alen += 4 - (alen & 3);
1998 eiter->len = cpu_to_be16(4 + alen);
1999 size += 4 + alen;
2001 ql_dbg(ql_dbg_disc, vha, 0x20a5,
2002 "Manufacturer = %s.\n", eiter->a.manufacturer);
2004 /* Serial number. */
2005 eiter = entries + size;
2006 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
2007 if (IS_FWI2_CAPABLE(ha))
2008 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
2009 sizeof(eiter->a.serial_num));
2010 else {
2011 sn = ((ha->serial0 & 0x1f) << 16) |
2012 (ha->serial2 << 8) | ha->serial1;
2013 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
2014 "%c%05d", 'A' + sn / 100000, sn % 100000);
2016 alen = strlen(eiter->a.serial_num);
2017 alen += 4 - (alen & 3);
2018 eiter->len = cpu_to_be16(4 + alen);
2019 size += 4 + alen;
2021 ql_dbg(ql_dbg_disc, vha, 0x20a6,
2022 "Serial no. = %s.\n", eiter->a.serial_num);
2024 /* Model name. */
2025 eiter = entries + size;
2026 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
2027 snprintf(eiter->a.model, sizeof(eiter->a.model),
2028 "%s", ha->model_number);
2029 alen = strlen(eiter->a.model);
2030 alen += 4 - (alen & 3);
2031 eiter->len = cpu_to_be16(4 + alen);
2032 size += 4 + alen;
2034 ql_dbg(ql_dbg_disc, vha, 0x20a7,
2035 "Model Name = %s.\n", eiter->a.model);
2037 /* Model description. */
2038 eiter = entries + size;
2039 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
2040 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
2041 "%s", ha->model_desc);
2042 alen = strlen(eiter->a.model_desc);
2043 alen += 4 - (alen & 3);
2044 eiter->len = cpu_to_be16(4 + alen);
2045 size += 4 + alen;
2047 ql_dbg(ql_dbg_disc, vha, 0x20a8,
2048 "Model Desc = %s.\n", eiter->a.model_desc);
2050 /* Hardware version. */
2051 eiter = entries + size;
2052 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
2053 if (!IS_FWI2_CAPABLE(ha)) {
2054 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2055 "HW:%s", ha->adapter_id);
2056 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
2057 sizeof(eiter->a.hw_version))) {
2059 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
2060 sizeof(eiter->a.hw_version))) {
2062 } else {
2063 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2064 "HW:%s", ha->adapter_id);
2066 alen = strlen(eiter->a.hw_version);
2067 alen += 4 - (alen & 3);
2068 eiter->len = cpu_to_be16(4 + alen);
2069 size += 4 + alen;
2071 ql_dbg(ql_dbg_disc, vha, 0x20a9,
2072 "Hardware ver = %s.\n", eiter->a.hw_version);
2074 /* Driver version. */
2075 eiter = entries + size;
2076 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
2077 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
2078 "%s", qla2x00_version_str);
2079 alen = strlen(eiter->a.driver_version);
2080 alen += 4 - (alen & 3);
2081 eiter->len = cpu_to_be16(4 + alen);
2082 size += 4 + alen;
2084 ql_dbg(ql_dbg_disc, vha, 0x20aa,
2085 "Driver ver = %s.\n", eiter->a.driver_version);
2087 /* Option ROM version. */
2088 eiter = entries + size;
2089 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
2090 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
2091 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2092 alen = strlen(eiter->a.orom_version);
2093 alen += 4 - (alen & 3);
2094 eiter->len = cpu_to_be16(4 + alen);
2095 size += 4 + alen;
2097 	ql_dbg(ql_dbg_disc, vha, 0x20ab,
2098 	    "Optrom version = %s.\n",
2099 	    eiter->a.orom_version);
2101 /* Firmware version */
2102 eiter = entries + size;
2103 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
2104 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
2105 sizeof(eiter->a.fw_version));
2106 alen = strlen(eiter->a.fw_version);
2107 alen += 4 - (alen & 3);
2108 eiter->len = cpu_to_be16(4 + alen);
2109 size += 4 + alen;
2111 ql_dbg(ql_dbg_disc, vha, 0x20ac,
2112 "Firmware vers = %s.\n", eiter->a.fw_version);
2114 /* OS Name and Version */
2115 eiter = entries + size;
2116 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
2117 p_sysid = utsname();
2118 if (p_sysid) {
2119 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2120 "%s %s %s",
2121 p_sysid->sysname, p_sysid->release, p_sysid->version);
2122 } else {
2123 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2124 "%s %s", "Linux", fc_host_system_hostname(vha->host));
2126 alen = strlen(eiter->a.os_version);
2127 alen += 4 - (alen & 3);
2128 eiter->len = cpu_to_be16(4 + alen);
2129 size += 4 + alen;
2131 ql_dbg(ql_dbg_disc, vha, 0x20ae,
2132 "OS Name and Version = %s.\n", eiter->a.os_version);
2134 /* MAX CT Payload Length */
2135 eiter = entries + size;
2136 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
2137 eiter->a.max_ct_len = IS_FWI2_CAPABLE(ha) ?
2138 le16_to_cpu(icb24->frame_payload_size) :
2139 le16_to_cpu(ha->init_cb->frame_payload_size);
2140 eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
2141 eiter->len = cpu_to_be16(4 + 4);
2142 size += 4 + 4;
2144 ql_dbg(ql_dbg_disc, vha, 0x20af,
2145 "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
2147 	/* Node Symbolic Name */
2148 eiter = entries + size;
2149 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
2150 qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
2151 sizeof(eiter->a.sym_name));
2152 alen = strlen(eiter->a.sym_name);
2153 alen += 4 - (alen & 3);
2154 eiter->len = cpu_to_be16(4 + alen);
2155 size += 4 + alen;
2157 ql_dbg(ql_dbg_disc, vha, 0x20b0,
2158 "Symbolic Name = %s.\n", eiter->a.sym_name);
2160 /* Vendor Id */
2161 eiter = entries + size;
2162 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
2163 eiter->a.vendor_id = cpu_to_be32(0x1077);
2164 eiter->len = cpu_to_be16(4 + 4);
2165 size += 4 + 4;
2167 ql_dbg(ql_dbg_disc, vha, 0x20b1,
2168 "Vendor Id = %x.\n", eiter->a.vendor_id);
2170 /* Num Ports */
2171 eiter = entries + size;
2172 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
2173 eiter->a.num_ports = cpu_to_be32(1);
2174 eiter->len = cpu_to_be16(4 + 4);
2175 size += 4 + 4;
2177 ql_dbg(ql_dbg_disc, vha, 0x20b2,
2178 "Port Num = %x.\n", eiter->a.num_ports);
2180 /* Fabric Name */
2181 eiter = entries + size;
2182 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
2183 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2184 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2185 size += 4 + WWN_SIZE;
2187 ql_dbg(ql_dbg_disc, vha, 0x20b3,
2188 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2190 /* BIOS Version */
2191 eiter = entries + size;
2192 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
2193 snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
2194 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2195 alen = strlen(eiter->a.bios_name);
2196 alen += 4 - (alen & 3);
2197 eiter->len = cpu_to_be16(4 + alen);
2198 size += 4 + alen;
2200 ql_dbg(ql_dbg_disc, vha, 0x20b4,
2201 "BIOS Name = %s\n", eiter->a.bios_name);
2203 /* Vendor Identifier */
2204 eiter = entries + size;
2205 eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
2206 snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
2207 "%s", "QLGC");
2208 alen = strlen(eiter->a.vendor_identifier);
2209 alen += 4 - (alen & 3);
2210 eiter->len = cpu_to_be16(4 + alen);
2211 size += 4 + alen;
2213 ql_dbg(ql_dbg_disc, vha, 0x201b,
2214 "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
2216 /* Update MS request size. */
2217 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2219 ql_dbg(ql_dbg_disc, vha, 0x20b5,
2220 "RHBA identifier = %016llx.\n",
2221 wwn_to_u64(ct_req->req.rhba2.hba_identifier));
2222 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
2223 entries, size);
2225 /* Execute MS IOCB */
2226 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2227 sizeof(ms_iocb_entry_t));
2228 if (rval != QLA_SUCCESS) {
2229 /*EMPTY*/
2230 ql_dbg(ql_dbg_disc, vha, 0x20b7,
2231 "RHBA issue IOCB failed (%d).\n", rval);
2232 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
2233 QLA_SUCCESS) {
2234 rval = QLA_FUNCTION_FAILED;
2236 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2237 ct_rsp->header.explanation_code ==
2238 CT_EXPL_ALREADY_REGISTERED) {
2239 ql_dbg(ql_dbg_disc, vha, 0x20b8,
2240 "HBA already registered.\n");
2241 rval = QLA_ALREADY_REGISTERED;
2242 } else {
2243 ql_dbg(ql_dbg_disc, vha, 0x2016,
2244 "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2245 ct_rsp->header.reason_code,
2246 ct_rsp->header.explanation_code);
2248 } else {
2249 ql_dbg(ql_dbg_disc, vha, 0x20b9,
2250 "RHBA FDMI V2 exiting normally.\n");
2253 return rval;
2257  * qla2x00_fdmi_dhba() - perform DHBA FDMI HBA de-registration
2258 * @vha: HA context
2260 * Returns 0 on success.
2262 static int
2263 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2265 int rval;
2266 struct qla_hw_data *ha = vha->hw;
2267 ms_iocb_entry_t *ms_pkt;
2268 struct ct_sns_req *ct_req;
2269 struct ct_sns_rsp *ct_rsp;
2271 	/* Issue DHBA */
2272 /* Prepare common MS IOCB */
2273 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2274 DHBA_RSP_SIZE);
2276 /* Prepare CT request */
2277 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2278 ct_rsp = &ha->ct_sns->p.rsp;
2280 /* Prepare FDMI command arguments -- portname. */
2281 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2283 ql_dbg(ql_dbg_disc, vha, 0x2036,
2284 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2286 /* Execute MS IOCB */
2287 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2288 sizeof(ms_iocb_entry_t));
2289 if (rval != QLA_SUCCESS) {
2290 /*EMPTY*/
2291 ql_dbg(ql_dbg_disc, vha, 0x2037,
2292 "DHBA issue IOCB failed (%d).\n", rval);
2293 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2294 QLA_SUCCESS) {
2295 rval = QLA_FUNCTION_FAILED;
2296 } else {
2297 ql_dbg(ql_dbg_disc, vha, 0x2038,
2298 "DHBA exiting normally.\n");
2301 return rval;
2305  * qla2x00_fdmiv2_rpa() - perform RPA FDMI v2 registration
2306 * @vha: HA context
2308 * Returns 0 on success.
2310 static int
2311 qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
2313 int rval, alen;
2314 uint32_t size;
2315 struct qla_hw_data *ha = vha->hw;
2316 ms_iocb_entry_t *ms_pkt;
2317 struct ct_sns_req *ct_req;
2318 struct ct_sns_rsp *ct_rsp;
2319 void *entries;
2320 struct ct_fdmiv2_port_attr *eiter;
2321 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
2322 struct new_utsname *p_sysid = NULL;
2324 /* Issue RPA */
2325 /* Prepare common MS IOCB */
2326 /* Request size adjusted after CT preparation */
2327 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
2329 /* Prepare CT request */
2330 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
2331 ct_rsp = &ha->ct_sns->p.rsp;
2333 /* Prepare FDMI command arguments -- attribute block, attributes. */
2334 memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
2335 size = WWN_SIZE + 4;
2337 /* Attributes */
2338 ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
2339 entries = ct_req->req.rpa2.port_name;
2341 /* FC4 types. */
2342 eiter = entries + size;
2343 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
2344 eiter->len = cpu_to_be16(4 + 32);
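	/*
	 * The FC-4 TYPEs attribute is a 32-byte bitmap, one bit per FC-4
	 * type code; with the big-endian word layout used here, byte 2
	 * bit 0 corresponds to type 0x08 (FCP-SCSI) and byte 6 bit 0 to
	 * type 0x28 (NVMe), set below when NVMe is enabled.
	 */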
2345 eiter->a.fc4_types[2] = 0x01;
2346 size += 4 + 32;
2348 ql_dbg(ql_dbg_disc, vha, 0x20ba,
2349 "FC4_TYPES=%02x %02x.\n",
2350 eiter->a.fc4_types[2],
2351 eiter->a.fc4_types[1]);
2353 if (vha->flags.nvme_enabled) {
2354 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
2355 ql_dbg(ql_dbg_disc, vha, 0x211f,
2356 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2357 eiter->a.fc4_types[6]);
2360 /* Supported speed. */
2361 eiter = entries + size;
2362 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
2363 eiter->len = cpu_to_be16(4 + 4);
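	/*
	 * Advertise the supported-speed bitmask appropriate to the ISP
	 * family; CNA (FCoE) parts report 10 Gb only.
	 */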
2364 if (IS_CNA_CAPABLE(ha))
2365 eiter->a.sup_speed = cpu_to_be32(
2366 FDMI_PORT_SPEED_10GB);
2367 else if (IS_QLA27XX(ha))
2368 eiter->a.sup_speed = cpu_to_be32(
2369 FDMI_PORT_SPEED_32GB|
2370 FDMI_PORT_SPEED_16GB|
2371 FDMI_PORT_SPEED_8GB);
2372 else if (IS_QLA2031(ha))
2373 eiter->a.sup_speed = cpu_to_be32(
2374 FDMI_PORT_SPEED_16GB|
2375 FDMI_PORT_SPEED_8GB|
2376 FDMI_PORT_SPEED_4GB);
2377 else if (IS_QLA25XX(ha))
2378 eiter->a.sup_speed = cpu_to_be32(
2379 FDMI_PORT_SPEED_8GB|
2380 FDMI_PORT_SPEED_4GB|
2381 FDMI_PORT_SPEED_2GB|
2382 FDMI_PORT_SPEED_1GB);
2383 else if (IS_QLA24XX_TYPE(ha))
2384 eiter->a.sup_speed = cpu_to_be32(
2385 FDMI_PORT_SPEED_4GB|
2386 FDMI_PORT_SPEED_2GB|
2387 FDMI_PORT_SPEED_1GB);
2388 else if (IS_QLA23XX(ha))
2389 eiter->a.sup_speed = cpu_to_be32(
2390 FDMI_PORT_SPEED_2GB|
2391 FDMI_PORT_SPEED_1GB);
2392 else
2393 eiter->a.sup_speed = cpu_to_be32(
2394 FDMI_PORT_SPEED_1GB);
2395 size += 4 + 4;
2397 ql_dbg(ql_dbg_disc, vha, 0x20bb,
2398 "Supported Port Speed = %x.\n", eiter->a.sup_speed);
2400 /* Current speed. */
2401 eiter = entries + size;
2402 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
2403 eiter->len = cpu_to_be16(4 + 4);
2404 switch (ha->link_data_rate) {
2405 case PORT_SPEED_1GB:
2406 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
2407 break;
2408 case PORT_SPEED_2GB:
2409 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
2410 break;
2411 case PORT_SPEED_4GB:
2412 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
2413 break;
2414 case PORT_SPEED_8GB:
2415 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
2416 break;
2417 case PORT_SPEED_10GB:
2418 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
2419 break;
2420 case PORT_SPEED_16GB:
2421 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
2422 break;
2423 case PORT_SPEED_32GB:
2424 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
2425 break;
2426 default:
2427 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
2428 break;
2430 size += 4 + 4;
2432 ql_dbg(ql_dbg_disc, vha, 0x2017,
2433 "Current_Speed = %x.\n", eiter->a.cur_speed);
2435 /* Max frame size. */
2436 eiter = entries + size;
2437 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
2438 eiter->len = cpu_to_be16(4 + 4);
2439 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
2440 le16_to_cpu(icb24->frame_payload_size):
2441 le16_to_cpu(ha->init_cb->frame_payload_size);
2442 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
2443 size += 4 + 4;
2445 ql_dbg(ql_dbg_disc, vha, 0x20bc,
2446 "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
2448 /* OS device name. */
2449 eiter = entries + size;
2450 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
2451 alen = strlen(QLA2XXX_DRIVER_NAME);
2452 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
2453 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
2454 alen += 4 - (alen & 3);
2455 eiter->len = cpu_to_be16(4 + alen);
2456 size += 4 + alen;
2458 ql_dbg(ql_dbg_disc, vha, 0x20be,
2459 "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
2461 /* Hostname. */
2462 eiter = entries + size;
2463 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
2464 p_sysid = utsname();
2465 if (p_sysid) {
2466 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2467 "%s", p_sysid->nodename);
2468 } else {
2469 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2470 "%s", fc_host_system_hostname(vha->host));
2472 alen = strlen(eiter->a.host_name);
2473 alen += 4 - (alen & 3);
2474 eiter->len = cpu_to_be16(4 + alen);
2475 size += 4 + alen;
2477 ql_dbg(ql_dbg_disc, vha, 0x201a,
2478 "HostName=%s.\n", eiter->a.host_name);
2480 /* Node Name */
2481 eiter = entries + size;
2482 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
2483 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
2484 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2485 size += 4 + WWN_SIZE;
2487 ql_dbg(ql_dbg_disc, vha, 0x20c0,
2488 "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
2490 /* Port Name */
2491 eiter = entries + size;
2492 eiter->type = cpu_to_be16(FDMI_PORT_NAME);
2493 memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
2494 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2495 size += 4 + WWN_SIZE;
2497 ql_dbg(ql_dbg_disc, vha, 0x20c1,
2498 "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
2500 /* Port Symbolic Name */
2501 eiter = entries + size;
2502 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
2503 qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
2504 sizeof(eiter->a.port_sym_name));
2505 alen = strlen(eiter->a.port_sym_name);
2506 alen += 4 - (alen & 3);
2507 eiter->len = cpu_to_be16(4 + alen);
2508 size += 4 + alen;
2510 ql_dbg(ql_dbg_disc, vha, 0x20c2,
2511 "port symbolic name = %s\n", eiter->a.port_sym_name);
2513 /* Port Type */
2514 eiter = entries + size;
2515 eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
2516 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
2517 eiter->len = cpu_to_be16(4 + 4);
2518 size += 4 + 4;
2520 ql_dbg(ql_dbg_disc, vha, 0x20c3,
2521 "Port Type = %x.\n", eiter->a.port_type);
2523 /* Class of Service */
2524 eiter = entries + size;
2525 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
2526 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
2527 eiter->len = cpu_to_be16(4 + 4);
2528 size += 4 + 4;
2530 ql_dbg(ql_dbg_disc, vha, 0x20c4,
2531 "Supported COS = %08x\n", eiter->a.port_supported_cos);
2533 /* Port Fabric Name */
2534 eiter = entries + size;
2535 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2536 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2537 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2538 size += 4 + WWN_SIZE;
2540 ql_dbg(ql_dbg_disc, vha, 0x20c5,
2541 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2543 /* FC4_type */
2544 eiter = entries + size;
2545 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
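	/* Same 32-byte FC-4 type bitmap layout as the FC-4 TYPEs attribute above. */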
2546 eiter->a.port_fc4_type[0] = 0;
2547 eiter->a.port_fc4_type[1] = 0;
2548 eiter->a.port_fc4_type[2] = 1;
2549 eiter->a.port_fc4_type[3] = 0;
2550 eiter->len = cpu_to_be16(4 + 32);
2551 size += 4 + 32;
2553 ql_dbg(ql_dbg_disc, vha, 0x20c6,
2554 "Port Active FC4 Type = %02x %02x.\n",
2555 eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
2557 if (vha->flags.nvme_enabled) {
2558 eiter->a.port_fc4_type[4] = 0;
2559 eiter->a.port_fc4_type[5] = 0;
2560 eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
2561 ql_dbg(ql_dbg_disc, vha, 0x2120,
2562 "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2563 eiter->a.port_fc4_type[6]);
2566 /* Port State */
2567 eiter = entries + size;
2568 eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2569 eiter->a.port_state = cpu_to_be32(1);
2570 eiter->len = cpu_to_be16(4 + 4);
2571 size += 4 + 4;
2573 ql_dbg(ql_dbg_disc, vha, 0x20c7,
2574 "Port State = %x.\n", eiter->a.port_state);
2576 /* Number of Ports */
2577 eiter = entries + size;
2578 eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2579 eiter->a.num_ports = cpu_to_be32(1);
2580 eiter->len = cpu_to_be16(4 + 4);
2581 size += 4 + 4;
2583 ql_dbg(ql_dbg_disc, vha, 0x20c8,
2584 "Number of ports = %x.\n", eiter->a.num_ports);
2586 /* Port Id */
2587 eiter = entries + size;
2588 eiter->type = cpu_to_be16(FDMI_PORT_ID);
2589 eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2590 eiter->len = cpu_to_be16(4 + 4);
2591 size += 4 + 4;
2593 ql_dbg(ql_dbg_disc, vha, 0x201c,
2594 "Port Id = %x.\n", eiter->a.port_id);
2596 /* Update MS request size. */
2597 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2599 ql_dbg(ql_dbg_disc, vha, 0x2018,
2600 "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
2601 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
2602 entries, size);
2604 /* Execute MS IOCB */
2605 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2606 sizeof(ms_iocb_entry_t));
2607 if (rval != QLA_SUCCESS) {
2608 /*EMPTY*/
2609 ql_dbg(ql_dbg_disc, vha, 0x20cb,
2610 "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
2611 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
2612 QLA_SUCCESS) {
2613 rval = QLA_FUNCTION_FAILED;
2614 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2615 ct_rsp->header.explanation_code ==
2616 CT_EXPL_ALREADY_REGISTERED) {
2617 ql_dbg(ql_dbg_disc, vha, 0x20ce,
2618 "RPA FDMI v2 already registered\n");
2619 rval = QLA_ALREADY_REGISTERED;
2620 } else {
2621 ql_dbg(ql_dbg_disc, vha, 0x2020,
2622 "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2623 ct_rsp->header.reason_code,
2624 ct_rsp->header.explanation_code);
2626 } else {
2627 ql_dbg(ql_dbg_disc, vha, 0x20cc,
2628 "RPA FDMI V2 exiting normally.\n");
2631 return rval;
2635  * qla2x00_fdmi_register() - perform FDMI registration with the fabric management server
2636 * @vha: HA context
2638 * Returns 0 on success.
2641 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2643 int rval = QLA_FUNCTION_FAILED;
2644 struct qla_hw_data *ha = vha->hw;
2646 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2647 IS_QLAFX00(ha))
2648 return QLA_FUNCTION_FAILED;
2650 rval = qla2x00_mgmt_svr_login(vha);
2651 if (rval)
2652 return rval;
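	/*
	 * Try FDMI v2 registration first.  If the HBA is already
	 * registered, de-register (DHBA) and register again; any other
	 * v2 failure falls back to the original FDMI v1 commands below.
	 */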
2654 rval = qla2x00_fdmiv2_rhba(vha);
2655 if (rval) {
2656 if (rval != QLA_ALREADY_REGISTERED)
2657 goto try_fdmi;
2659 rval = qla2x00_fdmi_dhba(vha);
2660 if (rval)
2661 goto try_fdmi;
2663 rval = qla2x00_fdmiv2_rhba(vha);
2664 if (rval)
2665 goto try_fdmi;
2667 rval = qla2x00_fdmiv2_rpa(vha);
2668 if (rval)
2669 goto try_fdmi;
2671 goto out;
2673 try_fdmi:
2674 rval = qla2x00_fdmi_rhba(vha);
2675 if (rval) {
2676 if (rval != QLA_ALREADY_REGISTERED)
2677 return rval;
2679 rval = qla2x00_fdmi_dhba(vha);
2680 if (rval)
2681 return rval;
2683 rval = qla2x00_fdmi_rhba(vha);
2684 if (rval)
2685 return rval;
2687 rval = qla2x00_fdmi_rpa(vha);
2688 out:
2689 return rval;
2693 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2694 * @vha: HA context
2695 * @list: switch info entries to populate
2697 * Returns 0 on success.
2700 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2702 int rval = QLA_SUCCESS;
2703 uint16_t i;
2704 struct qla_hw_data *ha = vha->hw;
2705 ms_iocb_entry_t *ms_pkt;
2706 struct ct_sns_req *ct_req;
2707 struct ct_sns_rsp *ct_rsp;
2708 struct ct_arg arg;
2710 if (!IS_IIDMA_CAPABLE(ha))
2711 return QLA_FUNCTION_FAILED;
2713 arg.iocb = ha->ms_iocb;
2714 arg.req_dma = ha->ct_sns_dma;
2715 arg.rsp_dma = ha->ct_sns_dma;
2716 arg.req_size = GFPN_ID_REQ_SIZE;
2717 arg.rsp_size = GFPN_ID_RSP_SIZE;
2718 arg.nport_handle = NPH_SNS;
2720 for (i = 0; i < ha->max_fibre_devices; i++) {
2721 /* Issue GFPN_ID */
2722 /* Prepare common MS IOCB */
2723 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2725 /* Prepare CT request */
2726 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2727 GFPN_ID_RSP_SIZE);
2728 ct_rsp = &ha->ct_sns->p.rsp;
2730 /* Prepare CT arguments -- port_id */
2731 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
2732 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
2733 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
2735 /* Execute MS IOCB */
2736 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2737 sizeof(ms_iocb_entry_t));
2738 if (rval != QLA_SUCCESS) {
2739 /*EMPTY*/
2740 ql_dbg(ql_dbg_disc, vha, 0x2023,
2741 "GFPN_ID issue IOCB failed (%d).\n", rval);
2742 break;
2743 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2744 "GFPN_ID") != QLA_SUCCESS) {
2745 rval = QLA_FUNCTION_FAILED;
2746 break;
2747 } else {
2748 /* Save fabric portname */
2749 memcpy(list[i].fabric_port_name,
2750 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
2753 /* Last device exit. */
2754 if (list[i].d_id.b.rsvd_1 != 0)
2755 break;
2758 return (rval);
2762 static inline struct ct_sns_req *
2763 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2764 uint16_t rsp_size)
2766 memset(p, 0, sizeof(struct ct_sns_pkt));
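	/*
	 * gs_type 0xFA with subtype 0x01 addresses the Fabric Configuration
	 * Server within the management service; max_rsp_size is expressed
	 * in 4-byte words and excludes the 16-byte CT_IU preamble.
	 */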
2768 p->p.req.header.revision = 0x01;
2769 p->p.req.header.gs_type = 0xFA;
2770 p->p.req.header.gs_subtype = 0x01;
2771 p->p.req.command = cpu_to_be16(cmd);
2772 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
2774 return &p->p.req;
2778 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2779 * @vha: HA context
2780 * @list: switch info entries to populate
2782 * Returns 0 on success.
2785 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2787 int rval;
2788 uint16_t i;
2789 struct qla_hw_data *ha = vha->hw;
2790 ms_iocb_entry_t *ms_pkt;
2791 struct ct_sns_req *ct_req;
2792 struct ct_sns_rsp *ct_rsp;
2793 struct ct_arg arg;
2795 if (!IS_IIDMA_CAPABLE(ha))
2796 return QLA_FUNCTION_FAILED;
2797 if (!ha->flags.gpsc_supported)
2798 return QLA_FUNCTION_FAILED;
2800 rval = qla2x00_mgmt_svr_login(vha);
2801 if (rval)
2802 return rval;
2804 arg.iocb = ha->ms_iocb;
2805 arg.req_dma = ha->ct_sns_dma;
2806 arg.rsp_dma = ha->ct_sns_dma;
2807 arg.req_size = GPSC_REQ_SIZE;
2808 arg.rsp_size = GPSC_RSP_SIZE;
2809 arg.nport_handle = vha->mgmt_svr_loop_id;
2811 for (i = 0; i < ha->max_fibre_devices; i++) {
2812 		/* Issue GPSC */
2813 /* Prepare common MS IOCB */
2814 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2816 /* Prepare CT request */
2817 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2818 GPSC_RSP_SIZE);
2819 ct_rsp = &ha->ct_sns->p.rsp;
2821 /* Prepare CT arguments -- port_name */
2822 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2823 WWN_SIZE);
2825 /* Execute MS IOCB */
2826 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2827 sizeof(ms_iocb_entry_t));
2828 if (rval != QLA_SUCCESS) {
2829 /*EMPTY*/
2830 ql_dbg(ql_dbg_disc, vha, 0x2059,
2831 "GPSC issue IOCB failed (%d).\n", rval);
2832 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2833 "GPSC")) != QLA_SUCCESS) {
2834 /* FM command unsupported? */
2835 if (rval == QLA_INVALID_COMMAND &&
2836 (ct_rsp->header.reason_code ==
2837 CT_REASON_INVALID_COMMAND_CODE ||
2838 ct_rsp->header.reason_code ==
2839 CT_REASON_COMMAND_UNSUPPORTED)) {
2840 ql_dbg(ql_dbg_disc, vha, 0x205a,
2841 "GPSC command unsupported, disabling "
2842 "query.\n");
2843 ha->flags.gpsc_supported = 0;
2844 rval = QLA_FUNCTION_FAILED;
2845 break;
2847 rval = QLA_FUNCTION_FAILED;
2848 } else {
2849 /* Save port-speed */
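			/*
			 * GPSC returns 16-bit speed masks with one bit per
			 * supported speed, decoded below (bit 15 corresponds
			 * to 1 Gb/s); the "speed" word normally carries a
			 * single set bit for the current operating speed.
			 */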
2850 switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
2851 case BIT_15:
2852 list[i].fp_speed = PORT_SPEED_1GB;
2853 break;
2854 case BIT_14:
2855 list[i].fp_speed = PORT_SPEED_2GB;
2856 break;
2857 case BIT_13:
2858 list[i].fp_speed = PORT_SPEED_4GB;
2859 break;
2860 case BIT_12:
2861 list[i].fp_speed = PORT_SPEED_10GB;
2862 break;
2863 case BIT_11:
2864 list[i].fp_speed = PORT_SPEED_8GB;
2865 break;
2866 case BIT_10:
2867 list[i].fp_speed = PORT_SPEED_16GB;
2868 break;
2869 case BIT_8:
2870 list[i].fp_speed = PORT_SPEED_32GB;
2871 break;
2874 ql_dbg(ql_dbg_disc, vha, 0x205b,
2875 "GPSC ext entry - fpn "
2876 "%8phN speeds=%04x speed=%04x.\n",
2877 list[i].fabric_port_name,
2878 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2879 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2882 /* Last device exit. */
2883 if (list[i].d_id.b.rsvd_1 != 0)
2884 break;
2887 return (rval);
2891 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2893 * @vha: HA context
2894 * @list: switch info entries to populate
2897 void
2898 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2900 int rval;
2901 uint16_t i;
2903 ms_iocb_entry_t *ms_pkt;
2904 struct ct_sns_req *ct_req;
2905 struct ct_sns_rsp *ct_rsp;
2906 struct qla_hw_data *ha = vha->hw;
2907 uint8_t fcp_scsi_features = 0;
2908 struct ct_arg arg;
2910 for (i = 0; i < ha->max_fibre_devices; i++) {
2911 		/* Default the FC4 type to UNKNOWN so the port is still
2912 		 * processed if GFF_ID cannot be issued or fails. */
2913 list[i].fc4_type = FC4_TYPE_UNKNOWN;
2915 /* Do not attempt GFF_ID if we are not FWI_2 capable */
2916 if (!IS_FWI2_CAPABLE(ha))
2917 continue;
2919 arg.iocb = ha->ms_iocb;
2920 arg.req_dma = ha->ct_sns_dma;
2921 arg.rsp_dma = ha->ct_sns_dma;
2922 arg.req_size = GFF_ID_REQ_SIZE;
2923 arg.rsp_size = GFF_ID_RSP_SIZE;
2924 arg.nport_handle = NPH_SNS;
2926 /* Prepare common MS IOCB */
2927 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2929 /* Prepare CT request */
2930 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2931 GFF_ID_RSP_SIZE);
2932 ct_rsp = &ha->ct_sns->p.rsp;
2934 /* Prepare CT arguments -- port_id */
2935 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
2936 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
2937 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
2939 /* Execute MS IOCB */
2940 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2941 sizeof(ms_iocb_entry_t));
2943 if (rval != QLA_SUCCESS) {
2944 ql_dbg(ql_dbg_disc, vha, 0x205c,
2945 "GFF_ID issue IOCB failed (%d).\n", rval);
2946 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2947 "GFF_ID") != QLA_SUCCESS) {
2948 ql_dbg(ql_dbg_disc, vha, 0x205d,
2949 "GFF_ID IOCB status had a failure status code.\n");
2950 } else {
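			/*
			 * The GFF_ID response carries a 4-bit feature nibble
			 * per FC-4 type: GFF_FCP_SCSI_OFFSET indexes the FCP
			 * (type 0x08) entry and GFF_NVME_OFFSET the NVMe
			 * (type 0x28) entry; a nonzero nibble means the port
			 * registered features for that protocol.
			 */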
2951 fcp_scsi_features =
2952 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2953 fcp_scsi_features &= 0x0f;
2955 if (fcp_scsi_features)
2956 list[i].fc4_type = FC4_TYPE_FCP_SCSI;
2957 else
2958 list[i].fc4_type = FC4_TYPE_OTHER;
2960 list[i].fc4f_nvme =
2961 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2962 list[i].fc4f_nvme &= 0xf;
2965 /* Last device exit. */
2966 if (list[i].d_id.b.rsvd_1 != 0)
2967 break;
2971 /* GID_PN completion processing. */
2972 void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
2974 fc_port_t *fcport = ea->fcport;
2976 ql_dbg(ql_dbg_disc, vha, 0x201d,
2977 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2978 __func__, fcport->port_name, fcport->disc_state,
2979 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
2980 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
2982 if (fcport->disc_state == DSC_DELETE_PEND)
2983 return;
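	/*
	 * sp->gen2/gen1 snapshot the login and RSCN generation counters at
	 * submit time; a mismatch here means the port state changed while
	 * the GID_PN was in flight and the result is stale.
	 */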
2985 if (ea->sp->gen2 != fcport->login_gen) {
2986 /* PLOGI/PRLI/LOGO came in while cmd was out.*/
2987 ql_dbg(ql_dbg_disc, vha, 0x201e,
2988 "%s %8phC generation changed rscn %d|%d n",
2989 __func__, fcport->port_name, fcport->last_rscn_gen,
2990 fcport->rscn_gen);
2991 return;
2994 if (!ea->rc) {
2995 if (ea->sp->gen1 == fcport->rscn_gen) {
2996 fcport->scan_state = QLA_FCPORT_FOUND;
2997 fcport->flags |= FCF_FABRIC_DEVICE;
2999 if (fcport->d_id.b24 == ea->id.b24) {
3000 /* cable plugged into the same place */
3001 switch (vha->host->active_mode) {
3002 case MODE_TARGET:
3003 if (fcport->fw_login_state ==
3004 DSC_LS_PRLI_COMP) {
3005 u16 data[2];
3007 * Late RSCN was delivered.
3008 					 * Remote port is already logged in.
3010 ql_dbg(ql_dbg_disc, vha, 0x201f,
3011 "%s %d %8phC post adisc\n",
3012 __func__, __LINE__,
3013 fcport->port_name);
3014 data[0] = data[1] = 0;
3015 qla2x00_post_async_adisc_work(
3016 vha, fcport, data);
3018 break;
3019 case MODE_INITIATOR:
3020 case MODE_DUAL:
3021 default:
3022 ql_dbg(ql_dbg_disc, vha, 0x201f,
3023 "%s %d %8phC post %s\n", __func__,
3024 __LINE__, fcport->port_name,
3025 (atomic_read(&fcport->state) ==
3026 FCS_ONLINE) ? "adisc" : "gnl");
3028 if (atomic_read(&fcport->state) ==
3029 FCS_ONLINE) {
3030 u16 data[2];
3032 data[0] = data[1] = 0;
3033 qla2x00_post_async_adisc_work(
3034 vha, fcport, data);
3035 } else {
3036 qla24xx_post_gnl_work(vha,
3037 fcport);
3039 break;
3041 } else { /* fcport->d_id.b24 != ea->id.b24 */
3042 fcport->d_id.b24 = ea->id.b24;
3043 fcport->id_changed = 1;
3044 if (fcport->deleted != QLA_SESS_DELETED) {
3045 ql_dbg(ql_dbg_disc, vha, 0x2021,
3046 "%s %d %8phC post del sess\n",
3047 __func__, __LINE__, fcport->port_name);
3048 qlt_schedule_sess_for_deletion(fcport);
3051 } else { /* ea->sp->gen1 != fcport->rscn_gen */
3052 ql_dbg(ql_dbg_disc, vha, 0x2022,
3053 "%s %d %8phC post gidpn\n",
3054 __func__, __LINE__, fcport->port_name);
3055 /* rscn came in while cmd was out */
3056 qla24xx_post_gidpn_work(vha, fcport);
3058 } else { /* ea->rc */
3059 /* cable pulled */
3060 if (ea->sp->gen1 == fcport->rscn_gen) {
3061 if (ea->sp->gen2 == fcport->login_gen) {
3062 ql_dbg(ql_dbg_disc, vha, 0x2042,
3063 "%s %d %8phC post del sess\n", __func__,
3064 __LINE__, fcport->port_name);
3065 qlt_schedule_sess_for_deletion(fcport);
3066 } else {
3067 ql_dbg(ql_dbg_disc, vha, 0x2045,
3068 "%s %d %8phC login\n", __func__, __LINE__,
3069 fcport->port_name);
3070 qla24xx_fcport_handle_login(vha, fcport);
3072 } else {
3073 ql_dbg(ql_dbg_disc, vha, 0x2049,
3074 "%s %d %8phC post gidpn\n", __func__, __LINE__,
3075 fcport->port_name);
3076 qla24xx_post_gidpn_work(vha, fcport);
3079 } /* gidpn_event */
3081 static void qla2x00_async_gidpn_sp_done(void *s, int res)
3083 struct srb *sp = s;
3084 struct scsi_qla_host *vha = sp->vha;
3085 fc_port_t *fcport = sp->fcport;
3086 u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
3087 struct event_arg ea;
3089 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3091 memset(&ea, 0, sizeof(ea));
3092 ea.fcport = fcport;
3093 ea.id.b.domain = id[0];
3094 ea.id.b.area = id[1];
3095 ea.id.b.al_pa = id[2];
3096 ea.sp = sp;
3097 ea.rc = res;
3098 ea.event = FCME_GIDPN_DONE;
3100 if (res == QLA_FUNCTION_TIMEOUT) {
3101 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3102 "Async done-%s WWPN %8phC timed out.\n",
3103 sp->name, fcport->port_name);
3104 qla24xx_post_gidpn_work(sp->vha, fcport);
3105 sp->free(sp);
3106 return;
3107 } else if (res) {
3108 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3109 "Async done-%s fail res %x, WWPN %8phC\n",
3110 sp->name, res, fcport->port_name);
3111 } else {
3112 ql_dbg(ql_dbg_disc, vha, 0x204f,
3113 "Async done-%s good WWPN %8phC ID %3phC\n",
3114 sp->name, fcport->port_name, id);
3117 qla2x00_fcport_event_handler(vha, &ea);
3119 sp->free(sp);
3122 int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
3124 int rval = QLA_FUNCTION_FAILED;
3125 struct ct_sns_req *ct_req;
3126 srb_t *sp;
3128 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3129 return rval;
3131 fcport->disc_state = DSC_GID_PN;
3132 fcport->scan_state = QLA_FCPORT_SCAN;
3133 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
3134 if (!sp)
3135 goto done;
3137 fcport->flags |= FCF_ASYNC_SENT;
3138 sp->type = SRB_CT_PTHRU_CMD;
3139 sp->name = "gidpn";
3140 sp->gen1 = fcport->rscn_gen;
3141 sp->gen2 = fcport->login_gen;
3143 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3145 /* CT_IU preamble */
3146 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD,
3147 GID_PN_RSP_SIZE);
3149 /* GIDPN req */
3150 memcpy(ct_req->req.gid_pn.port_name, fcport->port_name,
3151 WWN_SIZE);
3153 /* req & rsp use the same buffer */
3154 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3155 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3156 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3157 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3158 sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE;
3159 sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE;
3160 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3162 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3163 sp->done = qla2x00_async_gidpn_sp_done;
3165 rval = qla2x00_start_sp(sp);
3166 if (rval != QLA_SUCCESS)
3167 goto done_free_sp;
3169 ql_dbg(ql_dbg_disc, vha, 0x20a4,
3170 "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
3171 sp->name, fcport->port_name,
3172 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
3173 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3174 return rval;
3176 done_free_sp:
3177 sp->free(sp);
3178 fcport->flags &= ~FCF_ASYNC_SENT;
3179 done:
3180 fcport->flags &= ~FCF_ASYNC_ACTIVE;
3181 return rval;
3184 int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
3186 struct qla_work_evt *e;
3187 int ls;
3189 ls = atomic_read(&vha->loop_state);
3190 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
3191 test_bit(UNLOADING, &vha->dpc_flags))
3192 return 0;
3194 e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN);
3195 if (!e)
3196 return QLA_FUNCTION_FAILED;
3198 e->u.fcport.fcport = fcport;
3199 fcport->flags |= FCF_ASYNC_ACTIVE;
3200 return qla2x00_post_work(vha, e);
3203 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
3205 struct qla_work_evt *e;
3207 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
3208 if (!e)
3209 return QLA_FUNCTION_FAILED;
3211 e->u.fcport.fcport = fcport;
3212 fcport->flags |= FCF_ASYNC_ACTIVE;
3213 return qla2x00_post_work(vha, e);
3216 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
3218 struct fc_port *fcport = ea->fcport;
3220 ql_dbg(ql_dbg_disc, vha, 0x20d8,
3221 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
3222 __func__, fcport->port_name, fcport->disc_state,
3223 	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
3224 	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
3226 if (fcport->disc_state == DSC_DELETE_PEND)
3227 return;
3229 if (ea->sp->gen2 != fcport->login_gen) {
3230 /* target side must have changed it. */
3231 ql_dbg(ql_dbg_disc, vha, 0x20d3,
3232 "%s %8phC generation changed\n",
3233 __func__, fcport->port_name);
3234 return;
3235 } else if (ea->sp->gen1 != fcport->rscn_gen) {
3236 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
3237 __func__, __LINE__, fcport->port_name);
3238 qla24xx_post_gidpn_work(vha, fcport);
3239 return;
3242 qla24xx_post_upd_fcport_work(vha, ea->fcport);
3245 static void qla24xx_async_gpsc_sp_done(void *s, int res)
3247 struct srb *sp = s;
3248 struct scsi_qla_host *vha = sp->vha;
3249 struct qla_hw_data *ha = vha->hw;
3250 fc_port_t *fcport = sp->fcport;
3251 struct ct_sns_rsp *ct_rsp;
3252 struct event_arg ea;
3254 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3256 ql_dbg(ql_dbg_disc, vha, 0x2053,
3257 "Async done-%s res %x, WWPN %8phC \n",
3258 sp->name, res, fcport->port_name);
3260 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3262 if (res == (DID_ERROR << 16)) {
3263 /* entry status error */
3264 goto done;
3265 } else if (res) {
3266 if ((ct_rsp->header.reason_code ==
3267 CT_REASON_INVALID_COMMAND_CODE) ||
3268 (ct_rsp->header.reason_code ==
3269 CT_REASON_COMMAND_UNSUPPORTED)) {
3270 ql_dbg(ql_dbg_disc, vha, 0x2019,
3271 "GPSC command unsupported, disabling query.\n");
3272 ha->flags.gpsc_supported = 0;
3273 res = QLA_SUCCESS;
3275 } else {
3276 switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
3277 case BIT_15:
3278 fcport->fp_speed = PORT_SPEED_1GB;
3279 break;
3280 case BIT_14:
3281 fcport->fp_speed = PORT_SPEED_2GB;
3282 break;
3283 case BIT_13:
3284 fcport->fp_speed = PORT_SPEED_4GB;
3285 break;
3286 case BIT_12:
3287 fcport->fp_speed = PORT_SPEED_10GB;
3288 break;
3289 case BIT_11:
3290 fcport->fp_speed = PORT_SPEED_8GB;
3291 break;
3292 case BIT_10:
3293 fcport->fp_speed = PORT_SPEED_16GB;
3294 break;
3295 case BIT_8:
3296 fcport->fp_speed = PORT_SPEED_32GB;
3297 break;
3300 ql_dbg(ql_dbg_disc, vha, 0x2054,
3301 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
3302 sp->name, fcport->fabric_port_name,
3303 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
3304 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3306 done:
3307 memset(&ea, 0, sizeof(ea));
3308 ea.event = FCME_GPSC_DONE;
3309 ea.rc = res;
3310 ea.fcport = fcport;
3311 ea.sp = sp;
3312 qla2x00_fcport_event_handler(vha, &ea);
3314 sp->free(sp);
3317 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
3319 int rval = QLA_FUNCTION_FAILED;
3320 struct ct_sns_req *ct_req;
3321 srb_t *sp;
3323 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3324 return rval;
3326 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3327 if (!sp)
3328 goto done;
3330 fcport->flags |= FCF_ASYNC_SENT;
3331 sp->type = SRB_CT_PTHRU_CMD;
3332 sp->name = "gpsc";
3333 sp->gen1 = fcport->rscn_gen;
3334 sp->gen2 = fcport->login_gen;
3336 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3338 /* CT_IU preamble */
3339 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
3340 GPSC_RSP_SIZE);
3342 /* GPSC req */
3343 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
3344 WWN_SIZE);
3346 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3347 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3348 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3349 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3350 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
3351 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
3352 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
3354 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3355 sp->done = qla24xx_async_gpsc_sp_done;
3357 rval = qla2x00_start_sp(sp);
3358 if (rval != QLA_SUCCESS)
3359 goto done_free_sp;
3361 ql_dbg(ql_dbg_disc, vha, 0x205e,
3362 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
3363 sp->name, fcport->port_name, sp->handle,
3364 fcport->loop_id, fcport->d_id.b.domain,
3365 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3366 return rval;
3368 done_free_sp:
3369 sp->free(sp);
3370 fcport->flags &= ~FCF_ASYNC_SENT;
3371 done:
3372 fcport->flags &= ~FCF_ASYNC_ACTIVE;
3373 return rval;
3376 int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
3378 struct qla_work_evt *e;
3380 if (test_bit(UNLOADING, &vha->dpc_flags))
3381 return 0;
3383 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
3384 if (!e)
3385 return QLA_FUNCTION_FAILED;
3387 e->u.gpnid.id = *id;
3388 return qla2x00_post_work(vha, e);
3391 void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
3393 if (sp->u.iocb_cmd.u.ctarg.req) {
3394 dma_free_coherent(&vha->hw->pdev->dev,
3395 sizeof(struct ct_sns_pkt),
3396 sp->u.iocb_cmd.u.ctarg.req,
3397 sp->u.iocb_cmd.u.ctarg.req_dma);
3398 sp->u.iocb_cmd.u.ctarg.req = NULL;
3400 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3401 dma_free_coherent(&vha->hw->pdev->dev,
3402 sizeof(struct ct_sns_pkt),
3403 sp->u.iocb_cmd.u.ctarg.rsp,
3404 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3405 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3408 sp->free(sp);
3411 void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3413 fc_port_t *fcport, *conflict, *t;
3414 u16 data[2];
3416 ql_dbg(ql_dbg_disc, vha, 0xffff,
3417 "%s %d port_id: %06x\n",
3418 __func__, __LINE__, ea->id.b24);
3420 if (ea->rc) {
3421 /* cable is disconnected */
3422 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3423 if (fcport->d_id.b24 == ea->id.b24) {
3424 ql_dbg(ql_dbg_disc, vha, 0xffff,
3425 "%s %d %8phC DS %d\n",
3426 __func__, __LINE__,
3427 fcport->port_name,
3428 fcport->disc_state);
3429 fcport->scan_state = QLA_FCPORT_SCAN;
3430 switch (fcport->disc_state) {
3431 case DSC_DELETED:
3432 case DSC_DELETE_PEND:
3433 break;
3434 default:
3435 ql_dbg(ql_dbg_disc, vha, 0xffff,
3436 "%s %d %8phC post del sess\n",
3437 __func__, __LINE__,
3438 fcport->port_name);
3439 qlt_schedule_sess_for_deletion(fcport);
3440 break;
3444 } else {
3445 /* cable is connected */
3446 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
3447 if (fcport) {
3448 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3449 list) {
3450 if ((conflict->d_id.b24 == ea->id.b24) &&
3451 (fcport != conflict)) {
3452 				/* Two fcports have conflicting Nport IDs,
3453 				 * or an existing fcport's Nport ID conflicts
3454 				 * with the new fcport.
3457 ql_dbg(ql_dbg_disc, vha, 0xffff,
3458 "%s %d %8phC DS %d\n",
3459 __func__, __LINE__,
3460 conflict->port_name,
3461 conflict->disc_state);
3462 conflict->scan_state = QLA_FCPORT_SCAN;
3463 switch (conflict->disc_state) {
3464 case DSC_DELETED:
3465 case DSC_DELETE_PEND:
3466 break;
3467 default:
3468 ql_dbg(ql_dbg_disc, vha, 0xffff,
3469 "%s %d %8phC post del sess\n",
3470 __func__, __LINE__,
3471 conflict->port_name);
3472 qlt_schedule_sess_for_deletion
3473 (conflict);
3474 break;
3479 fcport->rscn_gen++;
3480 fcport->scan_state = QLA_FCPORT_FOUND;
3481 fcport->flags |= FCF_FABRIC_DEVICE;
3482 switch (fcport->disc_state) {
3483 case DSC_LOGIN_COMPLETE:
3484 /* recheck session is still intact. */
3485 ql_dbg(ql_dbg_disc, vha, 0x210d,
3486 "%s %d %8phC revalidate session with ADISC\n",
3487 __func__, __LINE__, fcport->port_name);
3488 data[0] = data[1] = 0;
3489 qla2x00_post_async_adisc_work(vha, fcport,
3490 data);
3491 break;
3492 case DSC_DELETED:
3493 ql_dbg(ql_dbg_disc, vha, 0x210d,
3494 "%s %d %8phC login\n", __func__, __LINE__,
3495 fcport->port_name);
3496 fcport->d_id = ea->id;
3497 qla24xx_fcport_handle_login(vha, fcport);
3498 break;
3499 case DSC_DELETE_PEND:
3500 fcport->d_id = ea->id;
3501 break;
3502 default:
3503 fcport->d_id = ea->id;
3504 break;
3506 } else {
3507 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3508 list) {
3509 if (conflict->d_id.b24 == ea->id.b24) {
3510 				/* Two fcports have conflicting Nport IDs,
3511 				 * or an existing fcport's Nport ID conflicts
3512 				 * with the new fcport.
3514 ql_dbg(ql_dbg_disc, vha, 0xffff,
3515 "%s %d %8phC DS %d\n",
3516 __func__, __LINE__,
3517 conflict->port_name,
3518 conflict->disc_state);
3520 conflict->scan_state = QLA_FCPORT_SCAN;
3521 switch (conflict->disc_state) {
3522 case DSC_DELETED:
3523 case DSC_DELETE_PEND:
3524 break;
3525 default:
3526 ql_dbg(ql_dbg_disc, vha, 0xffff,
3527 "%s %d %8phC post del sess\n",
3528 __func__, __LINE__,
3529 conflict->port_name);
3530 qlt_schedule_sess_for_deletion
3531 (conflict);
3532 break;
3537 /* create new fcport */
3538 ql_dbg(ql_dbg_disc, vha, 0x2065,
3539 "%s %d %8phC post new sess\n",
3540 __func__, __LINE__, ea->port_name);
3541 qla24xx_post_newsess_work(vha, &ea->id,
3542 ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
3547 static void qla2x00_async_gpnid_sp_done(void *s, int res)
3549 struct srb *sp = s;
3550 struct scsi_qla_host *vha = sp->vha;
3551 struct ct_sns_req *ct_req =
3552 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3553 struct ct_sns_rsp *ct_rsp =
3554 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3555 struct event_arg ea;
3556 struct qla_work_evt *e;
3557 unsigned long flags;
3559 if (res)
3560 ql_dbg(ql_dbg_disc, vha, 0x2066,
3561 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3562 sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
3563 ct_rsp->rsp.gpn_id.port_name);
3564 else
3565 ql_dbg(ql_dbg_disc, vha, 0x2066,
3566 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3567 sp->name, sp->gen1, ct_req->req.port_id.port_id,
3568 ct_rsp->rsp.gpn_id.port_name);
3570 memset(&ea, 0, sizeof(ea));
3571 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
3572 ea.sp = sp;
3573 ea.id.b.domain = ct_req->req.port_id.port_id[0];
3574 ea.id.b.area = ct_req->req.port_id.port_id[1];
3575 ea.id.b.al_pa = ct_req->req.port_id.port_id[2];
3576 ea.rc = res;
3577 ea.event = FCME_GPNID_DONE;
3579 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3580 list_del(&sp->elem);
3581 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3583 if (res) {
3584 if (res == QLA_FUNCTION_TIMEOUT) {
3585 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3586 sp->free(sp);
3587 return;
3589 } else if (sp->gen1) {
3590 /* There was another RSCN for this Nport ID */
3591 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3592 sp->free(sp);
3593 return;
3596 qla2x00_fcport_event_handler(vha, &ea);
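	/*
	 * Freeing the coherent DMA buffers is deferred to a work item
	 * (QLA_EVT_UNMAP) so it happens in process context; only if that
	 * work item cannot be allocated are the buffers freed right here.
	 */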
3598 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3599 if (!e) {
3600 		/* Cannot defer the unmap; free here (and ignore any kernel warning) to avoid a memory leak. */
3601 if (sp->u.iocb_cmd.u.ctarg.req) {
3602 dma_free_coherent(&vha->hw->pdev->dev,
3603 sizeof(struct ct_sns_pkt),
3604 sp->u.iocb_cmd.u.ctarg.req,
3605 sp->u.iocb_cmd.u.ctarg.req_dma);
3606 sp->u.iocb_cmd.u.ctarg.req = NULL;
3608 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3609 dma_free_coherent(&vha->hw->pdev->dev,
3610 sizeof(struct ct_sns_pkt),
3611 sp->u.iocb_cmd.u.ctarg.rsp,
3612 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3613 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3616 sp->free(sp);
3617 return;
3620 e->u.iosb.sp = sp;
3621 qla2x00_post_work(vha, e);
3624 /* Get WWPN with Nport ID. */
3625 int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3627 int rval = QLA_FUNCTION_FAILED;
3628 struct ct_sns_req *ct_req;
3629 srb_t *sp, *tsp;
3630 struct ct_sns_pkt *ct_sns;
3631 unsigned long flags;
3633 if (!vha->flags.online)
3634 goto done;
3636 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3637 if (!sp)
3638 goto done;
3640 sp->type = SRB_CT_PTHRU_CMD;
3641 sp->name = "gpnid";
3642 sp->u.iocb_cmd.u.ctarg.id = *id;
3643 sp->gen1 = 0;
3644 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
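	/*
	 * If a GPN_ID for this port ID is already outstanding, bump its
	 * generation so its completion re-issues the query, and drop this
	 * duplicate request.
	 */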
3646 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3647 list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3648 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3649 tsp->gen1++;
3650 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3651 sp->free(sp);
3652 goto done;
3655 list_add_tail(&sp->elem, &vha->gpnid_list);
3656 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3658 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3659 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3660 GFP_KERNEL);
3661 if (!sp->u.iocb_cmd.u.ctarg.req) {
3662 ql_log(ql_log_warn, vha, 0xd041,
3663 "Failed to allocate ct_sns request.\n");
3664 goto done_free_sp;
3667 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3668 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3669 GFP_KERNEL);
3670 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3671 ql_log(ql_log_warn, vha, 0xd042,
3672 "Failed to allocate ct_sns request.\n");
3673 goto done_free_sp;
3676 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
3677 memset(ct_sns, 0, sizeof(*ct_sns));
3679 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3680 /* CT_IU preamble */
3681 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
3683 /* GPN_ID req */
3684 ct_req->req.port_id.port_id[0] = id->b.domain;
3685 ct_req->req.port_id.port_id[1] = id->b.area;
3686 ct_req->req.port_id.port_id[2] = id->b.al_pa;
3688 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
3689 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
3690 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3692 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3693 sp->done = qla2x00_async_gpnid_sp_done;
3695 rval = qla2x00_start_sp(sp);
3696 if (rval != QLA_SUCCESS)
3697 goto done_free_sp;
3699 ql_dbg(ql_dbg_disc, vha, 0x2067,
3700 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3701 sp->handle, ct_req->req.port_id.port_id);
3702 return rval;
3704 done_free_sp:
3705 if (sp->u.iocb_cmd.u.ctarg.req) {
3706 dma_free_coherent(&vha->hw->pdev->dev,
3707 sizeof(struct ct_sns_pkt),
3708 sp->u.iocb_cmd.u.ctarg.req,
3709 sp->u.iocb_cmd.u.ctarg.req_dma);
3710 sp->u.iocb_cmd.u.ctarg.req = NULL;
3712 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3713 dma_free_coherent(&vha->hw->pdev->dev,
3714 sizeof(struct ct_sns_pkt),
3715 sp->u.iocb_cmd.u.ctarg.rsp,
3716 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3717 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3720 sp->free(sp);
3721 done:
3722 return rval;
3725 void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3727 fc_port_t *fcport = ea->fcport;
3729 qla24xx_post_gnl_work(vha, fcport);
3732 void qla24xx_async_gffid_sp_done(void *s, int res)
3734 struct srb *sp = s;
3735 struct scsi_qla_host *vha = sp->vha;
3736 fc_port_t *fcport = sp->fcport;
3737 struct ct_sns_rsp *ct_rsp;
3738 struct event_arg ea;
3740 ql_dbg(ql_dbg_disc, vha, 0x2133,
3741 "Async done-%s res %x ID %x. %8phC\n",
3742 sp->name, res, fcport->d_id.b24, fcport->port_name);
3744 fcport->flags &= ~FCF_ASYNC_SENT;
3745 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3747 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3748 * The format of the FC-4 Features object, as defined by the FC-4,
3749 * Shall be an array of 4-bit values, one for each type code value
3751 if (!res) {
3752 if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
3753 /* w1 b00:03 */
3754 fcport->fc4_type =
3755 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3756 fcport->fc4_type &= 0xf;
3759 if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
3760 /* w5 [00:03]/28h */
3761 fcport->fc4f_nvme =
3762 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
3763 fcport->fc4f_nvme &= 0xf;
3767 memset(&ea, 0, sizeof(ea));
3768 ea.sp = sp;
3769 ea.fcport = sp->fcport;
3770 ea.rc = res;
3771 ea.event = FCME_GFFID_DONE;
3773 qla2x00_fcport_event_handler(vha, &ea);
3774 sp->free(sp);
3777 /* Get FC4 Feature with Nport ID. */
3778 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
3780 int rval = QLA_FUNCTION_FAILED;
3781 struct ct_sns_req *ct_req;
3782 srb_t *sp;
3784 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3785 return rval;
3787 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3788 if (!sp)
3789 return rval;
3791 fcport->flags |= FCF_ASYNC_SENT;
3792 sp->type = SRB_CT_PTHRU_CMD;
3793 sp->name = "gffid";
3794 sp->gen1 = fcport->rscn_gen;
3795 sp->gen2 = fcport->login_gen;
3797 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3798 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3800 /* CT_IU preamble */
3801 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
3802 GFF_ID_RSP_SIZE);
3804 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3805 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3806 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
3808 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3809 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3810 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3811 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3812 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3813 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3814 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3816 sp->done = qla24xx_async_gffid_sp_done;
3818 rval = qla2x00_start_sp(sp);
3819 if (rval != QLA_SUCCESS)
3820 goto done_free_sp;
3822 ql_dbg(ql_dbg_disc, vha, 0x2132,
3823 "Async-%s hdl=%x %8phC.\n", sp->name,
3824 sp->handle, fcport->port_name);
3826 return rval;
3827 done_free_sp:
3828 sp->free(sp);
3829 fcport->flags &= ~FCF_ASYNC_SENT;
3830 return rval;
3833 /* GPN_FT + GNN_FT*/
3834 static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3836 struct qla_hw_data *ha = vha->hw;
3837 scsi_qla_host_t *vp;
3838 unsigned long flags;
3839 u64 twwn;
3840 int rc = 0;
3842 if (!ha->num_vhosts)
3843 return 0;
3845 spin_lock_irqsave(&ha->vport_slock, flags);
3846 list_for_each_entry(vp, &ha->vp_list, list) {
3847 twwn = wwn_to_u64(vp->port_name);
3848 if (wwn == twwn) {
3849 rc = 1;
3850 break;
3853 spin_unlock_irqrestore(&ha->vport_slock, flags);
3855 return rc;
void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
	fc_port_t *fcport;
	u32 i, rc;
	bool found;
	struct fab_scan_rp *rp;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s enter\n", __func__);

	if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s scan stop due to chip reset %x/%x\n",
		    sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
		goto out;
	}

	rc = sp->rc;
	if (rc) {
		vha->scan.scan_retry++;
		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "Fabric scan failed on all retries.\n");
		}
		goto out;
	}
	vha->scan.scan_retry = 0;

	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->scan_state = QLA_FCPORT_SCAN;

	for (i = 0; i < vha->hw->max_fibre_devices; i++) {
		u64 wwn;

		rp = &vha->scan.l[i];
		found = false;

		wwn = wwn_to_u64(rp->port_name);
		if (wwn == 0)
			continue;

		if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
			continue;

		/* Bypass reserved domain fields. */
		if ((rp->id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp(vha, wwn))
			continue;

		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
				continue;
			fcport->scan_state = QLA_FCPORT_FOUND;
			fcport->d_id.b24 = rp->id.b24;
			found = true;
			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= FCF_FABRIC_DEVICE;
			}
			break;
		}

		if (!found) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d %8phC post new sess\n",
			    __func__, __LINE__, rp->port_name);
			qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
			    rp->node_name, NULL, rp->fc4type);
		}
	}

	/*
	 * Logout all previous fabric dev marked lost, except FCP2 devices.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
			continue;

		if (fcport->scan_state != QLA_FCPORT_FOUND) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
				    ql2xplogiabsentdevice, 0);

				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		} else
			qla24xx_fcport_handle_login(vha, fcport);
	}

out:
	qla24xx_sp_unmap(vha, sp);
	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_flags &= ~SF_SCANNING;
	spin_unlock_irqrestore(&vha->work_lock, flags);
}
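
/**
 * qla2x00_find_free_fcp_nvme_slot() - Fold a GPN_FT/GNN_FT response into the scan list.
 * @vha: HA context
 * @sp: SRB carrying the CT request and response buffers
 *
 * For an FCP scan, GPN_FT entries fill new scan-list slots and GNN_FT
 * entries add node names to matching slots. For an NVMe scan, entries
 * either flag an already-listed port as NVMe capable or take a free slot
 * as an NVMe-only port.
 */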
static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
	struct srb *sp)
{
	struct qla_hw_data *ha = vha->hw;
	int num_fibre_dev = ha->max_fibre_devices;
	struct ct_sns_req *ct_req =
		(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
	struct ct_sns_gpnft_rsp *ct_rsp =
		(struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
	struct ct_sns_gpn_ft_data *d;
	struct fab_scan_rp *rp;
	u16 cmd = be16_to_cpu(ct_req->command);
	u8 fc4_type = sp->gen2;
	int i, j, k;
	port_id_t id;
	u8 found;
	u64 wwn;

	j = 0;
	for (i = 0; i < num_fibre_dev; i++) {
		d = &ct_rsp->entries[i];

		id.b.rsvd_1 = 0;
		id.b.domain = d->port_id[0];
		id.b.area = d->port_id[1];
		id.b.al_pa = d->port_id[2];
		wwn = wwn_to_u64(d->port_name);

		if (id.b24 == 0 || wwn == 0)
			continue;

		if (fc4_type == FC4_TYPE_FCP_SCSI) {
			if (cmd == GPN_FT_CMD) {
				rp = &vha->scan.l[j];
				rp->id = id;
				memcpy(rp->port_name, d->port_name, 8);
				j++;
				rp->fc4type = FS_FC4TYPE_FCP;
			} else {
				for (k = 0; k < num_fibre_dev; k++) {
					rp = &vha->scan.l[k];
					if (id.b24 == rp->id.b24) {
						memcpy(rp->node_name,
						    d->port_name, 8);
						break;
					}
				}
			}
		} else {
			/* Search if the fibre device supports FC4_TYPE_NVME */
			if (cmd == GPN_FT_CMD) {
				found = 0;

				for (k = 0; k < num_fibre_dev; k++) {
					rp = &vha->scan.l[k];
					if (!memcmp(rp->port_name,
					    d->port_name, 8)) {
						/*
						 * Supports FC-NVMe & FCP
						 */
						rp->fc4type |= FS_FC4TYPE_NVME;
						found = 1;
						break;
					}
				}

				/* We found new FC-NVMe only port */
				if (!found) {
					for (k = 0; k < num_fibre_dev; k++) {
						rp = &vha->scan.l[k];
						if (wwn_to_u64(rp->port_name)) {
							continue;
						} else {
							rp->id = id;
							memcpy(rp->port_name,
							    d->port_name, 8);
							rp->fc4type =
							    FS_FC4TYPE_NVME;
							break;
						}
					}
				}
			} else {
				for (k = 0; k < num_fibre_dev; k++) {
					rp = &vha->scan.l[k];
					if (id.b24 == rp->id.b24) {
						memcpy(rp->node_name,
						    d->port_name, 8);
						break;
					}
				}
			}
		}
	}
}
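
/**
 * qla2x00_async_gpnft_gnnft_sp_done() - Shared completion for async GPN_FT/GNN_FT.
 * @s: SRB that completed
 * @res: CT command result
 *
 * On failure the scan is retried through the DPC thread, up to
 * MAX_SCAN_RETRIES. On success the response is folded into the scan list
 * and either a follow-up NVMe GPN_FT or a GPNFT/GNNFT done work element
 * is posted.
 */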
static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_work_evt *e;
	struct ct_sns_req *ct_req =
		(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
	u16 cmd = be16_to_cpu(ct_req->command);
	u8 fc4_type = sp->gen2;
	unsigned long flags;

	/* gen2 field is holding the fc4type */
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async done-%s res %x FC4Type %x\n",
	    sp->name, res, sp->gen2);

	if (res) {
		unsigned long flags;

		sp->free(sp);
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_flags &= ~SF_SCANNING;
		vha->scan.scan_retry++;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		} else {
			ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
			    "Async done-%s rescan failed on all retries\n",
			    sp->name);
		}
		return;
	}

	if (!res)
		qla2x00_find_free_fcp_nvme_slot(vha, sp);

	if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
	    cmd == GNN_FT_CMD) {
		del_timer(&sp->u.iocb_cmd.timer);
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_flags &= ~SF_SCANNING;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT);
		if (!e) {
			/*
			 * Free the DMA buffers here; otherwise this
			 * memory is leaked.
			 */
			if (sp->u.iocb_cmd.u.ctarg.req) {
				dma_free_coherent(&vha->hw->pdev->dev,
				    sizeof(struct ct_sns_pkt),
				    sp->u.iocb_cmd.u.ctarg.req,
				    sp->u.iocb_cmd.u.ctarg.req_dma);
				sp->u.iocb_cmd.u.ctarg.req = NULL;
			}
			if (sp->u.iocb_cmd.u.ctarg.rsp) {
				dma_free_coherent(&vha->hw->pdev->dev,
				    sizeof(struct ct_sns_pkt),
				    sp->u.iocb_cmd.u.ctarg.rsp,
				    sp->u.iocb_cmd.u.ctarg.rsp_dma);
				sp->u.iocb_cmd.u.ctarg.rsp = NULL;
			}

			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "Async done-%s unable to alloc work element\n",
			    sp->name);
			sp->free(sp);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return;
		}
		e->u.gpnft.fc4_type = FC4_TYPE_NVME;
		sp->rc = res;
		e->u.gpnft.sp = sp;

		qla2x00_post_work(vha, e);
		return;
	}

	if (cmd == GPN_FT_CMD)
		e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
	else
		e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
	if (!e) {
		/* Free the DMA buffers here; otherwise this memory is leaked. */
		if (sp->u.iocb_cmd.u.ctarg.req) {
			dma_free_coherent(&vha->hw->pdev->dev,
			    sizeof(struct ct_sns_pkt),
			    sp->u.iocb_cmd.u.ctarg.req,
			    sp->u.iocb_cmd.u.ctarg.req_dma);
			sp->u.iocb_cmd.u.ctarg.req = NULL;
		}
		if (sp->u.iocb_cmd.u.ctarg.rsp) {
			dma_free_coherent(&vha->hw->pdev->dev,
			    sizeof(struct ct_sns_pkt),
			    sp->u.iocb_cmd.u.ctarg.rsp,
			    sp->u.iocb_cmd.u.ctarg.rsp_dma);
			sp->u.iocb_cmd.u.ctarg.rsp = NULL;
		}

		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "Async done-%s unable to alloc work element\n",
		    sp->name);
		sp->free(sp);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return;
	}

	sp->rc = res;
	e->u.iosb.sp = sp;

	qla2x00_post_work(vha, e);
}

/*
 * Get WWNN list for fc4_type
 *
 * It is assumed the same SRB is re-used from GPNFT to avoid
 * mem free & re-alloc
 */
static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
	u8 fc4_type)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	struct ct_sns_pkt *ct_sns;
	unsigned long flags;

	if (!vha->flags.online) {
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_flags &= ~SF_SCANNING;
		spin_unlock_irqrestore(&vha->work_lock, flags);
		goto done_free_sp;
	}

	if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: req %p rsp %p are not setup\n",
		    __func__, sp->u.iocb_cmd.u.ctarg.req,
		    sp->u.iocb_cmd.u.ctarg.rsp);
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_flags &= ~SF_SCANNING;
		spin_unlock_irqrestore(&vha->work_lock, flags);
		WARN_ON(1);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0xfffff,
	    "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
	    __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
	    sp->u.iocb_cmd.u.ctarg.req_size);

	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gnnft";
	sp->gen1 = vha->hw->base_qpair->chip_reset;
	sp->gen2 = fc4_type;

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
	memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);

	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
	    sp->u.iocb_cmd.u.ctarg.rsp_size);

	/* GNN_FT req */
	ct_req->req.gpn_ft.port_type = fc4_type;

	sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	sp->done = qla2x00_async_gpnft_gnnft_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
	    sp->handle, ct_req->req.gpn_ft.port_type);
	return rval;

done_free_sp:
	if (sp->u.iocb_cmd.u.ctarg.req) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sizeof(struct ct_sns_pkt),
		    sp->u.iocb_cmd.u.ctarg.req,
		    sp->u.iocb_cmd.u.ctarg.req_dma);
		sp->u.iocb_cmd.u.ctarg.req = NULL;
	}
	if (sp->u.iocb_cmd.u.ctarg.rsp) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sizeof(struct ct_sns_pkt),
		    sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
	}

	sp->free(sp);

	return rval;
} /* GNNFT */
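
/**
 * qla24xx_async_gpnft_done() - Chain a GNN_FT query once GPN_FT completes.
 * @vha: HA context
 * @sp: SRB re-used from the GPN_FT command
 */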
void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s enter\n", __func__);
	del_timer(&sp->u.iocb_cmd.timer);
	qla24xx_async_gnnft(vha, sp, sp->gen2);
}

/* Get WWPN list for certain fc4_type */
int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	struct ct_sns_pkt *ct_sns;
	u32 rspsz;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s enter\n", __func__);

	if (!vha->flags.online)
		return rval;

	spin_lock_irqsave(&vha->work_lock, flags);
	if (vha->scan.scan_flags & SF_SCANNING) {
		spin_unlock_irqrestore(&vha->work_lock, flags);
		ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
		return rval;
	}
	vha->scan.scan_flags |= SF_SCANNING;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (fc4_type == FC4_TYPE_FCP_SCSI) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s: Performing FCP Scan\n", __func__);

		if (sp)
			sp->free(sp); /* should not happen */

		sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
		if (!sp) {
			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			spin_unlock_irqrestore(&vha->work_lock, flags);
			return rval;
		}

		sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
			&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
			&sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
		if (!sp->u.iocb_cmd.u.ctarg.req) {
			ql_log(ql_log_warn, vha, 0xffff,
			    "Failed to allocate ct_sns request.\n");
			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			spin_unlock_irqrestore(&vha->work_lock, flags);
			goto done_free_sp;
		}
		sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;

		rspsz = sizeof(struct ct_sns_gpnft_rsp) +
			((vha->hw->max_fibre_devices - 1) *
			    sizeof(struct ct_sns_gpn_ft_data));

		sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
			&vha->hw->pdev->dev, rspsz,
			&sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
		if (!sp->u.iocb_cmd.u.ctarg.rsp) {
			ql_log(ql_log_warn, vha, 0xffff,
			    "Failed to allocate ct_sns response.\n");
			spin_lock_irqsave(&vha->work_lock, flags);
			vha->scan.scan_flags &= ~SF_SCANNING;
			spin_unlock_irqrestore(&vha->work_lock, flags);
			goto done_free_sp;
		}
		sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;

		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s scan list size %d\n", __func__, vha->scan.size);

		memset(vha->scan.l, 0, vha->scan.size);
	} else if (!sp) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "NVME scan did not provide SP\n");
		return rval;
	}

	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gpnft";
	sp->gen1 = vha->hw->base_qpair->chip_reset;
	sp->gen2 = fc4_type;

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	rspsz = sizeof(struct ct_sns_gpnft_rsp) +
		((vha->hw->max_fibre_devices - 1) *
		    sizeof(struct ct_sns_gpn_ft_data));

	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);

	/* GPN_FT req */
	ct_req->req.gpn_ft.port_type = fc4_type;

	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	sp->done = qla2x00_async_gpnft_gnnft_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_flags &= ~SF_SCANNING;
		spin_unlock_irqrestore(&vha->work_lock, flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
	    sp->handle, ct_req->req.gpn_ft.port_type);
	return rval;

done_free_sp:
	if (sp->u.iocb_cmd.u.ctarg.req) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sizeof(struct ct_sns_pkt),
		    sp->u.iocb_cmd.u.ctarg.req,
		    sp->u.iocb_cmd.u.ctarg.req_dma);
		sp->u.iocb_cmd.u.ctarg.req = NULL;
	}
	if (sp->u.iocb_cmd.u.ctarg.rsp) {
		dma_free_coherent(&vha->hw->pdev->dev,
		    sizeof(struct ct_sns_pkt),
		    sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
	}

	sp->free(sp);

	return rval;
}
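
/**
 * qla_scan_work_fn() - Deferred fabric scan work.
 * @work: embedded delayed work of struct fab_scan
 *
 * Kicks the DPC thread to run a loop resync and clears SF_QUEUED.
 */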
void qla_scan_work_fn(struct work_struct *work)
{
	struct fab_scan *s = container_of(to_delayed_work(work),
	    struct fab_scan, scan_work);
	struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
	    scan);
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s: schedule loop resync\n", __func__);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_flags &= ~SF_QUEUED;
	spin_unlock_irqrestore(&vha->work_lock, flags);
}

/* GNN_ID */
void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	qla24xx_post_gnl_work(vha, ea->fcport);
}
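
/**
 * qla2x00_async_gnnid_sp_done() - Completion handler for an async GNN_ID query.
 * @s: SRB that completed
 * @res: CT command result
 *
 * Copies the returned node name into the fcport and posts an
 * FCME_GNNID_DONE event.
 */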
static void qla2x00_async_gnnid_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	fc_port_t *fcport = sp->fcport;
	u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
	struct event_arg ea;
	u64 wwnn;

	fcport->flags &= ~FCF_ASYNC_SENT;
	wwnn = wwn_to_u64(node_name);
	if (wwnn)
		memcpy(fcport->node_name, node_name, WWN_SIZE);

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;
	ea.rc = res;
	ea.event = FCME_GNNID_DONE;

	ql_dbg(ql_dbg_disc, vha, 0x204f,
	    "Async done-%s res %x, WWPN %8phC %8phC\n",
	    sp->name, res, fcport->port_name, fcport->node_name);

	qla2x00_fcport_event_handler(vha, &ea);

	sp->free(sp);
}
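
/**
 * qla24xx_async_gnnid() - Issue an asynchronous GNN_ID (Get Node Name) query.
 * @vha: HA context
 * @fcport: port whose node name is requested
 *
 * Returns QLA_SUCCESS if the CT pass-through IOCB was started.
 */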
int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	srb_t *sp;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->disc_state = DSC_GNN_ID;
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gnnid";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
	    GNN_ID_RSP_SIZE);

	/* GNN_ID req */
	ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
	ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
	ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;

	/* req & rsp use the same buffer */
	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
	sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	sp->done = qla2x00_async_gnnid_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
	    sp->name, fcport->port_name,
	    sp->handle, fcport->loop_id, fcport->d_id.b24);
	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}
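
/**
 * qla24xx_post_gnnid_work() - Queue a GNN_ID query as a work element.
 * @vha: HA context
 * @fcport: port whose node name is requested
 *
 * Returns 0 without queueing when the loop is not up/ready or the driver
 * is unloading.
 */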
int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;
	int ls;

	ls = atomic_read(&vha->loop_state);
	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
	    test_bit(UNLOADING, &vha->dpc_flags))
		return 0;

	e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}

/* GFPN_ID */
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);
		qla24xx_post_gidpn_work(vha, fcport);
		return;
	}

	qla24xx_post_gpsc_work(vha, fcport);
}
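
/**
 * qla2x00_async_gfpnid_sp_done() - Completion handler for an async GFPN_ID query.
 * @s: SRB that completed
 * @res: CT command result
 *
 * Copies the returned fabric port name into the fcport and posts an
 * FCME_GFPNID_DONE event.
 */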
static void qla2x00_async_gfpnid_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	fc_port_t *fcport = sp->fcport;
	u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
	struct event_arg ea;
	u64 wwn;

	fcport->flags &= ~FCF_ASYNC_SENT;
	wwn = wwn_to_u64(fpn);
	if (wwn)
		memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;
	ea.rc = res;
	ea.event = FCME_GFPNID_DONE;

	ql_dbg(ql_dbg_disc, vha, 0x204f,
	    "Async done-%s res %x, WWPN %8phC %8phC\n",
	    sp->name, res, fcport->port_name, fcport->fabric_port_name);

	qla2x00_fcport_event_handler(vha, &ea);

	sp->free(sp);
}
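
/**
 * qla24xx_async_gfpnid() - Issue an asynchronous GFPN_ID (Get Fabric Port Name) query.
 * @vha: HA context
 * @fcport: port whose fabric port name is requested
 *
 * Returns QLA_SUCCESS if the CT pass-through IOCB was started.
 */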
int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval = QLA_FUNCTION_FAILED;
	struct ct_sns_req *ct_req;
	srb_t *sp;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->disc_state = DSC_GFPN_ID;
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_CT_PTHRU_CMD;
	sp->name = "gfpnid";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	/* CT_IU preamble */
	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
	    GFPN_ID_RSP_SIZE);

	/* GFPN_ID req */
	ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
	ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
	ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;

	/* req & rsp use the same buffer */
	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
	sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
	sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;

	sp->done = qla2x00_async_gfpnid_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
	    sp->name, fcport->port_name,
	    sp->handle, fcport->loop_id, fcport->d_id.b24);
	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}
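
/**
 * qla24xx_post_gfpnid_work() - Queue a GFPN_ID query as a work element.
 * @vha: HA context
 * @fcport: port whose fabric port name is requested
 *
 * Returns 0 without queueing when the loop is not up/ready or the driver
 * is unloading.
 */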
int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;
	int ls;

	ls = atomic_read(&vha->loop_state);
	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
	    test_bit(UNLOADING, &vha->dpc_flags))
		return 0;

	e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}