drivers/scsi/qla2xxx/qla_gs.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
6 #include "qla_def.h"
7 #include "qla_target.h"
8 #include <linux/utsname.h>
10 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
11 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
12 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
15 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
16 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
17 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
18 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
19 static int qla_async_rsnn_nn(scsi_qla_host_t *);
23 /**
24 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
25 * @vha: HA context
26 * @arg: CT arguments
28 * Returns a pointer to the @vha's ms_iocb.
30 void *
31 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
33 struct qla_hw_data *ha = vha->hw;
34 ms_iocb_entry_t *ms_pkt;
36 ms_pkt = (ms_iocb_entry_t *)arg->iocb;
37 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
39 ms_pkt->entry_type = MS_IOCB_TYPE;
40 ms_pkt->entry_count = 1;
41 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
42 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
43 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
44 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
45 ms_pkt->total_dsd_count = cpu_to_le16(2);
46 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
47 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
49 put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
50 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
52 put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
53 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
55 vha->qla_stats.control_requests++;
57 return (ms_pkt);
60 /**
61 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
62 * @vha: HA context
63 * @arg: CT arguments
65 * Returns a pointer to the @ha's ms_iocb.
67 void *
68 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
70 struct qla_hw_data *ha = vha->hw;
71 struct ct_entry_24xx *ct_pkt;
73 ct_pkt = (struct ct_entry_24xx *)arg->iocb;
74 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
76 ct_pkt->entry_type = CT_IOCB_TYPE;
77 ct_pkt->entry_count = 1;
78 ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
79 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
80 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
81 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
82 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
83 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
85 put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
86 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
88 put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
89 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
90 ct_pkt->vp_index = vha->vp_idx;
92 vha->qla_stats.control_requests++;
94 return (ct_pkt);
97 /**
98 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
99 * @p: CT request buffer
100 * @cmd: GS command
101 * @rsp_size: response size in bytes
103 * Returns a pointer to the initialized @ct_req.
105 static inline struct ct_sns_req *
106 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
108 memset(p, 0, sizeof(struct ct_sns_pkt));
110 p->p.req.header.revision = 0x01;
111 p->p.req.header.gs_type = 0xFC;
112 p->p.req.header.gs_subtype = 0x02;
113 p->p.req.command = cpu_to_be16(cmd);
114 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
116 return &p->p.req;
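/*
 * Informal sketch of the CT_IU preamble filled in above (a reading of
 * this code, not a spec quote):
 *
 *   revision     = 0x01   CT revision
 *   gs_type      = 0xFC   Directory Service
 *   gs_subtype   = 0x02   Name Server
 *   command      = GS command code (GA_NXT, GID_PT, ...)
 *   max_rsp_size = response buffer size in 4-byte words, excluding the
 *                  16-byte preamble; e.g. a 304-byte buffer would be
 *                  advertised as (304 - 16) / 4 = 72 words.
 *
 * The 304-byte figure is only illustrative; real sizes come from the
 * *_RSP_SIZE constants.
 */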
120 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
121 struct ct_sns_rsp *ct_rsp, const char *routine)
123 int rval;
124 uint16_t comp_status;
125 struct qla_hw_data *ha = vha->hw;
126 bool lid_is_sns = false;
128 rval = QLA_FUNCTION_FAILED;
129 if (ms_pkt->entry_status != 0) {
130 ql_dbg(ql_dbg_disc, vha, 0x2031,
131 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
132 routine, ms_pkt->entry_status, vha->d_id.b.domain,
133 vha->d_id.b.area, vha->d_id.b.al_pa);
134 } else {
135 if (IS_FWI2_CAPABLE(ha))
136 comp_status = le16_to_cpu(
137 ((struct ct_entry_24xx *)ms_pkt)->comp_status);
138 else
139 comp_status = le16_to_cpu(ms_pkt->status);
140 switch (comp_status) {
141 case CS_COMPLETE:
142 case CS_DATA_UNDERRUN:
143 case CS_DATA_OVERRUN: /* Overrun? */
144 if (ct_rsp->header.response !=
145 cpu_to_be16(CT_ACCEPT_RESPONSE)) {
146 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
147 "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
148 routine, vha->d_id.b.domain,
149 vha->d_id.b.area, vha->d_id.b.al_pa,
150 comp_status, ct_rsp->header.response);
151 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
152 0x2078, ct_rsp,
153 offsetof(typeof(*ct_rsp), rsp));
154 rval = QLA_INVALID_COMMAND;
155 } else
156 rval = QLA_SUCCESS;
157 break;
158 case CS_PORT_LOGGED_OUT:
159 if (IS_FWI2_CAPABLE(ha)) {
160 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
161 NPH_SNS)
162 lid_is_sns = true;
163 } else {
164 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
165 SIMPLE_NAME_SERVER)
166 lid_is_sns = true;
168 if (lid_is_sns) {
169 ql_dbg(ql_dbg_async, vha, 0x502b,
170 "%s failed, Name server has logged out",
171 routine);
172 rval = QLA_NOT_LOGGED_IN;
173 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
174 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
176 break;
177 case CS_TIMEOUT:
178 rval = QLA_FUNCTION_TIMEOUT;
179 fallthrough;
180 default:
181 ql_dbg(ql_dbg_disc, vha, 0x2033,
182 "%s failed, completion status (%x) on port_id: "
183 "%02x%02x%02x.\n", routine, comp_status,
184 vha->d_id.b.domain, vha->d_id.b.area,
185 vha->d_id.b.al_pa);
186 break;
189 return rval;
193 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
194 * @vha: HA context
195 * @fcport: fcport entry to be updated
197 * Returns 0 on success.
200 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
202 int rval;
204 ms_iocb_entry_t *ms_pkt;
205 struct ct_sns_req *ct_req;
206 struct ct_sns_rsp *ct_rsp;
207 struct qla_hw_data *ha = vha->hw;
208 struct ct_arg arg;
210 if (IS_QLA2100(ha) || IS_QLA2200(ha))
211 return qla2x00_sns_ga_nxt(vha, fcport);
213 arg.iocb = ha->ms_iocb;
214 arg.req_dma = ha->ct_sns_dma;
215 arg.rsp_dma = ha->ct_sns_dma;
216 arg.req_size = GA_NXT_REQ_SIZE;
217 arg.rsp_size = GA_NXT_RSP_SIZE;
218 arg.nport_handle = NPH_SNS;
220 /* Issue GA_NXT */
221 /* Prepare common MS IOCB */
222 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
224 /* Prepare CT request */
225 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
226 GA_NXT_RSP_SIZE);
227 ct_rsp = &ha->ct_sns->p.rsp;
229 /* Prepare CT arguments -- port_id */
230 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
232 /* Execute MS IOCB */
233 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
234 sizeof(ms_iocb_entry_t));
235 if (rval != QLA_SUCCESS) {
236 /*EMPTY*/
237 ql_dbg(ql_dbg_disc, vha, 0x2062,
238 "GA_NXT issue IOCB failed (%d).\n", rval);
239 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
240 QLA_SUCCESS) {
241 rval = QLA_FUNCTION_FAILED;
242 } else {
243 /* Populate fc_port_t entry. */
244 fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id);
246 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
247 WWN_SIZE);
248 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
249 WWN_SIZE);
251 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
252 FS_FC4TYPE_FCP : FC4_TYPE_OTHER;
254 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
255 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
256 fcport->d_id.b.domain = 0xf0;
258 ql_dbg(ql_dbg_disc, vha, 0x2063,
259 "GA_NXT entry - nn %8phN pn %8phN "
260 "port_id=%02x%02x%02x.\n",
261 fcport->node_name, fcport->port_name,
262 fcport->d_id.b.domain, fcport->d_id.b.area,
263 fcport->d_id.b.al_pa);
266 return (rval);
269 static inline int
270 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
272 return vha->hw->max_fibre_devices * 4 + 16;
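/*
 * Rough sizing rationale: a GID_PT accept payload is a 16-byte CT
 * header followed by one 4-byte entry (control byte + 3-byte port ID)
 * per fabric device. If, say, max_fibre_devices were 512, the buffer
 * would be 512 * 4 + 16 = 2064 bytes; the 512 value is illustrative
 * only, the real limit comes from ha->max_fibre_devices.
 */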
276 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
277 * @vha: HA context
278 * @list: switch info entries to populate
280 * NOTE: Non-Nx_Ports are not requested.
282 * Returns 0 on success.
285 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
287 int rval;
288 uint16_t i;
290 ms_iocb_entry_t *ms_pkt;
291 struct ct_sns_req *ct_req;
292 struct ct_sns_rsp *ct_rsp;
294 struct ct_sns_gid_pt_data *gid_data;
295 struct qla_hw_data *ha = vha->hw;
296 uint16_t gid_pt_rsp_size;
297 struct ct_arg arg;
299 if (IS_QLA2100(ha) || IS_QLA2200(ha))
300 return qla2x00_sns_gid_pt(vha, list);
302 gid_data = NULL;
303 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
305 arg.iocb = ha->ms_iocb;
306 arg.req_dma = ha->ct_sns_dma;
307 arg.rsp_dma = ha->ct_sns_dma;
308 arg.req_size = GID_PT_REQ_SIZE;
309 arg.rsp_size = gid_pt_rsp_size;
310 arg.nport_handle = NPH_SNS;
312 /* Issue GID_PT */
313 /* Prepare common MS IOCB */
314 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
316 /* Prepare CT request */
317 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
318 ct_rsp = &ha->ct_sns->p.rsp;
320 /* Prepare CT arguments -- port_type */
321 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
323 /* Execute MS IOCB */
324 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
325 sizeof(ms_iocb_entry_t));
326 if (rval != QLA_SUCCESS) {
327 /*EMPTY*/
328 ql_dbg(ql_dbg_disc, vha, 0x2055,
329 "GID_PT issue IOCB failed (%d).\n", rval);
330 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
331 QLA_SUCCESS) {
332 rval = QLA_FUNCTION_FAILED;
333 } else {
334 /* Set port IDs in switch info list. */
335 for (i = 0; i < ha->max_fibre_devices; i++) {
336 gid_data = &ct_rsp->rsp.gid_pt.entries[i];
337 list[i].d_id = be_to_port_id(gid_data->port_id);
338 memset(list[i].fabric_port_name, 0, WWN_SIZE);
339 list[i].fp_speed = PORT_SPEED_UNKNOWN;
341 /* Last one exit. */
342 if (gid_data->control_byte & BIT_7) {
343 list[i].d_id.b.rsvd_1 = gid_data->control_byte;
344 break;
349 * If we've used all available slots, then the switch is
350 * reporting back more devices than we can handle with this
351 * single call. Return a failed status, and let GA_NXT handle
352 * the overload.
354 if (i == ha->max_fibre_devices)
355 rval = QLA_FUNCTION_FAILED;
358 return (rval);
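/*
 * Note on the loop above (informal): BIT_7 in the control byte marks
 * the final port ID in the GID_PT accept payload. Stashing that control
 * byte in d_id.b.rsvd_1 lets the follow-up per-port queries (GPN_ID and
 * GNN_ID below) detect the last valid entry and stop early.
 */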
362 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
363 * @vha: HA context
364 * @list: switch info entries to populate
366 * Returns 0 on success.
369 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
371 int rval = QLA_SUCCESS;
372 uint16_t i;
374 ms_iocb_entry_t *ms_pkt;
375 struct ct_sns_req *ct_req;
376 struct ct_sns_rsp *ct_rsp;
377 struct qla_hw_data *ha = vha->hw;
378 struct ct_arg arg;
380 if (IS_QLA2100(ha) || IS_QLA2200(ha))
381 return qla2x00_sns_gpn_id(vha, list);
383 arg.iocb = ha->ms_iocb;
384 arg.req_dma = ha->ct_sns_dma;
385 arg.rsp_dma = ha->ct_sns_dma;
386 arg.req_size = GPN_ID_REQ_SIZE;
387 arg.rsp_size = GPN_ID_RSP_SIZE;
388 arg.nport_handle = NPH_SNS;
390 for (i = 0; i < ha->max_fibre_devices; i++) {
391 /* Issue GPN_ID */
392 /* Prepare common MS IOCB */
393 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
395 /* Prepare CT request */
396 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
397 GPN_ID_RSP_SIZE);
398 ct_rsp = &ha->ct_sns->p.rsp;
400 /* Prepare CT arguments -- port_id */
401 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
403 /* Execute MS IOCB */
404 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
405 sizeof(ms_iocb_entry_t));
406 if (rval != QLA_SUCCESS) {
407 /*EMPTY*/
408 ql_dbg(ql_dbg_disc, vha, 0x2056,
409 "GPN_ID issue IOCB failed (%d).\n", rval);
410 break;
411 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
412 "GPN_ID") != QLA_SUCCESS) {
413 rval = QLA_FUNCTION_FAILED;
414 break;
415 } else {
416 /* Save portname */
417 memcpy(list[i].port_name,
418 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
421 /* Last device exit. */
422 if (list[i].d_id.b.rsvd_1 != 0)
423 break;
426 return (rval);
430 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
431 * @vha: HA context
432 * @list: switch info entries to populate
434 * Returns 0 on success.
437 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
439 int rval = QLA_SUCCESS;
440 uint16_t i;
441 struct qla_hw_data *ha = vha->hw;
442 ms_iocb_entry_t *ms_pkt;
443 struct ct_sns_req *ct_req;
444 struct ct_sns_rsp *ct_rsp;
445 struct ct_arg arg;
447 if (IS_QLA2100(ha) || IS_QLA2200(ha))
448 return qla2x00_sns_gnn_id(vha, list);
450 arg.iocb = ha->ms_iocb;
451 arg.req_dma = ha->ct_sns_dma;
452 arg.rsp_dma = ha->ct_sns_dma;
453 arg.req_size = GNN_ID_REQ_SIZE;
454 arg.rsp_size = GNN_ID_RSP_SIZE;
455 arg.nport_handle = NPH_SNS;
457 for (i = 0; i < ha->max_fibre_devices; i++) {
458 /* Issue GNN_ID */
459 /* Prepare common MS IOCB */
460 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
462 /* Prepare CT request */
463 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
464 GNN_ID_RSP_SIZE);
465 ct_rsp = &ha->ct_sns->p.rsp;
467 /* Prepare CT arguments -- port_id */
468 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
470 /* Execute MS IOCB */
471 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
472 sizeof(ms_iocb_entry_t));
473 if (rval != QLA_SUCCESS) {
474 /*EMPTY*/
475 ql_dbg(ql_dbg_disc, vha, 0x2057,
476 "GNN_ID issue IOCB failed (%d).\n", rval);
477 break;
478 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
479 "GNN_ID") != QLA_SUCCESS) {
480 rval = QLA_FUNCTION_FAILED;
481 break;
482 } else {
483 /* Save nodename */
484 memcpy(list[i].node_name,
485 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
487 ql_dbg(ql_dbg_disc, vha, 0x2058,
488 "GID_PT entry - nn %8phN pn %8phN "
489 "portid=%02x%02x%02x.\n",
490 list[i].node_name, list[i].port_name,
491 list[i].d_id.b.domain, list[i].d_id.b.area,
492 list[i].d_id.b.al_pa);
495 /* Last device exit. */
496 if (list[i].d_id.b.rsvd_1 != 0)
497 break;
500 return (rval);
503 static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
505 struct scsi_qla_host *vha = sp->vha;
506 struct ct_sns_pkt *ct_sns;
507 struct qla_work_evt *e;
509 sp->rc = rc;
510 if (rc == QLA_SUCCESS) {
511 ql_dbg(ql_dbg_disc, vha, 0x204f,
512 "Async done-%s exiting normally.\n",
513 sp->name);
514 } else if (rc == QLA_FUNCTION_TIMEOUT) {
515 ql_dbg(ql_dbg_disc, vha, 0x204f,
516 "Async done-%s timeout\n", sp->name);
517 } else {
518 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
519 memset(ct_sns, 0, sizeof(*ct_sns));
520 sp->retry_count++;
521 if (sp->retry_count > 3)
522 goto err;
524 ql_dbg(ql_dbg_disc, vha, 0x204f,
525 "Async done-%s fail rc %x. Retry count %d\n",
526 sp->name, rc, sp->retry_count);
528 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
529 if (!e)
530 goto err2;
532 del_timer(&sp->u.iocb_cmd.timer);
533 e->u.iosb.sp = sp;
534 qla2x00_post_work(vha, e);
535 return;
538 err:
539 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
540 err2:
541 if (!e) {
542 /* Please ignore the kernel warning; otherwise we would have a memory leak. */
543 if (sp->u.iocb_cmd.u.ctarg.req) {
544 dma_free_coherent(&vha->hw->pdev->dev,
545 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
546 sp->u.iocb_cmd.u.ctarg.req,
547 sp->u.iocb_cmd.u.ctarg.req_dma);
548 sp->u.iocb_cmd.u.ctarg.req = NULL;
551 if (sp->u.iocb_cmd.u.ctarg.rsp) {
552 dma_free_coherent(&vha->hw->pdev->dev,
553 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
554 sp->u.iocb_cmd.u.ctarg.rsp,
555 sp->u.iocb_cmd.u.ctarg.rsp_dma);
556 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
559 sp->free(sp);
561 return;
564 e->u.iosb.sp = sp;
565 qla2x00_post_work(vha, e);
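/*
 * Informal sketch of the completion/retry flow implemented above:
 *
 *   done(sp, rc):
 *       rc == QLA_SUCCESS or QLA_FUNCTION_TIMEOUT
 *           -> log, then fall through and queue QLA_EVT_UNMAP work,
 *              which unmaps and frees the srb
 *       any other rc
 *           -> clear the response buffer and re-queue the srb via
 *              QLA_EVT_SP_RETRY, up to 3 retries; after that, fall
 *              back to QLA_EVT_UNMAP
 *       if no work element can be allocated at all, the DMA buffers
 *       and the srb are freed inline
 *
 * This is a reading of the code above, not separate documentation.
 */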
569 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
570 * @vha: HA context
572 * Returns 0 on success.
575 qla2x00_rft_id(scsi_qla_host_t *vha)
577 struct qla_hw_data *ha = vha->hw;
579 if (IS_QLA2100(ha) || IS_QLA2200(ha))
580 return qla2x00_sns_rft_id(vha);
582 return qla_async_rftid(vha, &vha->d_id);
585 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
587 int rval = QLA_MEMORY_ALLOC_FAILED;
588 struct ct_sns_req *ct_req;
589 srb_t *sp;
590 struct ct_sns_pkt *ct_sns;
592 if (!vha->flags.online)
593 goto done;
595 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
596 if (!sp)
597 goto done;
599 sp->type = SRB_CT_PTHRU_CMD;
600 sp->name = "rft_id";
601 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
603 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
604 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
605 GFP_KERNEL);
606 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
607 if (!sp->u.iocb_cmd.u.ctarg.req) {
608 ql_log(ql_log_warn, vha, 0xd041,
609 "%s: Failed to allocate ct_sns request.\n",
610 __func__);
611 goto done_free_sp;
614 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
615 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
616 GFP_KERNEL);
617 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
618 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
619 ql_log(ql_log_warn, vha, 0xd042,
620 "%s: Failed to allocate ct_sns request.\n",
621 __func__);
622 goto done_free_sp;
624 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
625 memset(ct_sns, 0, sizeof(*ct_sns));
626 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
628 /* Prepare CT request */
629 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
631 /* Prepare CT arguments -- port_id, FC-4 types */
632 ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
633 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
635 if (vha->flags.nvme_enabled)
636 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
638 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
639 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
640 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
641 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
642 sp->done = qla2x00_async_sns_sp_done;
644 ql_dbg(ql_dbg_disc, vha, 0xffff,
645 "Async-%s - hdl=%x portid %06x.\n",
646 sp->name, sp->handle, d_id->b24);
648 rval = qla2x00_start_sp(sp);
649 if (rval != QLA_SUCCESS) {
650 ql_dbg(ql_dbg_disc, vha, 0x2043,
651 "RFT_ID issue IOCB failed (%d).\n", rval);
652 goto done_free_sp;
654 return rval;
655 done_free_sp:
656 sp->free(sp);
657 done:
658 return rval;
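/*
 * Informal note on the FC-4 TYPE bitmap set above: rft_id.fc4_types is
 * a 32-byte map with one bit per FC-4 TYPE code. With big-endian words,
 * TYPE 0x08 (SCSI FCP) lands in byte 2 bit 0 and TYPE 0x28 (NVMe) lands
 * in byte 6 bit 0, which is why fc4_types[2] = 0x01 registers FCP and
 * fc4_types[6] = 1 additionally registers NVMe when it is enabled.
 */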
662 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
663 * @vha: HA context
664 * @type: not used
666 * Returns 0 on success.
669 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
671 struct qla_hw_data *ha = vha->hw;
673 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
674 ql_dbg(ql_dbg_disc, vha, 0x2046,
675 "RFF_ID call not supported on ISP2100/ISP2200.\n");
676 return (QLA_SUCCESS);
679 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
680 FC4_TYPE_FCP_SCSI);
683 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
684 u8 fc4feature, u8 fc4type)
686 int rval = QLA_MEMORY_ALLOC_FAILED;
687 struct ct_sns_req *ct_req;
688 srb_t *sp;
689 struct ct_sns_pkt *ct_sns;
691 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
692 if (!sp)
693 goto done;
695 sp->type = SRB_CT_PTHRU_CMD;
696 sp->name = "rff_id";
697 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
699 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
700 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
701 GFP_KERNEL);
702 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
703 if (!sp->u.iocb_cmd.u.ctarg.req) {
704 ql_log(ql_log_warn, vha, 0xd041,
705 "%s: Failed to allocate ct_sns request.\n",
706 __func__);
707 goto done_free_sp;
710 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
711 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
712 GFP_KERNEL);
713 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
714 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
715 ql_log(ql_log_warn, vha, 0xd042,
716 "%s: Failed to allocate ct_sns request.\n",
717 __func__);
718 goto done_free_sp;
720 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
721 memset(ct_sns, 0, sizeof(*ct_sns));
722 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
724 /* Prepare CT request */
725 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
727 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
728 ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
729 ct_req->req.rff_id.fc4_feature = fc4feature;
730 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
732 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
733 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
734 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
735 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
736 sp->done = qla2x00_async_sns_sp_done;
738 ql_dbg(ql_dbg_disc, vha, 0xffff,
739 "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
740 sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
742 rval = qla2x00_start_sp(sp);
743 if (rval != QLA_SUCCESS) {
744 ql_dbg(ql_dbg_disc, vha, 0x2047,
745 "RFF_ID issue IOCB failed (%d).\n", rval);
746 goto done_free_sp;
749 return rval;
751 done_free_sp:
752 sp->free(sp);
753 done:
754 return rval;
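/*
 * Informal note: the RFF_ID fc4_feature byte advertises whether this
 * port acts as an FCP target and/or initiator for the given fc4_type
 * (bit meanings per FC-GS); qlt_rff_id() supplies the feature bits
 * appropriate to the current target/initiator mode.
 */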
758 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
759 * @vha: HA context
761 * Returns 0 on success.
764 qla2x00_rnn_id(scsi_qla_host_t *vha)
766 struct qla_hw_data *ha = vha->hw;
768 if (IS_QLA2100(ha) || IS_QLA2200(ha))
769 return qla2x00_sns_rnn_id(vha);
771 return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
774 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
775 u8 *node_name)
777 int rval = QLA_MEMORY_ALLOC_FAILED;
778 struct ct_sns_req *ct_req;
779 srb_t *sp;
780 struct ct_sns_pkt *ct_sns;
782 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
783 if (!sp)
784 goto done;
786 sp->type = SRB_CT_PTHRU_CMD;
787 sp->name = "rnid";
788 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
790 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
791 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
792 GFP_KERNEL);
793 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
794 if (!sp->u.iocb_cmd.u.ctarg.req) {
795 ql_log(ql_log_warn, vha, 0xd041,
796 "%s: Failed to allocate ct_sns request.\n",
797 __func__);
798 goto done_free_sp;
801 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
802 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
803 GFP_KERNEL);
804 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
805 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
806 ql_log(ql_log_warn, vha, 0xd042,
807 "%s: Failed to allocate ct_sns request.\n",
808 __func__);
809 goto done_free_sp;
811 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
812 memset(ct_sns, 0, sizeof(*ct_sns));
813 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
815 /* Prepare CT request */
816 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
818 /* Prepare CT arguments -- port_id, node_name */
819 ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id);
820 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
822 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
823 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
824 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
826 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
827 sp->done = qla2x00_async_sns_sp_done;
829 ql_dbg(ql_dbg_disc, vha, 0xffff,
830 "Async-%s - hdl=%x portid %06x\n",
831 sp->name, sp->handle, d_id->b24);
833 rval = qla2x00_start_sp(sp);
834 if (rval != QLA_SUCCESS) {
835 ql_dbg(ql_dbg_disc, vha, 0x204d,
836 "RNN_ID issue IOCB failed (%d).\n", rval);
837 goto done_free_sp;
840 return rval;
842 done_free_sp:
843 sp->free(sp);
844 done:
845 return rval;
848 size_t
849 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
851 struct qla_hw_data *ha = vha->hw;
853 if (IS_QLAFX00(ha))
854 return scnprintf(snn, size, "%s FW:v%s DVR:v%s",
855 ha->model_number, ha->mr.fw_version, qla2x00_version_str);
857 return scnprintf(snn, size, "%s FW:v%d.%02d.%02d DVR:v%s",
858 ha->model_number, ha->fw_major_version, ha->fw_minor_version,
859 ha->fw_subminor_version, qla2x00_version_str);
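/*
 * Illustrative output only (model and version numbers are made up):
 * the symbolic node name built here looks like
 *   "QLE2742 FW:v8.08.05 DVR:v10.02.00.104-k"
 * on FWI2-capable parts, or "<model> FW:v<mr fw> DVR:v<driver>" on
 * ISPFx00.
 */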
863 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
864 * @vha: HA context
866 * Returns 0 on success.
869 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
871 struct qla_hw_data *ha = vha->hw;
873 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
874 ql_dbg(ql_dbg_disc, vha, 0x2050,
875 "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
876 return (QLA_SUCCESS);
879 return qla_async_rsnn_nn(vha);
882 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
884 int rval = QLA_MEMORY_ALLOC_FAILED;
885 struct ct_sns_req *ct_req;
886 srb_t *sp;
887 struct ct_sns_pkt *ct_sns;
889 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
890 if (!sp)
891 goto done;
893 sp->type = SRB_CT_PTHRU_CMD;
894 sp->name = "rsnn_nn";
895 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
897 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
898 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
899 GFP_KERNEL);
900 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
901 if (!sp->u.iocb_cmd.u.ctarg.req) {
902 ql_log(ql_log_warn, vha, 0xd041,
903 "%s: Failed to allocate ct_sns request.\n",
904 __func__);
905 goto done_free_sp;
908 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
909 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
910 GFP_KERNEL);
911 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
912 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
913 ql_log(ql_log_warn, vha, 0xd042,
914 "%s: Failed to allocate ct_sns request.\n",
915 __func__);
916 goto done_free_sp;
918 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
919 memset(ct_sns, 0, sizeof(*ct_sns));
920 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
922 /* Prepare CT request */
923 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
925 /* Prepare CT arguments -- node_name, symbolic node_name, size */
926 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
928 /* Prepare the Symbolic Node Name */
929 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
930 sizeof(ct_req->req.rsnn_nn.sym_node_name));
931 ct_req->req.rsnn_nn.name_len =
932 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
935 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
936 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
937 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
939 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
940 sp->done = qla2x00_async_sns_sp_done;
942 ql_dbg(ql_dbg_disc, vha, 0xffff,
943 "Async-%s - hdl=%x.\n",
944 sp->name, sp->handle);
946 rval = qla2x00_start_sp(sp);
947 if (rval != QLA_SUCCESS) {
948 ql_dbg(ql_dbg_disc, vha, 0x2043,
949 "RFT_ID issue IOCB failed (%d).\n", rval);
950 goto done_free_sp;
953 return rval;
955 done_free_sp:
956 sp->free(sp);
957 done:
958 return rval;
962 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
963 * @vha: HA context
964 * @cmd: GS command
965 * @scmd_len: Subcommand length
966 * @data_size: response size in bytes
968 * Returns a pointer to the @ha's sns_cmd.
970 static inline struct sns_cmd_pkt *
971 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
972 uint16_t data_size)
974 uint16_t wc;
975 struct sns_cmd_pkt *sns_cmd;
976 struct qla_hw_data *ha = vha->hw;
978 sns_cmd = ha->sns_cmd;
979 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
980 wc = data_size / 2; /* Size in 16bit words. */
981 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
982 put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
983 sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
984 sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
985 wc = (data_size - 16) / 4; /* Size in 32bit words. */
986 sns_cmd->p.cmd.size = cpu_to_le16(wc);
988 vha->qla_stats.control_requests++;
990 return (sns_cmd);
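/*
 * Informal sketch of the legacy SNS command buffer built above (used
 * only with the Execute SNS mailbox command on ISP2100/ISP2200):
 *   buffer_length      = data_size / 2        (16-bit words)
 *   buffer_address     = DMA address of this same sns_cmd buffer
 *   subcommand_length  = scmd_len
 *   subcommand         = GS command code
 *   size               = (data_size - 16) / 4 (32-bit words of payload
 *                        after the 16-byte response header)
 * For example, a data_size of 640 would give buffer_length 320 and
 * size 156; the number is illustrative only.
 */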
994 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
995 * @vha: HA context
996 * @fcport: fcport entry to be updated
998 * This command uses the old Execute SNS Command mailbox routine.
1000 * Returns 0 on success.
1002 static int
1003 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1005 int rval = QLA_SUCCESS;
1006 struct qla_hw_data *ha = vha->hw;
1007 struct sns_cmd_pkt *sns_cmd;
1009 /* Issue GA_NXT. */
1010 /* Prepare SNS command request. */
1011 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1012 GA_NXT_SNS_DATA_SIZE);
1014 /* Prepare SNS command arguments -- port_id. */
1015 sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1016 sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1017 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1019 /* Execute SNS command. */
1020 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1021 sizeof(struct sns_cmd_pkt));
1022 if (rval != QLA_SUCCESS) {
1023 /*EMPTY*/
1024 ql_dbg(ql_dbg_disc, vha, 0x205f,
1025 "GA_NXT Send SNS failed (%d).\n", rval);
1026 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
1027 sns_cmd->p.gan_data[9] != 0x02) {
1028 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1029 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1030 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1031 sns_cmd->p.gan_data, 16);
1032 rval = QLA_FUNCTION_FAILED;
1033 } else {
1034 /* Populate fc_port_t entry. */
1035 fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1036 fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1037 fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1039 memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1040 memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
1042 if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1043 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1044 fcport->d_id.b.domain = 0xf0;
1046 ql_dbg(ql_dbg_disc, vha, 0x2061,
1047 "GA_NXT entry - nn %8phN pn %8phN "
1048 "port_id=%02x%02x%02x.\n",
1049 fcport->node_name, fcport->port_name,
1050 fcport->d_id.b.domain, fcport->d_id.b.area,
1051 fcport->d_id.b.al_pa);
1054 return (rval);
1058 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1059 * @vha: HA context
1060 * @list: switch info entries to populate
1062 * This command uses the old Execute SNS Command mailbox routine.
1064 * NOTE: Non-Nx_Ports are not requested.
1066 * Returns 0 on success.
1068 static int
1069 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
1071 int rval;
1072 struct qla_hw_data *ha = vha->hw;
1073 uint16_t i;
1074 uint8_t *entry;
1075 struct sns_cmd_pkt *sns_cmd;
1076 uint16_t gid_pt_sns_data_size;
1078 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1080 /* Issue GID_PT. */
1081 /* Prepare SNS command request. */
1082 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1083 gid_pt_sns_data_size);
1085 /* Prepare SNS command arguments -- port_type. */
1086 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1088 /* Execute SNS command. */
1089 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1090 sizeof(struct sns_cmd_pkt));
1091 if (rval != QLA_SUCCESS) {
1092 /*EMPTY*/
1093 ql_dbg(ql_dbg_disc, vha, 0x206d,
1094 "GID_PT Send SNS failed (%d).\n", rval);
1095 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
1096 sns_cmd->p.gid_data[9] != 0x02) {
1097 ql_dbg(ql_dbg_disc, vha, 0x202f,
1098 "GID_PT failed, rejected request, gid_rsp:\n");
1099 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1100 sns_cmd->p.gid_data, 16);
1101 rval = QLA_FUNCTION_FAILED;
1102 } else {
1103 /* Set port IDs in switch info list. */
1104 for (i = 0; i < ha->max_fibre_devices; i++) {
1105 entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1106 list[i].d_id.b.domain = entry[1];
1107 list[i].d_id.b.area = entry[2];
1108 list[i].d_id.b.al_pa = entry[3];
1110 /* Last one exit. */
1111 if (entry[0] & BIT_7) {
1112 list[i].d_id.b.rsvd_1 = entry[0];
1113 break;
1118 * If we've used all available slots, then the switch is
1119 * reporting back more devices than we can handle with this
1120 * single call. Return a failed status, and let GA_NXT handle
1121 * the overload.
1123 if (i == ha->max_fibre_devices)
1124 rval = QLA_FUNCTION_FAILED;
1127 return (rval);
1131 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1132 * @vha: HA context
1133 * @list: switch info entries to populate
1135 * This command uses the old Execute SNS Command mailbox routine.
1137 * Returns 0 on success.
1139 static int
1140 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1142 int rval = QLA_SUCCESS;
1143 struct qla_hw_data *ha = vha->hw;
1144 uint16_t i;
1145 struct sns_cmd_pkt *sns_cmd;
1147 for (i = 0; i < ha->max_fibre_devices; i++) {
1148 /* Issue GPN_ID */
1149 /* Prepare SNS command request. */
1150 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1151 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
1153 /* Prepare SNS command arguments -- port_id. */
1154 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1155 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1156 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1158 /* Execute SNS command. */
1159 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1160 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1161 if (rval != QLA_SUCCESS) {
1162 /*EMPTY*/
1163 ql_dbg(ql_dbg_disc, vha, 0x2032,
1164 "GPN_ID Send SNS failed (%d).\n", rval);
1165 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1166 sns_cmd->p.gpn_data[9] != 0x02) {
1167 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1168 "GPN_ID failed, rejected request, gpn_rsp:\n");
1169 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1170 sns_cmd->p.gpn_data, 16);
1171 rval = QLA_FUNCTION_FAILED;
1172 } else {
1173 /* Save portname */
1174 memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
1175 WWN_SIZE);
1178 /* Last device exit. */
1179 if (list[i].d_id.b.rsvd_1 != 0)
1180 break;
1183 return (rval);
1187 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
1188 * @vha: HA context
1189 * @list: switch info entries to populate
1191 * This command uses the old Execute SNS Command mailbox routine.
1193 * Returns 0 on success.
1195 static int
1196 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
1198 int rval = QLA_SUCCESS;
1199 struct qla_hw_data *ha = vha->hw;
1200 uint16_t i;
1201 struct sns_cmd_pkt *sns_cmd;
1203 for (i = 0; i < ha->max_fibre_devices; i++) {
1204 /* Issue GNN_ID */
1205 /* Prepare SNS command request. */
1206 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1207 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
1209 /* Prepare SNS command arguments -- port_id. */
1210 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1211 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1212 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1214 /* Execute SNS command. */
1215 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1216 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1217 if (rval != QLA_SUCCESS) {
1218 /*EMPTY*/
1219 ql_dbg(ql_dbg_disc, vha, 0x203f,
1220 "GNN_ID Send SNS failed (%d).\n", rval);
1221 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1222 sns_cmd->p.gnn_data[9] != 0x02) {
1223 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1224 "GNN_ID failed, rejected request, gnn_rsp:\n");
1225 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1226 sns_cmd->p.gnn_data, 16);
1227 rval = QLA_FUNCTION_FAILED;
1228 } else {
1229 /* Save nodename */
1230 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
1231 WWN_SIZE);
1233 ql_dbg(ql_dbg_disc, vha, 0x206e,
1234 "GID_PT entry - nn %8phN pn %8phN "
1235 "port_id=%02x%02x%02x.\n",
1236 list[i].node_name, list[i].port_name,
1237 list[i].d_id.b.domain, list[i].d_id.b.area,
1238 list[i].d_id.b.al_pa);
1241 /* Last device exit. */
1242 if (list[i].d_id.b.rsvd_1 != 0)
1243 break;
1246 return (rval);
1250 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
1251 * @vha: HA context
1253 * This command uses the old Execute SNS Command mailbox routine.
1255 * Returns 0 on success.
1257 static int
1258 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1260 int rval;
1261 struct qla_hw_data *ha = vha->hw;
1262 struct sns_cmd_pkt *sns_cmd;
1264 /* Issue RFT_ID. */
1265 /* Prepare SNS command request. */
1266 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1267 RFT_ID_SNS_DATA_SIZE);
1269 /* Prepare SNS command arguments -- port_id, FC-4 types */
1270 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1271 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1272 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1274 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1276 /* Execute SNS command. */
1277 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1278 sizeof(struct sns_cmd_pkt));
1279 if (rval != QLA_SUCCESS) {
1280 /*EMPTY*/
1281 ql_dbg(ql_dbg_disc, vha, 0x2060,
1282 "RFT_ID Send SNS failed (%d).\n", rval);
1283 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1284 sns_cmd->p.rft_data[9] != 0x02) {
1285 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1286 "RFT_ID failed, rejected request rft_rsp:\n");
1287 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1288 sns_cmd->p.rft_data, 16);
1289 rval = QLA_FUNCTION_FAILED;
1290 } else {
1291 ql_dbg(ql_dbg_disc, vha, 0x2073,
1292 "RFT_ID exiting normally.\n");
1295 return (rval);
1299 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
1300 * @vha: HA context
1302 * This command uses the old Execute SNS Command mailbox routine.
1304 * Returns 0 on success.
1306 static int
1307 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1309 int rval;
1310 struct qla_hw_data *ha = vha->hw;
1311 struct sns_cmd_pkt *sns_cmd;
1313 /* Issue RNN_ID. */
1314 /* Prepare SNS command request. */
1315 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1316 RNN_ID_SNS_DATA_SIZE);
1318 /* Prepare SNS command arguments -- port_id, nodename. */
1319 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1320 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1321 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1323 sns_cmd->p.cmd.param[4] = vha->node_name[7];
1324 sns_cmd->p.cmd.param[5] = vha->node_name[6];
1325 sns_cmd->p.cmd.param[6] = vha->node_name[5];
1326 sns_cmd->p.cmd.param[7] = vha->node_name[4];
1327 sns_cmd->p.cmd.param[8] = vha->node_name[3];
1328 sns_cmd->p.cmd.param[9] = vha->node_name[2];
1329 sns_cmd->p.cmd.param[10] = vha->node_name[1];
1330 sns_cmd->p.cmd.param[11] = vha->node_name[0];
1332 /* Execute SNS command. */
1333 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1334 sizeof(struct sns_cmd_pkt));
1335 if (rval != QLA_SUCCESS) {
1336 /*EMPTY*/
1337 ql_dbg(ql_dbg_disc, vha, 0x204a,
1338 "RNN_ID Send SNS failed (%d).\n", rval);
1339 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1340 sns_cmd->p.rnn_data[9] != 0x02) {
1341 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1342 "RNN_ID failed, rejected request, rnn_rsp:\n");
1343 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1344 sns_cmd->p.rnn_data, 16);
1345 rval = QLA_FUNCTION_FAILED;
1346 } else {
1347 ql_dbg(ql_dbg_disc, vha, 0x204c,
1348 "RNN_ID exiting normally.\n");
1351 return (rval);
1355 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1356 * @vha: HA context
1358 * Returns 0 on success.
1361 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1363 int ret, rval;
1364 uint16_t mb[MAILBOX_REGISTER_COUNT];
1365 struct qla_hw_data *ha = vha->hw;
1367 ret = QLA_SUCCESS;
1368 if (vha->flags.management_server_logged_in)
1369 return ret;
1371 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1372 0xfa, mb, BIT_1);
1373 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1374 if (rval == QLA_MEMORY_ALLOC_FAILED)
1375 ql_dbg(ql_dbg_disc, vha, 0x2085,
1376 "Failed management_server login: loopid=%x "
1377 "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1378 else
1379 ql_dbg(ql_dbg_disc, vha, 0x2024,
1380 "Failed management_server login: loopid=%x "
1381 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1382 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
1383 mb[7]);
1384 ret = QLA_FUNCTION_FAILED;
1385 } else
1386 vha->flags.management_server_logged_in = 1;
1388 return ret;
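/*
 * Informal note: the fabric_login() above targets the well-known
 * management server address FF FF FAh (hence the 0xff, 0xff, 0xfa
 * arguments), logging in the vha->mgmt_svr_loop_id handle that the
 * FDMI CT passthrough commands below are issued on.
 */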
1392 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1393 * @vha: HA context
1394 * @req_size: request size in bytes
1395 * @rsp_size: response size in bytes
1397 * Returns a pointer to the @ha's ms_iocb.
1399 void *
1400 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1401 uint32_t rsp_size)
1403 ms_iocb_entry_t *ms_pkt;
1404 struct qla_hw_data *ha = vha->hw;
1406 ms_pkt = ha->ms_iocb;
1407 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1409 ms_pkt->entry_type = MS_IOCB_TYPE;
1410 ms_pkt->entry_count = 1;
1411 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1412 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
1413 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1414 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1415 ms_pkt->total_dsd_count = cpu_to_le16(2);
1416 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1417 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1419 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
1420 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1422 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
1423 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
1425 return ms_pkt;
1429 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1430 * @vha: HA context
1431 * @req_size: request size in bytes
1432 * @rsp_size: response size in bytes
1434 * Returns a pointer to the @ha's ms_iocb.
1436 void *
1437 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1438 uint32_t rsp_size)
1440 struct ct_entry_24xx *ct_pkt;
1441 struct qla_hw_data *ha = vha->hw;
1443 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1444 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1446 ct_pkt->entry_type = CT_IOCB_TYPE;
1447 ct_pkt->entry_count = 1;
1448 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1449 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1450 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1451 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1452 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1453 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1455 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
1456 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1458 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
1459 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
1460 ct_pkt->vp_index = vha->vp_idx;
1462 return ct_pkt;
1465 static void
1466 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1468 struct qla_hw_data *ha = vha->hw;
1469 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1470 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1472 if (IS_FWI2_CAPABLE(ha)) {
1473 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1474 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1475 } else {
1476 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1477 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1482 * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for FDMI query.
1483 * @p: CT request buffer
1484 * @cmd: GS command
1485 * @rsp_size: response size in bytes
1487 * Returns a pointer to the initialized @ct_req.
1489 static inline struct ct_sns_req *
1490 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1491 uint16_t rsp_size)
1493 memset(p, 0, sizeof(struct ct_sns_pkt));
1495 p->p.req.header.revision = 0x01;
1496 p->p.req.header.gs_type = 0xFA;
1497 p->p.req.header.gs_subtype = 0x10;
1498 p->p.req.command = cpu_to_be16(cmd);
1499 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
1501 return &p->p.req;
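/*
 * Informal comparison with qla2x00_prep_ct_req() above: FDMI traffic is
 * addressed to the management service rather than the name server, so
 * the preamble here uses gs_type 0xFA (Management Service) with
 * gs_subtype 0x10 (HBA management / FDMI) instead of 0xFC / 0x02.
 */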
1504 uint
1505 qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
1507 uint speeds = 0;
1509 if (IS_CNA_CAPABLE(ha))
1510 return FDMI_PORT_SPEED_10GB;
1511 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
1512 if (ha->max_supported_speed == 2) {
1513 if (ha->min_supported_speed <= 6)
1514 speeds |= FDMI_PORT_SPEED_64GB;
1516 if (ha->max_supported_speed == 2 ||
1517 ha->max_supported_speed == 1) {
1518 if (ha->min_supported_speed <= 5)
1519 speeds |= FDMI_PORT_SPEED_32GB;
1521 if (ha->max_supported_speed == 2 ||
1522 ha->max_supported_speed == 1 ||
1523 ha->max_supported_speed == 0) {
1524 if (ha->min_supported_speed <= 4)
1525 speeds |= FDMI_PORT_SPEED_16GB;
1527 if (ha->max_supported_speed == 1 ||
1528 ha->max_supported_speed == 0) {
1529 if (ha->min_supported_speed <= 3)
1530 speeds |= FDMI_PORT_SPEED_8GB;
1532 if (ha->max_supported_speed == 0) {
1533 if (ha->min_supported_speed <= 2)
1534 speeds |= FDMI_PORT_SPEED_4GB;
1536 return speeds;
1538 if (IS_QLA2031(ha)) {
1539 if ((ha->pdev->subsystem_vendor == 0x103C) &&
1540 (ha->pdev->subsystem_device == 0x8002)) {
1541 speeds = FDMI_PORT_SPEED_16GB;
1542 } else {
1543 speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
1544 FDMI_PORT_SPEED_4GB;
1546 return speeds;
1548 if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
1549 return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
1550 FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
1551 if (IS_QLA24XX_TYPE(ha))
1552 return FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_2GB|
1553 FDMI_PORT_SPEED_1GB;
1554 if (IS_QLA23XX(ha))
1555 return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
1556 return FDMI_PORT_SPEED_1GB;
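/*
 * Informal reading of the 27xx/28xx branch above: the firmware-reported
 * max_supported_speed codes 0, 1 and 2 are treated as 16, 32 and 64 Gb
 * ceilings, and min_supported_speed codes 2..6 as 4 Gb up to 64 Gb
 * floors, so the returned mask covers every FDMI speed bit between the
 * floor and the ceiling. This describes the code, not the firmware
 * interface documentation.
 */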
1559 uint
1560 qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
1562 switch (ha->link_data_rate) {
1563 case PORT_SPEED_1GB:
1564 return FDMI_PORT_SPEED_1GB;
1565 case PORT_SPEED_2GB:
1566 return FDMI_PORT_SPEED_2GB;
1567 case PORT_SPEED_4GB:
1568 return FDMI_PORT_SPEED_4GB;
1569 case PORT_SPEED_8GB:
1570 return FDMI_PORT_SPEED_8GB;
1571 case PORT_SPEED_10GB:
1572 return FDMI_PORT_SPEED_10GB;
1573 case PORT_SPEED_16GB:
1574 return FDMI_PORT_SPEED_16GB;
1575 case PORT_SPEED_32GB:
1576 return FDMI_PORT_SPEED_32GB;
1577 case PORT_SPEED_64GB:
1578 return FDMI_PORT_SPEED_64GB;
1579 default:
1580 return FDMI_PORT_SPEED_UNKNOWN;
1585 * qla2x00_hba_attributes() - Perform HBA attribute registration.
1586 * @vha: HA context
1587 * @entries: buffer in which the HBA attribute entries are built
1588 * @callopt: option selecting the extended or standard FDMI
1589 * command parameter
1591 * Returns the number of bytes of attribute entries built in @entries.
1593 static unsigned long
1594 qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
1595 unsigned int callopt)
1597 struct qla_hw_data *ha = vha->hw;
1598 struct init_cb_24xx *icb24 = (void *)ha->init_cb;
1599 struct new_utsname *p_sysid = utsname();
1600 struct ct_fdmi_hba_attr *eiter;
1601 uint16_t alen;
1602 unsigned long size = 0;
1604 /* Nodename. */
1605 eiter = entries + size;
1606 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1607 memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
1608 alen = sizeof(eiter->a.node_name);
1609 alen += FDMI_ATTR_TYPELEN(eiter);
1610 eiter->len = cpu_to_be16(alen);
1611 size += alen;
1612 ql_dbg(ql_dbg_disc, vha, 0x20a0,
1613 "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
1614 /* Manufacturer. */
1615 eiter = entries + size;
1616 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1617 alen = scnprintf(
1618 eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1619 "%s", "QLogic Corporation");
1620 alen += FDMI_ATTR_ALIGNMENT(alen);
1621 alen += FDMI_ATTR_TYPELEN(eiter);
1622 eiter->len = cpu_to_be16(alen);
1623 size += alen;
1624 ql_dbg(ql_dbg_disc, vha, 0x20a1,
1625 "MANUFACTURER = %s.\n", eiter->a.manufacturer);
1626 /* Serial number. */
1627 eiter = entries + size;
1628 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1629 alen = 0;
1630 if (IS_FWI2_CAPABLE(ha)) {
1631 alen = qla2xxx_get_vpd_field(vha, "SN",
1632 eiter->a.serial_num, sizeof(eiter->a.serial_num));
1634 if (!alen) {
1635 uint32_t sn = ((ha->serial0 & 0x1f) << 16) |
1636 (ha->serial2 << 8) | ha->serial1;
1637 alen = scnprintf(
1638 eiter->a.serial_num, sizeof(eiter->a.serial_num),
1639 "%c%05d", 'A' + sn / 100000, sn % 100000);
1641 alen += FDMI_ATTR_ALIGNMENT(alen);
1642 alen += FDMI_ATTR_TYPELEN(eiter);
1643 eiter->len = cpu_to_be16(alen);
1644 size += alen;
1645 ql_dbg(ql_dbg_disc, vha, 0x20a2,
1646 "SERIAL NUMBER = %s.\n", eiter->a.serial_num);
1647 /* Model name. */
1648 eiter = entries + size;
1649 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1650 alen = scnprintf(
1651 eiter->a.model, sizeof(eiter->a.model),
1652 "%s", ha->model_number);
1653 alen += FDMI_ATTR_ALIGNMENT(alen);
1654 alen += FDMI_ATTR_TYPELEN(eiter);
1655 eiter->len = cpu_to_be16(alen);
1656 size += alen;
1657 ql_dbg(ql_dbg_disc, vha, 0x20a3,
1658 "MODEL NAME = %s.\n", eiter->a.model);
1659 /* Model description. */
1660 eiter = entries + size;
1661 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1662 alen = scnprintf(
1663 eiter->a.model_desc, sizeof(eiter->a.model_desc),
1664 "%s", ha->model_desc);
1665 alen += FDMI_ATTR_ALIGNMENT(alen);
1666 alen += FDMI_ATTR_TYPELEN(eiter);
1667 eiter->len = cpu_to_be16(alen);
1668 size += alen;
1669 ql_dbg(ql_dbg_disc, vha, 0x20a4,
1670 "MODEL DESCRIPTION = %s.\n", eiter->a.model_desc);
1671 /* Hardware version. */
1672 eiter = entries + size;
1673 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1674 alen = 0;
1675 if (IS_FWI2_CAPABLE(ha)) {
1676 if (!alen) {
1677 alen = qla2xxx_get_vpd_field(vha, "MN",
1678 eiter->a.hw_version, sizeof(eiter->a.hw_version));
1680 if (!alen) {
1681 alen = qla2xxx_get_vpd_field(vha, "EC",
1682 eiter->a.hw_version, sizeof(eiter->a.hw_version));
1685 if (!alen) {
1686 alen = scnprintf(
1687 eiter->a.hw_version, sizeof(eiter->a.hw_version),
1688 "HW:%s", ha->adapter_id);
1690 alen += FDMI_ATTR_ALIGNMENT(alen);
1691 alen += FDMI_ATTR_TYPELEN(eiter);
1692 eiter->len = cpu_to_be16(alen);
1693 size += alen;
1694 ql_dbg(ql_dbg_disc, vha, 0x20a5,
1695 "HARDWARE VERSION = %s.\n", eiter->a.hw_version);
1696 /* Driver version. */
1697 eiter = entries + size;
1698 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1699 alen = scnprintf(
1700 eiter->a.driver_version, sizeof(eiter->a.driver_version),
1701 "%s", qla2x00_version_str);
1702 alen += FDMI_ATTR_ALIGNMENT(alen);
1703 alen += FDMI_ATTR_TYPELEN(eiter);
1704 eiter->len = cpu_to_be16(alen);
1705 size += alen;
1706 ql_dbg(ql_dbg_disc, vha, 0x20a6,
1707 "DRIVER VERSION = %s.\n", eiter->a.driver_version);
1708 /* Option ROM version. */
1709 eiter = entries + size;
1710 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1711 alen = scnprintf(
1712 eiter->a.orom_version, sizeof(eiter->a.orom_version),
1713 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1714 alen += FDMI_ATTR_ALIGNMENT(alen);
1715 alen += FDMI_ATTR_TYPELEN(eiter);
1716 eiter->len = cpu_to_be16(alen);
1717 size += alen;
1719 ql_dbg(ql_dbg_disc, vha, 0x20a7,
1720 "OPTROM VERSION = %d.%02d.\n",
1721 eiter->a.orom_version[1], eiter->a.orom_version[0]);
1722 /* Firmware version */
1723 eiter = entries + size;
1724 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1725 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1726 sizeof(eiter->a.fw_version));
alen = strlen(eiter->a.fw_version); /* recompute; do not reuse the previous attribute's alen */
1727 alen += FDMI_ATTR_ALIGNMENT(alen);
1728 alen += FDMI_ATTR_TYPELEN(eiter);
1729 eiter->len = cpu_to_be16(alen);
1730 size += alen;
1731 ql_dbg(ql_dbg_disc, vha, 0x20a8,
1732 "FIRMWARE VERSION = %s.\n", eiter->a.fw_version);
1733 if (callopt == CALLOPT_FDMI1)
1734 goto done;
1735 /* OS Name and Version */
1736 eiter = entries + size;
1737 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
1738 alen = 0;
1739 if (p_sysid) {
1740 alen = scnprintf(
1741 eiter->a.os_version, sizeof(eiter->a.os_version),
1742 "%s %s %s",
1743 p_sysid->sysname, p_sysid->release, p_sysid->machine);
1745 if (!alen) {
1746 alen = scnprintf(
1747 eiter->a.os_version, sizeof(eiter->a.os_version),
1748 "%s %s",
1749 "Linux", fc_host_system_hostname(vha->host));
1751 alen += FDMI_ATTR_ALIGNMENT(alen);
1752 alen += FDMI_ATTR_TYPELEN(eiter);
1753 eiter->len = cpu_to_be16(alen);
1754 size += alen;
1755 ql_dbg(ql_dbg_disc, vha, 0x20a9,
1756 "OS VERSION = %s.\n", eiter->a.os_version);
1757 /* MAX CT Payload Length */
1758 eiter = entries + size;
1759 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
1760 eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
1761 icb24->frame_payload_size : ha->init_cb->frame_payload_size));
1762 alen = sizeof(eiter->a.max_ct_len);
1763 alen += FDMI_ATTR_TYPELEN(eiter);
1764 eiter->len = cpu_to_be16(alen);
1765 size += alen;
1766 ql_dbg(ql_dbg_disc, vha, 0x20aa,
1767 "CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len));
1768 /* Node Symbolic Name */
1769 eiter = entries + size;
1770 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
1771 alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
1772 sizeof(eiter->a.sym_name));
1773 alen += FDMI_ATTR_ALIGNMENT(alen);
1774 alen += FDMI_ATTR_TYPELEN(eiter);
1775 eiter->len = cpu_to_be16(alen);
1776 size += alen;
1777 ql_dbg(ql_dbg_disc, vha, 0x20ab,
1778 "SYMBOLIC NAME = %s.\n", eiter->a.sym_name);
1779 /* Vendor Specific information */
1780 eiter = entries + size;
1781 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_SPECIFIC_INFO);
1782 eiter->a.vendor_specific_info = cpu_to_be32(PCI_VENDOR_ID_QLOGIC);
1783 alen = sizeof(eiter->a.vendor_specific_info);
1784 alen += FDMI_ATTR_TYPELEN(eiter);
1785 eiter->len = cpu_to_be16(alen);
1786 size += alen;
1787 ql_dbg(ql_dbg_disc, vha, 0x20ac,
1788 "VENDOR SPECIFIC INFO = 0x%x.\n",
1789 be32_to_cpu(eiter->a.vendor_specific_info));
1790 /* Num Ports */
1791 eiter = entries + size;
1792 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
1793 eiter->a.num_ports = cpu_to_be32(1);
1794 alen = sizeof(eiter->a.num_ports);
1795 alen += FDMI_ATTR_TYPELEN(eiter);
1796 eiter->len = cpu_to_be16(alen);
1797 size += alen;
1798 ql_dbg(ql_dbg_disc, vha, 0x20ad,
1799 "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
1800 /* Fabric Name */
1801 eiter = entries + size;
1802 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
1803 memcpy(eiter->a.fabric_name, vha->fabric_node_name,
1804 sizeof(eiter->a.fabric_name));
1805 alen = sizeof(eiter->a.fabric_name);
1806 alen += FDMI_ATTR_TYPELEN(eiter);
1807 eiter->len = cpu_to_be16(alen);
1808 size += alen;
1809 ql_dbg(ql_dbg_disc, vha, 0x20ae,
1810 "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
1811 /* BIOS Version */
1812 eiter = entries + size;
1813 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
1814 alen = scnprintf(
1815 eiter->a.bios_name, sizeof(eiter->a.bios_name),
1816 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1817 alen += FDMI_ATTR_ALIGNMENT(alen);
1818 alen += FDMI_ATTR_TYPELEN(eiter);
1819 eiter->len = cpu_to_be16(alen);
1820 size += alen;
1821 ql_dbg(ql_dbg_disc, vha, 0x20af,
1822 "BIOS NAME = %s\n", eiter->a.bios_name);
1823 /* Vendor Identifier */
1824 eiter = entries + size;
1825 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_IDENTIFIER);
1826 alen = scnprintf(
1827 eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
1828 "%s", "QLGC");
1829 alen += FDMI_ATTR_ALIGNMENT(alen);
1830 alen += FDMI_ATTR_TYPELEN(eiter);
1831 eiter->len = cpu_to_be16(alen);
1832 size += alen;
1833 ql_dbg(ql_dbg_disc, vha, 0x20b0,
1834 "VENDOR IDENTIFIER = %s.\n", eiter->a.vendor_identifier);
1835 done:
1836 return size;
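/*
 * Informal note on the attribute-building pattern used throughout this
 * function (and qla2x00_port_attributes() below): each entry is
 *   [ 2-byte type | 2-byte length | value padded to a 4-byte boundary ]
 * where the length covers the 4-byte type/length header as well. The
 * FDMI_ATTR_TYPELEN()/FDMI_ATTR_ALIGNMENT() helpers, defined elsewhere
 * in the driver, are assumed here to add that header size and the
 * padding respectively.
 */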
1840 * qla2x00_port_attributes() - Perform port attribute registration.
1841 * @vha: HA context
1842 * @entries: buffer in which the port attribute entries are built
1843 * @callopt: option selecting the extended or standard FDMI
1844 * command parameter
1846 * Returns the number of bytes of attribute entries built in @entries.
1848 static unsigned long
1849 qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
1850 unsigned int callopt)
1852 struct qla_hw_data *ha = vha->hw;
1853 struct init_cb_24xx *icb24 = (void *)ha->init_cb;
1854 struct new_utsname *p_sysid = utsname();
1855 char *hostname = p_sysid ?
1856 p_sysid->nodename : fc_host_system_hostname(vha->host);
1857 struct ct_fdmi_port_attr *eiter;
1858 uint16_t alen;
1859 unsigned long size = 0;
1861 /* FC4 types. */
1862 eiter = entries + size;
1863 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1864 eiter->a.fc4_types[0] = 0x00;
1865 eiter->a.fc4_types[1] = 0x00;
1866 eiter->a.fc4_types[2] = 0x01;
1867 eiter->a.fc4_types[3] = 0x00;
1868 alen = sizeof(eiter->a.fc4_types);
1869 alen += FDMI_ATTR_TYPELEN(eiter);
1870 eiter->len = cpu_to_be16(alen);
1871 size += alen;
1872 ql_dbg(ql_dbg_disc, vha, 0x20c0,
1873 "FC4 TYPES = %016llx.\n", *(uint64_t *)eiter->a.fc4_types);
1874 if (vha->flags.nvme_enabled) {
1875 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
1876 ql_dbg(ql_dbg_disc, vha, 0x211f,
1877 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
1878 eiter->a.fc4_types[6]);
1880 /* Supported speed. */
1881 eiter = entries + size;
1882 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1883 eiter->a.sup_speed = cpu_to_be32(
1884 qla25xx_fdmi_port_speed_capability(ha));
1885 alen = sizeof(eiter->a.sup_speed);
1886 alen += FDMI_ATTR_TYPELEN(eiter);
1887 eiter->len = cpu_to_be16(alen);
1888 size += alen;
1889 ql_dbg(ql_dbg_disc, vha, 0x20c1,
1890 "SUPPORTED SPEED = %x.\n", be32_to_cpu(eiter->a.sup_speed));
1891 /* Current speed. */
1892 eiter = entries + size;
1893 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1894 eiter->a.cur_speed = cpu_to_be32(
1895 qla25xx_fdmi_port_speed_currently(ha));
1896 alen = sizeof(eiter->a.cur_speed);
1897 alen += FDMI_ATTR_TYPELEN(eiter);
1898 eiter->len = cpu_to_be16(alen);
1899 size += alen;
1900 ql_dbg(ql_dbg_disc, vha, 0x20c2,
1901 "CURRENT SPEED = %x.\n", be32_to_cpu(eiter->a.cur_speed));
1902 /* Max frame size. */
1903 eiter = entries + size;
1904 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1905 eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
1906 icb24->frame_payload_size : ha->init_cb->frame_payload_size));
1907 alen = sizeof(eiter->a.max_frame_size);
1908 alen += FDMI_ATTR_TYPELEN(eiter);
1909 eiter->len = cpu_to_be16(alen);
1910 size += alen;
1911 ql_dbg(ql_dbg_disc, vha, 0x20c3,
1912 "MAX FRAME SIZE = %x.\n", be32_to_cpu(eiter->a.max_frame_size));
1913 /* OS device name. */
1914 eiter = entries + size;
1915 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1916 alen = scnprintf(
1917 eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1918 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1919 alen += FDMI_ATTR_ALIGNMENT(alen);
1920 alen += FDMI_ATTR_TYPELEN(eiter);
1921 eiter->len = cpu_to_be16(alen);
1922 size += alen;
1923 ql_dbg(ql_dbg_disc, vha, 0x20c4,
1924 "OS DEVICE NAME = %s.\n", eiter->a.os_dev_name);
1925 /* Hostname. */
1926 eiter = entries + size;
1927 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
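/*
 * utsname()->nodename is "(none)" until user space sets a hostname;
 * fall back to a fixed placeholder so the fabric never sees an
 * empty host name.
 */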
1928 if (!*hostname || !strncmp(hostname, "(none)", 6))
1929 hostname = "Linux-default";
1930 alen = scnprintf(
1931 eiter->a.host_name, sizeof(eiter->a.host_name),
1932 "%s", hostname);
1933 alen += FDMI_ATTR_ALIGNMENT(alen);
1934 alen += FDMI_ATTR_TYPELEN(eiter);
1935 eiter->len = cpu_to_be16(alen);
1936 size += alen;
1937 ql_dbg(ql_dbg_disc, vha, 0x20c5,
1938 "HOSTNAME = %s.\n", eiter->a.host_name);
1940 if (callopt == CALLOPT_FDMI1)
1941 goto done;
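/*
 * The attributes below are only defined for FDMI-2 and Smart SAN
 * registrations; an FDMI-1 registration stops at the hostname.
 */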
1943 /* Node Name */
1944 eiter = entries + size;
1945 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
1946 memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
1947 alen = sizeof(eiter->a.node_name);
1948 alen += FDMI_ATTR_TYPELEN(eiter);
1949 eiter->len = cpu_to_be16(alen);
1950 size += alen;
1951 ql_dbg(ql_dbg_disc, vha, 0x20c6,
1952 "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
1954 /* Port Name */
1955 eiter = entries + size;
1956 eiter->type = cpu_to_be16(FDMI_PORT_NAME);
1957 memcpy(eiter->a.port_name, vha->port_name, sizeof(eiter->a.port_name));
1958 alen = sizeof(eiter->a.port_name);
1959 alen += FDMI_ATTR_TYPELEN(eiter);
1960 eiter->len = cpu_to_be16(alen);
1961 size += alen;
1962 ql_dbg(ql_dbg_disc, vha, 0x20c7,
1963 "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name));
1965 /* Port Symbolic Name */
1966 eiter = entries + size;
1967 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
1968 alen = qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
1969 sizeof(eiter->a.port_sym_name));
1970 alen += FDMI_ATTR_ALIGNMENT(alen);
1971 alen += FDMI_ATTR_TYPELEN(eiter);
1972 eiter->len = cpu_to_be16(alen);
1973 size += alen;
1974 ql_dbg(ql_dbg_disc, vha, 0x20c8,
1975 "PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name);
1977 /* Port Type */
1978 eiter = entries + size;
1979 eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
1980 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
1981 alen = sizeof(eiter->a.port_type);
1982 alen += FDMI_ATTR_TYPELEN(eiter);
1983 eiter->len = cpu_to_be16(alen);
1984 size += alen;
1985 ql_dbg(ql_dbg_disc, vha, 0x20c9,
1986 "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type));
1988 /* Supported Class of Service */
1989 eiter = entries + size;
1990 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
1991 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
1992 alen = sizeof(eiter->a.port_supported_cos);
1993 alen += FDMI_ATTR_TYPELEN(eiter);
1994 eiter->len = cpu_to_be16(alen);
1995 size += alen;
1996 ql_dbg(ql_dbg_disc, vha, 0x20ca,
1997 "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos));
1999 /* Port Fabric Name */
2000 eiter = entries + size;
2001 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2002 memcpy(eiter->a.fabric_name, vha->fabric_node_name,
2003 sizeof(eiter->a.fabric_name));
2004 alen = sizeof(eiter->a.fabric_name);
2005 alen += FDMI_ATTR_TYPELEN(eiter);
2006 eiter->len = cpu_to_be16(alen);
2007 size += alen;
2008 ql_dbg(ql_dbg_disc, vha, 0x20cb,
2009 "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2011 /* FC4_type */
2012 eiter = entries + size;
2013 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
2014 eiter->a.port_fc4_type[0] = 0x00;
2015 eiter->a.port_fc4_type[1] = 0x00;
2016 eiter->a.port_fc4_type[2] = 0x01;
2017 eiter->a.port_fc4_type[3] = 0x00;
2018 alen = sizeof(eiter->a.port_fc4_type);
2019 alen += FDMI_ATTR_TYPELEN(eiter);
2020 eiter->len = cpu_to_be16(alen);
2021 size += alen;
2022 ql_dbg(ql_dbg_disc, vha, 0x20cc,
2023 "PORT ACTIVE FC4 TYPE = %016llx.\n",
2024 *(uint64_t *)eiter->a.port_fc4_type);
2026 /* Port State */
2027 eiter = entries + size;
2028 eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2029 eiter->a.port_state = cpu_to_be32(2);
2030 alen = sizeof(eiter->a.port_state);
2031 alen += FDMI_ATTR_TYPELEN(eiter);
2032 eiter->len = cpu_to_be16(alen);
2033 size += alen;
2034 ql_dbg(ql_dbg_disc, vha, 0x20cd,
2035 "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state));
2037 /* Number of Ports */
2038 eiter = entries + size;
2039 eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2040 eiter->a.num_ports = cpu_to_be32(1);
2041 alen = sizeof(eiter->a.num_ports);
2042 alen += FDMI_ATTR_TYPELEN(eiter);
2043 eiter->len = cpu_to_be16(alen);
2044 size += alen;
2045 ql_dbg(ql_dbg_disc, vha, 0x20ce,
2046 "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
2048 /* Port Identifier */
2049 eiter = entries + size;
2050 eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER);
2051 eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2052 alen = sizeof(eiter->a.port_id);
2053 alen += FDMI_ATTR_TYPELEN(eiter);
2054 eiter->len = cpu_to_be16(alen);
2055 size += alen;
2056 ql_dbg(ql_dbg_disc, vha, 0x20cf,
2057 "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id));
2059 if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan)
2060 goto done;
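/*
 * The remaining attributes are QLogic Smart SAN extensions and are
 * only sent for CALLOPT_FDMI2_SMARTSAN when the ql2xsmartsan module
 * parameter is enabled.
 */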
2062 /* Smart SAN Service Category (Populate Smart SAN Initiator)*/
2063 eiter = entries + size;
2064 eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE);
2065 alen = scnprintf(
2066 eiter->a.smartsan_service, sizeof(eiter->a.smartsan_service),
2067 "%s", "Smart SAN Initiator");
2068 alen += FDMI_ATTR_ALIGNMENT(alen);
2069 alen += FDMI_ATTR_TYPELEN(eiter);
2070 eiter->len = cpu_to_be16(alen);
2071 size += alen;
2072 ql_dbg(ql_dbg_disc, vha, 0x20d0,
2073 "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service);
2075 /* Smart SAN GUID (NWWN+PWWN) */
2076 eiter = entries + size;
2077 eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID);
2078 memcpy(eiter->a.smartsan_guid, vha->node_name, WWN_SIZE);
2079 memcpy(eiter->a.smartsan_guid + WWN_SIZE, vha->port_name, WWN_SIZE);
2080 alen = sizeof(eiter->a.smartsan_guid);
2081 alen += FDMI_ATTR_TYPELEN(eiter);
2082 eiter->len = cpu_to_be16(alen);
2083 size += alen;
2084 ql_dbg(ql_dbg_disc, vha, 0x20d1,
2085 "Smart SAN GUID = %016llx-%016llx\n",
2086 wwn_to_u64(eiter->a.smartsan_guid),
2087 wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE));
2089 /* Smart SAN Version (populate "Smart SAN Version 1.0") */
2090 eiter = entries + size;
2091 eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION);
2092 alen = scnprintf(
2093 eiter->a.smartsan_version, sizeof(eiter->a.smartsan_version),
2094 "%s", "Smart SAN Version 2.0");
2095 alen += FDMI_ATTR_ALIGNMENT(alen);
2096 alen += FDMI_ATTR_TYPELEN(eiter);
2097 eiter->len = cpu_to_be16(alen);
2098 size += alen;
2099 ql_dbg(ql_dbg_disc, vha, 0x20d2,
2100 "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version);
2102 /* Smart SAN Product Name (Specify Adapter Model No) */
2103 eiter = entries + size;
2104 eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME);
2105 alen = scnprintf(eiter->a.smartsan_prod_name,
2106 sizeof(eiter->a.smartsan_prod_name),
2107 "ISP%04x", ha->pdev->device);
2108 alen += FDMI_ATTR_ALIGNMENT(alen);
2109 alen += FDMI_ATTR_TYPELEN(eiter);
2110 eiter->len = cpu_to_be16(alen);
2111 size += alen;
2112 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2113 "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name);
2115 /* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */
2116 eiter = entries + size;
2117 eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO);
2118 eiter->a.smartsan_port_info = cpu_to_be32(vha->vp_idx ? 2 : 1);
2119 alen = sizeof(eiter->a.smartsan_port_info);
2120 alen += FDMI_ATTR_TYPELEN(eiter);
2121 eiter->len = cpu_to_be16(alen);
2122 size += alen;
2123 ql_dbg(ql_dbg_disc, vha, 0x20d4,
2124 "SMARTSAN PORT INFO = %x\n", eiter->a.smartsan_port_info);
2126 /* Smart SAN Security Support */
2127 eiter = entries + size;
2128 eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT);
2129 eiter->a.smartsan_security_support = cpu_to_be32(1);
2130 alen = sizeof(eiter->a.smartsan_security_support);
2131 alen += FDMI_ATTR_TYPELEN(eiter);
2132 eiter->len = cpu_to_be16(alen);
2133 size += alen;
2134 ql_dbg(ql_dbg_disc, vha, 0x20d6,
2135 "SMARTSAN SECURITY SUPPORT = %d\n",
2136 be32_to_cpu(eiter->a.smartsan_security_support));
2138 done:
2139 return size;
2143 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
2144 * @vha: HA context
2145 * @callopt: Option to issue FDMI registration
2147 * Returns 0 on success.
2149 static int
2150 qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt)
2152 struct qla_hw_data *ha = vha->hw;
2153 unsigned long size = 0;
2154 unsigned int rval, count;
2155 ms_iocb_entry_t *ms_pkt;
2156 struct ct_sns_req *ct_req;
2157 struct ct_sns_rsp *ct_rsp;
2158 void *entries;
2160 count = callopt != CALLOPT_FDMI1 ?
2161 FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT;
2163 size = RHBA_RSP_SIZE;
2165 ql_dbg(ql_dbg_disc, vha, 0x20e0,
2166 "RHBA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2168 /* Request size adjusted after CT preparation */
2169 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2171 /* Prepare CT request */
2172 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, size);
2173 ct_rsp = &ha->ct_sns->p.rsp;
2175 /* Prepare FDMI command entries */
2176 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name,
2177 sizeof(ct_req->req.rhba.hba_identifier));
2178 size += sizeof(ct_req->req.rhba.hba_identifier);
2180 ct_req->req.rhba.entry_count = cpu_to_be32(1);
2181 size += sizeof(ct_req->req.rhba.entry_count);
2183 memcpy(ct_req->req.rhba.port_name, vha->port_name,
2184 sizeof(ct_req->req.rhba.port_name));
2185 size += sizeof(ct_req->req.rhba.port_name);
2187 /* Attribute count */
2188 ct_req->req.rhba.attrs.count = cpu_to_be32(count);
2189 size += sizeof(ct_req->req.rhba.attrs.count);
2191 /* Attribute block */
2192 entries = &ct_req->req.rhba.attrs.entry;
2194 size += qla2x00_hba_attributes(vha, entries, callopt);
2196 /* Update MS request size. */
2197 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
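/* The 16 bytes added above account for the CT IU preamble. */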
2199 ql_dbg(ql_dbg_disc, vha, 0x20e1,
2200 "RHBA %016llx %016llx.\n",
2201 wwn_to_u64(ct_req->req.rhba.hba_identifier),
2202 wwn_to_u64(ct_req->req.rhba.port_name));
2204 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20e2,
2205 entries, size);
2207 /* Execute MS IOCB */
2208 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2209 sizeof(*ha->ms_iocb));
2210 if (rval) {
2211 ql_dbg(ql_dbg_disc, vha, 0x20e3,
2212 "RHBA iocb failed (%d).\n", rval);
2213 return rval;
2216 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA");
2217 if (rval) {
2218 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2219 ct_rsp->header.explanation_code ==
2220 CT_EXPL_ALREADY_REGISTERED) {
2221 ql_dbg(ql_dbg_disc, vha, 0x20e4,
2222 "RHBA already registered.\n");
2223 return QLA_ALREADY_REGISTERED;
2226 ql_dbg(ql_dbg_disc, vha, 0x20e5,
2227 "RHBA failed, CT Reason %#x, CT Explanation %#x\n",
2228 ct_rsp->header.reason_code,
2229 ct_rsp->header.explanation_code);
2230 return rval;
2233 ql_dbg(ql_dbg_disc, vha, 0x20e6, "RHBA exiting normally.\n");
2234 return rval;
2238 static int
2239 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2241 int rval;
2242 struct qla_hw_data *ha = vha->hw;
2243 ms_iocb_entry_t *ms_pkt;
2244 struct ct_sns_req *ct_req;
2245 struct ct_sns_rsp *ct_rsp;
2246 /* Issue DHBA */
2247 /* Prepare common MS IOCB */
2248 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2249 DHBA_RSP_SIZE);
2250 /* Prepare CT request */
2251 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2252 ct_rsp = &ha->ct_sns->p.rsp;
2253 /* Prepare FDMI command arguments -- portname. */
2254 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2255 ql_dbg(ql_dbg_disc, vha, 0x2036,
2256 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2257 /* Execute MS IOCB */
2258 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2259 sizeof(ms_iocb_entry_t));
2260 if (rval != QLA_SUCCESS) {
2261 /*EMPTY*/
2262 ql_dbg(ql_dbg_disc, vha, 0x2037,
2263 "DHBA issue IOCB failed (%d).\n", rval);
2264 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2265 QLA_SUCCESS) {
2266 rval = QLA_FUNCTION_FAILED;
2267 } else {
2268 ql_dbg(ql_dbg_disc, vha, 0x2038,
2269 "DHBA exiting normally.\n");
2271 return rval;
2275 * qla2x00_fdmi_rprt() - perform RPRT registration
2276 * @vha: HA context
2277 * @callopt: Option to issue extended or standard FDMI
2278 * command parameter
2280 * Returns 0 on success.
2282 static int
2283 qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt)
2285 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2286 struct qla_hw_data *ha = vha->hw;
2287 ulong size = 0;
2288 uint rval, count;
2289 ms_iocb_entry_t *ms_pkt;
2290 struct ct_sns_req *ct_req;
2291 struct ct_sns_rsp *ct_rsp;
2292 void *entries;
2293 count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
2294 FDMI2_SMARTSAN_PORT_ATTR_COUNT :
2295 callopt != CALLOPT_FDMI1 ?
2296 FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
2298 size = RPRT_RSP_SIZE;
2299 ql_dbg(ql_dbg_disc, vha, 0x20e8,
2300 "RPRT (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2301 /* Request size adjusted after CT preparation */
2302 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2303 /* Prepare CT request */
2304 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPRT_CMD, size);
2305 ct_rsp = &ha->ct_sns->p.rsp;
2306 /* Prepare FDMI command entries */
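/*
 * RPRT registers this port under the physical adapter's HBA
 * identifier (base_vha->port_name); this is how NPIV vports attach
 * their port attributes to the already registered HBA entry.
 */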
2307 memcpy(ct_req->req.rprt.hba_identifier, base_vha->port_name,
2308 sizeof(ct_req->req.rprt.hba_identifier));
2309 size += sizeof(ct_req->req.rprt.hba_identifier);
2310 memcpy(ct_req->req.rprt.port_name, vha->port_name,
2311 sizeof(ct_req->req.rprt.port_name));
2312 size += sizeof(ct_req->req.rprt.port_name);
2313 /* Attribute count */
2314 ct_req->req.rprt.attrs.count = cpu_to_be32(count);
2315 size += sizeof(ct_req->req.rprt.attrs.count);
2316 /* Attribute block */
2317 entries = ct_req->req.rprt.attrs.entry;
2318 size += qla2x00_port_attributes(vha, entries, callopt);
2319 /* Update MS request size. */
2320 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2321 ql_dbg(ql_dbg_disc, vha, 0x20e9,
2322 "RPRT %016llx %016llx.\n",
2323 wwn_to_u64(ct_req->req.rprt.hba_identifier),
2324 wwn_to_u64(ct_req->req.rprt.port_name));
2325 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ea,
2326 entries, size);
2327 /* Execute MS IOCB */
2328 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2329 sizeof(*ha->ms_iocb));
2330 if (rval) {
2331 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2332 "RPRT iocb failed (%d).\n", rval);
2333 return rval;
2335 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT");
2336 if (rval) {
2337 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2338 ct_rsp->header.explanation_code ==
2339 CT_EXPL_ALREADY_REGISTERED) {
2340 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2341 "RPRT already registered.\n");
2342 return QLA_ALREADY_REGISTERED;
2345 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2346 "RPRT failed, CT Reason code: %#x, CT Explanation %#x\n",
2347 ct_rsp->header.reason_code,
2348 ct_rsp->header.explanation_code);
2349 return rval;
2351 ql_dbg(ql_dbg_disc, vha, 0x20ee, "RPRT exiting normally.\n");
2352 return rval;
2356 * qla2x00_fdmi_rpa() - perform RPA registration
2357 * @vha: HA context
2358 * @callopt: Option to issue FDMI registration
2360 * Returns 0 on success.
2362 static int
2363 qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt)
2365 struct qla_hw_data *ha = vha->hw;
2366 ulong size = 0;
2367 uint rval, count;
2368 ms_iocb_entry_t *ms_pkt;
2369 struct ct_sns_req *ct_req;
2370 struct ct_sns_rsp *ct_rsp;
2371 void *entries;
2373 count =
2374 callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
2375 FDMI2_SMARTSAN_PORT_ATTR_COUNT :
2376 callopt != CALLOPT_FDMI1 ?
2377 FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
2379 size =
2380 callopt != CALLOPT_FDMI1 ?
2381 SMARTSAN_RPA_RSP_SIZE : RPA_RSP_SIZE;
2383 ql_dbg(ql_dbg_disc, vha, 0x20f0,
2384 "RPA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2386 /* Request size adjusted after CT preparation */
2387 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2389 /* Prepare CT request */
2390 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, size);
2391 ct_rsp = &ha->ct_sns->p.rsp;
2393 /* Prepare FDMI command entries. */
2394 memcpy(ct_req->req.rpa.port_name, vha->port_name,
2395 sizeof(ct_req->req.rpa.port_name));
2396 size += sizeof(ct_req->req.rpa.port_name);
2398 /* Attribute count */
2399 ct_req->req.rpa.attrs.count = cpu_to_be32(count);
2400 size += sizeof(ct_req->req.rpa.attrs.count);
2402 /* Attribute block */
2403 entries = ct_req->req.rpa.attrs.entry;
2405 size += qla2x00_port_attributes(vha, entries, callopt);
2407 /* Update MS request size. */
2408 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2410 ql_dbg(ql_dbg_disc, vha, 0x20f1,
2411 "RPA %016llx.\n", wwn_to_u64(ct_req->req.rpa.port_name));
2413 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f2,
2414 entries, size);
2416 /* Execute MS IOCB */
2417 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2418 sizeof(*ha->ms_iocb));
2419 if (rval) {
2420 ql_dbg(ql_dbg_disc, vha, 0x20f3,
2421 "RPA iocb failed (%d).\n", rval);
2422 return rval;
2425 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA");
2426 if (rval) {
2427 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2428 ct_rsp->header.explanation_code ==
2429 CT_EXPL_ALREADY_REGISTERED) {
2430 ql_dbg(ql_dbg_disc, vha, 0x20f4,
2431 "RPA already registered.\n");
2432 return QLA_ALREADY_REGISTERED;
2435 ql_dbg(ql_dbg_disc, vha, 0x20f5,
2436 "RPA failed, CT Reason code: %#x, CT Explanation %#x\n",
2437 ct_rsp->header.reason_code,
2438 ct_rsp->header.explanation_code);
2439 return rval;
2442 ql_dbg(ql_dbg_disc, vha, 0x20f6, "RPA exiting normally.\n");
2443 return rval;
2447 * qla2x00_fdmi_register() - perform FDMI registration with the fabric
2448 * @vha: HA context
2450 * Returns 0 on success.
2453 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2455 int rval = QLA_SUCCESS;
2456 struct qla_hw_data *ha = vha->hw;
2458 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2459 IS_QLAFX00(ha))
2460 return rval;
2462 rval = qla2x00_mgmt_svr_login(vha);
2463 if (rval)
2464 return rval;
2466 /* For npiv/vport send rprt only */
2467 if (vha->vp_idx) {
2468 if (ql2xsmartsan)
2469 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2_SMARTSAN);
2470 if (rval || !ql2xsmartsan)
2471 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2);
2472 if (rval)
2473 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI1);
2475 return rval;
2478 /* Try fdmi2 first, if fails then try fdmi1 */
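/*
 * If the fabric reports the HBA as already registered, deregister
 * it with DHBA and register again so the attribute set is
 * refreshed; any other failure falls back to an FDMI-1 attempt.
 */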
2479 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
2480 if (rval) {
2481 if (rval != QLA_ALREADY_REGISTERED)
2482 goto try_fdmi;
2484 rval = qla2x00_fdmi_dhba(vha);
2485 if (rval)
2486 goto try_fdmi;
2488 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
2489 if (rval)
2490 goto try_fdmi;
2493 if (ql2xsmartsan)
2494 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2_SMARTSAN);
2495 if (rval || !ql2xsmartsan)
2496 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2);
2497 if (rval)
2498 goto try_fdmi;
2500 return rval;
2502 try_fdmi:
2503 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
2504 if (rval) {
2505 if (rval != QLA_ALREADY_REGISTERED)
2506 return rval;
2508 rval = qla2x00_fdmi_dhba(vha);
2509 if (rval)
2510 return rval;
2512 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
2513 if (rval)
2514 return rval;
2517 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI1);
2519 return rval;
2523 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2524 * @vha: HA context
2525 * @list: switch info entries to populate
2527 * Returns 0 on success.
2530 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2532 int rval = QLA_SUCCESS;
2533 uint16_t i;
2534 struct qla_hw_data *ha = vha->hw;
2535 ms_iocb_entry_t *ms_pkt;
2536 struct ct_sns_req *ct_req;
2537 struct ct_sns_rsp *ct_rsp;
2538 struct ct_arg arg;
2540 if (!IS_IIDMA_CAPABLE(ha))
2541 return QLA_FUNCTION_FAILED;
2543 arg.iocb = ha->ms_iocb;
2544 arg.req_dma = ha->ct_sns_dma;
2545 arg.rsp_dma = ha->ct_sns_dma;
2546 arg.req_size = GFPN_ID_REQ_SIZE;
2547 arg.rsp_size = GFPN_ID_RSP_SIZE;
2548 arg.nport_handle = NPH_SNS;
2550 for (i = 0; i < ha->max_fibre_devices; i++) {
2551 /* Issue GFPN_ID */
2552 /* Prepare common MS IOCB */
2553 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2555 /* Prepare CT request */
2556 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2557 GFPN_ID_RSP_SIZE);
2558 ct_rsp = &ha->ct_sns->p.rsp;
2560 /* Prepare CT arguments -- port_id */
2561 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2563 /* Execute MS IOCB */
2564 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2565 sizeof(ms_iocb_entry_t));
2566 if (rval != QLA_SUCCESS) {
2567 /*EMPTY*/
2568 ql_dbg(ql_dbg_disc, vha, 0x2023,
2569 "GFPN_ID issue IOCB failed (%d).\n", rval);
2570 break;
2571 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2572 "GFPN_ID") != QLA_SUCCESS) {
2573 rval = QLA_FUNCTION_FAILED;
2574 break;
2575 } else {
2576 /* Save fabric portname */
2577 memcpy(list[i].fabric_port_name,
2578 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
2581 /* Last device exit. */
2582 if (list[i].d_id.b.rsvd_1 != 0)
2583 break;
2586 return (rval);
2590 static inline struct ct_sns_req *
2591 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2592 uint16_t rsp_size)
2594 memset(p, 0, sizeof(struct ct_sns_pkt));
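/*
 * CT IU preamble addressed to the FC management service (GS type
 * 0xFA); subtype 0x01 selects the Fabric Configuration Server,
 * which serves the GPSC query built by the callers below.
 */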
2596 p->p.req.header.revision = 0x01;
2597 p->p.req.header.gs_type = 0xFA;
2598 p->p.req.header.gs_subtype = 0x01;
2599 p->p.req.command = cpu_to_be16(cmd);
2600 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
2602 return &p->p.req;
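/*
 * Translate a GPSC speed-capability bit (FC-GS Get Port Speed
 * format) into the driver's PORT_SPEED_* value; unrecognized bits
 * fall through to PORT_SPEED_UNKNOWN.
 */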
2605 static uint16_t
2606 qla2x00_port_speed_capability(uint16_t speed)
2608 switch (speed) {
2609 case BIT_15:
2610 return PORT_SPEED_1GB;
2611 case BIT_14:
2612 return PORT_SPEED_2GB;
2613 case BIT_13:
2614 return PORT_SPEED_4GB;
2615 case BIT_12:
2616 return PORT_SPEED_10GB;
2617 case BIT_11:
2618 return PORT_SPEED_8GB;
2619 case BIT_10:
2620 return PORT_SPEED_16GB;
2621 case BIT_8:
2622 return PORT_SPEED_32GB;
2623 case BIT_7:
2624 return PORT_SPEED_64GB;
2625 default:
2626 return PORT_SPEED_UNKNOWN;
2631 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2632 * @vha: HA context
2633 * @list: switch info entries to populate
2635 * Returns 0 on success.
2638 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2640 int rval;
2641 uint16_t i;
2642 struct qla_hw_data *ha = vha->hw;
2643 ms_iocb_entry_t *ms_pkt;
2644 struct ct_sns_req *ct_req;
2645 struct ct_sns_rsp *ct_rsp;
2646 struct ct_arg arg;
2648 if (!IS_IIDMA_CAPABLE(ha))
2649 return QLA_FUNCTION_FAILED;
2650 if (!ha->flags.gpsc_supported)
2651 return QLA_FUNCTION_FAILED;
2653 rval = qla2x00_mgmt_svr_login(vha);
2654 if (rval)
2655 return rval;
2657 arg.iocb = ha->ms_iocb;
2658 arg.req_dma = ha->ct_sns_dma;
2659 arg.rsp_dma = ha->ct_sns_dma;
2660 arg.req_size = GPSC_REQ_SIZE;
2661 arg.rsp_size = GPSC_RSP_SIZE;
2662 arg.nport_handle = vha->mgmt_svr_loop_id;
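/*
 * GPSC is served by the fabric management server, so address the
 * CT frames to the management server login (mgmt_svr_loop_id)
 * rather than the SNS handle used for the name-server queries.
 */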
2664 for (i = 0; i < ha->max_fibre_devices; i++) {
2665 /* Issue GPSC */
2666 /* Prepare common MS IOCB */
2667 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2669 /* Prepare CT request */
2670 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2671 GPSC_RSP_SIZE);
2672 ct_rsp = &ha->ct_sns->p.rsp;
2674 /* Prepare CT arguments -- port_name */
2675 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2676 WWN_SIZE);
2678 /* Execute MS IOCB */
2679 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2680 sizeof(ms_iocb_entry_t));
2681 if (rval != QLA_SUCCESS) {
2682 /*EMPTY*/
2683 ql_dbg(ql_dbg_disc, vha, 0x2059,
2684 "GPSC issue IOCB failed (%d).\n", rval);
2685 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2686 "GPSC")) != QLA_SUCCESS) {
2687 /* FM command unsupported? */
2688 if (rval == QLA_INVALID_COMMAND &&
2689 (ct_rsp->header.reason_code ==
2690 CT_REASON_INVALID_COMMAND_CODE ||
2691 ct_rsp->header.reason_code ==
2692 CT_REASON_COMMAND_UNSUPPORTED)) {
2693 ql_dbg(ql_dbg_disc, vha, 0x205a,
2694 "GPSC command unsupported, disabling "
2695 "query.\n");
2696 ha->flags.gpsc_supported = 0;
2697 rval = QLA_FUNCTION_FAILED;
2698 break;
2700 rval = QLA_FUNCTION_FAILED;
2701 } else {
2702 list[i].fp_speed = qla2x00_port_speed_capability(
2703 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2704 ql_dbg(ql_dbg_disc, vha, 0x205b,
2705 "GPSC ext entry - fpn "
2706 "%8phN speeds=%04x speed=%04x.\n",
2707 list[i].fabric_port_name,
2708 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2709 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2712 /* Last device exit. */
2713 if (list[i].d_id.b.rsvd_1 != 0)
2714 break;
2717 return (rval);
2721 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2723 * @vha: HA context
2724 * @list: switch info entries to populate
2727 void
2728 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2730 int rval;
2731 uint16_t i;
2733 ms_iocb_entry_t *ms_pkt;
2734 struct ct_sns_req *ct_req;
2735 struct ct_sns_rsp *ct_rsp;
2736 struct qla_hw_data *ha = vha->hw;
2737 uint8_t fcp_scsi_features = 0, nvme_features = 0;
2738 struct ct_arg arg;
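/*
 * Query GFF_ID for each discovered port to learn which FC-4
 * protocols it supports; the response carries a 4-bit feature set
 * per FC-4 type (FCP-SCSI and NVMe are examined below).
 */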
2740 for (i = 0; i < ha->max_fibre_devices; i++) {
2741 /* Default the FC4 type to unknown so that this
2742 * port is still processed. */
2743 list[i].fc4_type = 0;
2745 /* Do not attempt GFF_ID if we are not FWI_2 capable */
2746 if (!IS_FWI2_CAPABLE(ha))
2747 continue;
2749 arg.iocb = ha->ms_iocb;
2750 arg.req_dma = ha->ct_sns_dma;
2751 arg.rsp_dma = ha->ct_sns_dma;
2752 arg.req_size = GFF_ID_REQ_SIZE;
2753 arg.rsp_size = GFF_ID_RSP_SIZE;
2754 arg.nport_handle = NPH_SNS;
2756 /* Prepare common MS IOCB */
2757 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2759 /* Prepare CT request */
2760 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2761 GFF_ID_RSP_SIZE);
2762 ct_rsp = &ha->ct_sns->p.rsp;
2764 /* Prepare CT arguments -- port_id */
2765 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2767 /* Execute MS IOCB */
2768 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2769 sizeof(ms_iocb_entry_t));
2771 if (rval != QLA_SUCCESS) {
2772 ql_dbg(ql_dbg_disc, vha, 0x205c,
2773 "GFF_ID issue IOCB failed (%d).\n", rval);
2774 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2775 "GFF_ID") != QLA_SUCCESS) {
2776 ql_dbg(ql_dbg_disc, vha, 0x205d,
2777 "GFF_ID IOCB status had a failure status code.\n");
2778 } else {
2779 fcp_scsi_features =
2780 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2781 fcp_scsi_features &= 0x0f;
2783 if (fcp_scsi_features) {
2784 list[i].fc4_type = FS_FC4TYPE_FCP;
2785 list[i].fc4_features = fcp_scsi_features;
2788 nvme_features =
2789 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2790 nvme_features &= 0xf;
2792 if (nvme_features) {
2793 list[i].fc4_type |= FS_FC4TYPE_NVME;
2794 list[i].fc4_features = nvme_features;
2798 /* Last device exit. */
2799 if (list[i].d_id.b.rsvd_1 != 0)
2800 break;
2804 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
2806 struct qla_work_evt *e;
2808 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
2809 if (!e)
2810 return QLA_FUNCTION_FAILED;
2812 e->u.fcport.fcport = fcport;
2813 return qla2x00_post_work(vha, e);
2816 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
2818 struct fc_port *fcport = ea->fcport;
2820 ql_dbg(ql_dbg_disc, vha, 0x20d8,
2821 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2822 __func__, fcport->port_name, fcport->disc_state,
2823 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
2824 ea->sp->gen1, fcport->rscn_gen, fcport->loop_id);
2826 if (fcport->disc_state == DSC_DELETE_PEND)
2827 return;
2829 if (ea->sp->gen2 != fcport->login_gen) {
2830 /* target side must have changed it. */
2831 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2832 "%s %8phC generation changed\n",
2833 __func__, fcport->port_name);
2834 return;
2835 } else if (ea->sp->gen1 != fcport->rscn_gen) {
2836 return;
2839 qla_post_iidma_work(vha, fcport);
2842 static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
2844 struct scsi_qla_host *vha = sp->vha;
2845 struct qla_hw_data *ha = vha->hw;
2846 fc_port_t *fcport = sp->fcport;
2847 struct ct_sns_rsp *ct_rsp;
2848 struct event_arg ea;
2850 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
2852 ql_dbg(ql_dbg_disc, vha, 0x2053,
2853 "Async done-%s res %x, WWPN %8phC \n",
2854 sp->name, res, fcport->port_name);
2856 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2858 if (res == QLA_FUNCTION_TIMEOUT)
2859 goto done;
2861 if (res == (DID_ERROR << 16)) {
2862 /* entry status error */
2863 goto done;
2864 } else if (res) {
2865 if ((ct_rsp->header.reason_code ==
2866 CT_REASON_INVALID_COMMAND_CODE) ||
2867 (ct_rsp->header.reason_code ==
2868 CT_REASON_COMMAND_UNSUPPORTED)) {
2869 ql_dbg(ql_dbg_disc, vha, 0x2019,
2870 "GPSC command unsupported, disabling query.\n");
2871 ha->flags.gpsc_supported = 0;
2872 goto done;
2874 } else {
2875 fcport->fp_speed = qla2x00_port_speed_capability(
2876 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2878 ql_dbg(ql_dbg_disc, vha, 0x2054,
2879 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
2880 sp->name, fcport->fabric_port_name,
2881 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2882 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2884 memset(&ea, 0, sizeof(ea));
2885 ea.rc = res;
2886 ea.fcport = fcport;
2887 ea.sp = sp;
2888 qla24xx_handle_gpsc_event(vha, &ea);
2890 done:
2891 sp->free(sp);
2894 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
2896 int rval = QLA_FUNCTION_FAILED;
2897 struct ct_sns_req *ct_req;
2898 srb_t *sp;
2900 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
2901 return rval;
2903 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2904 if (!sp)
2905 goto done;
2907 sp->type = SRB_CT_PTHRU_CMD;
2908 sp->name = "gpsc";
2909 sp->gen1 = fcport->rscn_gen;
2910 sp->gen2 = fcport->login_gen;
2912 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
2914 /* CT_IU preamble */
2915 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
2916 GPSC_RSP_SIZE);
2918 /* GPSC req */
2919 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
2920 WWN_SIZE);
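/*
 * The per-fcport CT buffer is used for both the request and the
 * response; the GPSC reply overwrites the request on completion.
 */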
2922 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
2923 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
2924 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
2925 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
2926 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
2927 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
2928 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
2930 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
2931 sp->done = qla24xx_async_gpsc_sp_done;
2933 ql_dbg(ql_dbg_disc, vha, 0x205e,
2934 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
2935 sp->name, fcport->port_name, sp->handle,
2936 fcport->loop_id, fcport->d_id.b.domain,
2937 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2939 rval = qla2x00_start_sp(sp);
2940 if (rval != QLA_SUCCESS)
2941 goto done_free_sp;
2942 return rval;
2944 done_free_sp:
2945 sp->free(sp);
2946 done:
2947 return rval;
2950 int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
2952 struct qla_work_evt *e;
2954 if (test_bit(UNLOADING, &vha->dpc_flags) ||
2955 (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)))
2956 return 0;
2958 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
2959 if (!e)
2960 return QLA_FUNCTION_FAILED;
2962 e->u.gpnid.id = *id;
2963 return qla2x00_post_work(vha, e);
2966 void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
2968 struct srb_iocb *c = &sp->u.iocb_cmd;
2970 switch (sp->type) {
2971 case SRB_ELS_DCMD:
2972 qla2x00_els_dcmd2_free(vha, &c->u.els_plogi);
2973 break;
2974 case SRB_CT_PTHRU_CMD:
2975 default:
2976 if (sp->u.iocb_cmd.u.ctarg.req) {
2977 dma_free_coherent(&vha->hw->pdev->dev,
2978 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
2979 sp->u.iocb_cmd.u.ctarg.req,
2980 sp->u.iocb_cmd.u.ctarg.req_dma);
2981 sp->u.iocb_cmd.u.ctarg.req = NULL;
2984 if (sp->u.iocb_cmd.u.ctarg.rsp) {
2985 dma_free_coherent(&vha->hw->pdev->dev,
2986 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
2987 sp->u.iocb_cmd.u.ctarg.rsp,
2988 sp->u.iocb_cmd.u.ctarg.rsp_dma);
2989 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
2991 break;
2994 sp->free(sp);
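/*
 * GPN_ID completion handling: a failed query means the Nport ID is
 * gone, so any matching fcport is scheduled for deletion; on
 * success the reported WWPN is matched against existing fcports,
 * conflicting Nport IDs are torn down, and a login, ADISC
 * revalidation or new session is initiated as appropriate.
 */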
2997 void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
2999 fc_port_t *fcport, *conflict, *t;
3000 u16 data[2];
3002 ql_dbg(ql_dbg_disc, vha, 0xffff,
3003 "%s %d port_id: %06x\n",
3004 __func__, __LINE__, ea->id.b24);
3006 if (ea->rc) {
3007 /* cable is disconnected */
3008 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3009 if (fcport->d_id.b24 == ea->id.b24)
3010 fcport->scan_state = QLA_FCPORT_SCAN;
3012 qlt_schedule_sess_for_deletion(fcport);
3014 } else {
3015 /* cable is connected */
3016 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
3017 if (fcport) {
3018 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3019 list) {
3020 if ((conflict->d_id.b24 == ea->id.b24) &&
3021 (fcport != conflict))
3023 * Two fcports have conflicting Nport IDs, or
3024 * an existing fcport's Nport ID conflicts
3025 * with the new fcport.
3028 conflict->scan_state = QLA_FCPORT_SCAN;
3030 qlt_schedule_sess_for_deletion(conflict);
3033 fcport->scan_needed = 0;
3034 fcport->rscn_gen++;
3035 fcport->scan_state = QLA_FCPORT_FOUND;
3036 fcport->flags |= FCF_FABRIC_DEVICE;
3037 if (fcport->login_retry == 0) {
3038 fcport->login_retry =
3039 vha->hw->login_retry_count;
3040 ql_dbg(ql_dbg_disc, vha, 0xffff,
3041 "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
3042 fcport->port_name, fcport->loop_id,
3043 fcport->login_retry);
3045 switch (fcport->disc_state) {
3046 case DSC_LOGIN_COMPLETE:
3047 /* recheck session is still intact. */
3048 ql_dbg(ql_dbg_disc, vha, 0x210d,
3049 "%s %d %8phC revalidate session with ADISC\n",
3050 __func__, __LINE__, fcport->port_name);
3051 data[0] = data[1] = 0;
3052 qla2x00_post_async_adisc_work(vha, fcport,
3053 data);
3054 break;
3055 case DSC_DELETED:
3056 ql_dbg(ql_dbg_disc, vha, 0x210d,
3057 "%s %d %8phC login\n", __func__, __LINE__,
3058 fcport->port_name);
3059 fcport->d_id = ea->id;
3060 qla24xx_fcport_handle_login(vha, fcport);
3061 break;
3062 case DSC_DELETE_PEND:
3063 fcport->d_id = ea->id;
3064 break;
3065 default:
3066 fcport->d_id = ea->id;
3067 break;
3069 } else {
3070 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3071 list) {
3072 if (conflict->d_id.b24 == ea->id.b24) {
3073 /* Two fcports have conflicting Nport IDs, or
3074 * an existing fcport's Nport ID conflicts
3075 * with the new fcport.
3077 ql_dbg(ql_dbg_disc, vha, 0xffff,
3078 "%s %d %8phC DS %d\n",
3079 __func__, __LINE__,
3080 conflict->port_name,
3081 conflict->disc_state);
3083 conflict->scan_state = QLA_FCPORT_SCAN;
3084 qlt_schedule_sess_for_deletion(conflict);
3088 /* create new fcport */
3089 ql_dbg(ql_dbg_disc, vha, 0x2065,
3090 "%s %d %8phC post new sess\n",
3091 __func__, __LINE__, ea->port_name);
3092 qla24xx_post_newsess_work(vha, &ea->id,
3093 ea->port_name, NULL, NULL, 0);
3098 static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
3100 struct scsi_qla_host *vha = sp->vha;
3101 struct ct_sns_req *ct_req =
3102 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3103 struct ct_sns_rsp *ct_rsp =
3104 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3105 struct event_arg ea;
3106 struct qla_work_evt *e;
3107 unsigned long flags;
3109 if (res)
3110 ql_dbg(ql_dbg_disc, vha, 0x2066,
3111 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3112 sp->name, res, sp->gen1, &ct_req->req.port_id.port_id,
3113 ct_rsp->rsp.gpn_id.port_name);
3114 else
3115 ql_dbg(ql_dbg_disc, vha, 0x2066,
3116 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3117 sp->name, sp->gen1, &ct_req->req.port_id.port_id,
3118 ct_rsp->rsp.gpn_id.port_name);
3120 memset(&ea, 0, sizeof(ea));
3121 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
3122 ea.sp = sp;
3123 ea.id = be_to_port_id(ct_req->req.port_id.port_id);
3124 ea.rc = res;
3126 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3127 list_del(&sp->elem);
3128 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3130 if (res) {
3131 if (res == QLA_FUNCTION_TIMEOUT) {
3132 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3133 sp->free(sp);
3134 return;
3136 } else if (sp->gen1) {
3137 /* There was another RSCN for this Nport ID */
3138 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3139 sp->free(sp);
3140 return;
3143 qla24xx_handle_gpnid_event(vha, &ea);
3145 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3146 if (!e) {
3147 /* Please ignore the kernel warning; otherwise we would leak this memory. */
3148 dma_free_coherent(&vha->hw->pdev->dev,
3149 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3150 sp->u.iocb_cmd.u.ctarg.req,
3151 sp->u.iocb_cmd.u.ctarg.req_dma);
3152 sp->u.iocb_cmd.u.ctarg.req = NULL;
3154 dma_free_coherent(&vha->hw->pdev->dev,
3155 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3156 sp->u.iocb_cmd.u.ctarg.rsp,
3157 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3158 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3160 sp->free(sp);
3161 return;
3164 e->u.iosb.sp = sp;
3165 qla2x00_post_work(vha, e);
3168 /* Get WWPN with Nport ID. */
3169 int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3171 int rval = QLA_FUNCTION_FAILED;
3172 struct ct_sns_req *ct_req;
3173 srb_t *sp, *tsp;
3174 struct ct_sns_pkt *ct_sns;
3175 unsigned long flags;
3177 if (!vha->flags.online)
3178 goto done;
3180 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3181 if (!sp)
3182 goto done;
3184 sp->type = SRB_CT_PTHRU_CMD;
3185 sp->name = "gpnid";
3186 sp->u.iocb_cmd.u.ctarg.id = *id;
3187 sp->gen1 = 0;
3188 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
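/*
 * If a GPN_ID for this Nport ID is already in flight, bump its gen1
 * counter (its completion handler will re-queue the query) and drop
 * this duplicate request.
 */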
3190 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3191 list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3192 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3193 tsp->gen1++;
3194 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3195 sp->free(sp);
3196 goto done;
3199 list_add_tail(&sp->elem, &vha->gpnid_list);
3200 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3202 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3203 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3204 GFP_KERNEL);
3205 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3206 if (!sp->u.iocb_cmd.u.ctarg.req) {
3207 ql_log(ql_log_warn, vha, 0xd041,
3208 "Failed to allocate ct_sns request.\n");
3209 goto done_free_sp;
3212 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3213 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3214 GFP_KERNEL);
3215 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
3216 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3217 ql_log(ql_log_warn, vha, 0xd042,
3218 "Failed to allocate ct_sns request.\n");
3219 goto done_free_sp;
3222 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
3223 memset(ct_sns, 0, sizeof(*ct_sns));
3225 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3226 /* CT_IU preamble */
3227 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
3229 /* GPN_ID req */
3230 ct_req->req.port_id.port_id = port_id_to_be_id(*id);
3232 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
3233 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
3234 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3236 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3237 sp->done = qla2x00_async_gpnid_sp_done;
3239 ql_dbg(ql_dbg_disc, vha, 0x2067,
3240 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3241 sp->handle, &ct_req->req.port_id.port_id);
3243 rval = qla2x00_start_sp(sp);
3244 if (rval != QLA_SUCCESS)
3245 goto done_free_sp;
3247 return rval;
3249 done_free_sp:
3250 spin_lock_irqsave(&vha->hw->vport_slock, flags);
3251 list_del(&sp->elem);
3252 spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
3254 if (sp->u.iocb_cmd.u.ctarg.req) {
3255 dma_free_coherent(&vha->hw->pdev->dev,
3256 sizeof(struct ct_sns_pkt),
3257 sp->u.iocb_cmd.u.ctarg.req,
3258 sp->u.iocb_cmd.u.ctarg.req_dma);
3259 sp->u.iocb_cmd.u.ctarg.req = NULL;
3261 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3262 dma_free_coherent(&vha->hw->pdev->dev,
3263 sizeof(struct ct_sns_pkt),
3264 sp->u.iocb_cmd.u.ctarg.rsp,
3265 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3266 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3269 sp->free(sp);
3270 done:
3271 return rval;
3274 void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3276 fc_port_t *fcport = ea->fcport;
3278 qla24xx_post_gnl_work(vha, fcport);
3281 void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
3283 struct scsi_qla_host *vha = sp->vha;
3284 fc_port_t *fcport = sp->fcport;
3285 struct ct_sns_rsp *ct_rsp;
3286 struct event_arg ea;
3287 uint8_t fc4_scsi_feat;
3288 uint8_t fc4_nvme_feat;
3290 ql_dbg(ql_dbg_disc, vha, 0x2133,
3291 "Async done-%s res %x ID %x. %8phC\n",
3292 sp->name, res, fcport->d_id.b24, fcport->port_name);
3294 fcport->flags &= ~FCF_ASYNC_SENT;
3295 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3296 fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3297 fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
3300 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3301 * The format of the FC-4 Features object, as defined by the FC-4,
3302 * shall be an array of 4-bit values, one for each type code value.
3304 if (!res) {
3305 if (fc4_scsi_feat & 0xf) {
3306 /* w1 b00:03 */
3307 fcport->fc4_type = FS_FC4TYPE_FCP;
3308 fcport->fc4_features = fc4_scsi_feat & 0xf;
3311 if (fc4_nvme_feat & 0xf) {
3312 /* w5 [00:03]/28h */
3313 fcport->fc4_type |= FS_FC4TYPE_NVME;
3314 fcport->fc4_features = fc4_nvme_feat & 0xf;
3318 memset(&ea, 0, sizeof(ea));
3319 ea.sp = sp;
3320 ea.fcport = sp->fcport;
3321 ea.rc = res;
3323 qla24xx_handle_gffid_event(vha, &ea);
3324 sp->free(sp);
3327 /* Get FC4 Feature with Nport ID. */
3328 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
3330 int rval = QLA_FUNCTION_FAILED;
3331 struct ct_sns_req *ct_req;
3332 srb_t *sp;
3334 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3335 return rval;
3337 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3338 if (!sp)
3339 return rval;
3341 fcport->flags |= FCF_ASYNC_SENT;
3342 sp->type = SRB_CT_PTHRU_CMD;
3343 sp->name = "gffid";
3344 sp->gen1 = fcport->rscn_gen;
3345 sp->gen2 = fcport->login_gen;
3347 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3348 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3350 /* CT_IU preamble */
3351 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
3352 GFF_ID_RSP_SIZE);
3354 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3355 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3356 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
3358 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3359 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3360 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3361 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3362 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3363 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3364 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3366 sp->done = qla24xx_async_gffid_sp_done;
3368 ql_dbg(ql_dbg_disc, vha, 0x2132,
3369 "Async-%s hdl=%x %8phC.\n", sp->name,
3370 sp->handle, fcport->port_name);
3372 rval = qla2x00_start_sp(sp);
3373 if (rval != QLA_SUCCESS)
3374 goto done_free_sp;
3376 return rval;
3377 done_free_sp:
3378 sp->free(sp);
3379 fcport->flags &= ~FCF_ASYNC_SENT;
3380 return rval;
3383 /* GPN_FT + GNN_FT*/
3384 static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3386 struct qla_hw_data *ha = vha->hw;
3387 scsi_qla_host_t *vp;
3388 unsigned long flags;
3389 u64 twwn;
3390 int rc = 0;
3392 if (!ha->num_vhosts)
3393 return 0;
3395 spin_lock_irqsave(&ha->vport_slock, flags);
3396 list_for_each_entry(vp, &ha->vp_list, list) {
3397 twwn = wwn_to_u64(vp->port_name);
3398 if (wwn == twwn) {
3399 rc = 1;
3400 break;
3403 spin_unlock_irqrestore(&ha->vport_slock, flags);
3405 return rc;
3408 void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3410 fc_port_t *fcport;
3411 u32 i, rc;
3412 bool found;
3413 struct fab_scan_rp *rp, *trp;
3414 unsigned long flags;
3415 u8 recheck = 0;
3416 u16 dup = 0, dup_cnt = 0;
3418 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3419 "%s enter\n", __func__);
3421 if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3422 ql_dbg(ql_dbg_disc, vha, 0xffff,
3423 "%s scan stop due to chip reset %x/%x\n",
3424 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
3425 goto out;
3428 rc = sp->rc;
3429 if (rc) {
3430 vha->scan.scan_retry++;
3431 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3432 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3433 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3434 goto out;
3435 } else {
3436 ql_dbg(ql_dbg_disc, vha, 0xffff,
3437 "%s: Fabric scan failed for %d retries.\n",
3438 __func__, vha->scan.scan_retry);
3440 * Unable to scan any rports. logout loop below
3441 * will unregister all sessions.
3443 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3444 if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
3445 fcport->scan_state = QLA_FCPORT_SCAN;
3448 goto login_logout;
3451 vha->scan.scan_retry = 0;
3453 list_for_each_entry(fcport, &vha->vp_fcports, list)
3454 fcport->scan_state = QLA_FCPORT_SCAN;
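/*
 * Reconcile the switch response against the driver's fcport list:
 * every fabric fcport was marked QLA_FCPORT_SCAN above; entries in
 * vha->scan.l mark the matching fcport QLA_FCPORT_FOUND or post a
 * new-session work item, and anything still in QLA_FCPORT_SCAN is
 * logged out in the login_logout pass below.
 */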
3456 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3457 u64 wwn;
3458 int k;
3460 rp = &vha->scan.l[i];
3461 found = false;
3463 wwn = wwn_to_u64(rp->port_name);
3464 if (wwn == 0)
3465 continue;
3467 /* Remove duplicate NPORT ID entries from the switch database */
3468 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
3469 trp = &vha->scan.l[k];
3470 if (rp->id.b24 == trp->id.b24) {
3471 dup = 1;
3472 dup_cnt++;
3473 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
3474 vha, 0xffff,
3475 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
3476 rp->id.b24, rp->port_name, trp->port_name);
3477 memset(trp, 0, sizeof(*trp));
3481 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3482 continue;
3484 /* Bypass reserved domain fields. */
3485 if ((rp->id.b.domain & 0xf0) == 0xf0)
3486 continue;
3488 /* Bypass virtual ports of the same host. */
3489 if (qla2x00_is_a_vp(vha, wwn))
3490 continue;
3492 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3493 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3494 continue;
3495 fcport->scan_state = QLA_FCPORT_FOUND;
3496 fcport->last_rscn_gen = fcport->rscn_gen;
3497 found = true;
3499 * If device was not a fabric device before.
3501 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3502 qla2x00_clear_loop_id(fcport);
3503 fcport->flags |= FCF_FABRIC_DEVICE;
3504 } else if (fcport->d_id.b24 != rp->id.b24 ||
3505 (fcport->scan_needed &&
3506 fcport->port_type != FCT_INITIATOR &&
3507 fcport->port_type != FCT_NVME_INITIATOR)) {
3508 qlt_schedule_sess_for_deletion(fcport);
3510 fcport->d_id.b24 = rp->id.b24;
3511 fcport->scan_needed = 0;
3512 break;
3515 if (!found) {
3516 ql_dbg(ql_dbg_disc, vha, 0xffff,
3517 "%s %d %8phC post new sess\n",
3518 __func__, __LINE__, rp->port_name);
3519 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3520 rp->node_name, NULL, rp->fc4type);
3524 if (dup) {
3525 ql_log(ql_log_warn, vha, 0xffff,
3526 "Detected %d duplicate NPORT ID(s) from switch data base\n",
3527 dup_cnt);
3530 login_logout:
3532 * Logout all previous fabric dev marked lost, except FCP2 devices.
3534 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3535 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3536 fcport->scan_needed = 0;
3537 continue;
3540 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3541 bool do_delete = false;
3543 if (fcport->scan_needed &&
3544 fcport->disc_state == DSC_LOGIN_PEND) {
3545 /* Cable got disconnected after we sent
3546 * a login. Do delete to prevent timeout.
3548 fcport->logout_on_delete = 1;
3549 do_delete = true;
3552 fcport->scan_needed = 0;
3553 if (((qla_dual_mode_enabled(vha) ||
3554 qla_ini_mode_enabled(vha)) &&
3555 atomic_read(&fcport->state) == FCS_ONLINE) ||
3556 do_delete) {
3557 if (fcport->loop_id != FC_NO_LOOP_ID) {
3558 if (fcport->flags & FCF_FCP2_DEVICE)
3559 fcport->logout_on_delete = 0;
3561 ql_log(ql_log_warn, vha, 0x20f0,
3562 "%s %d %8phC post del sess\n",
3563 __func__, __LINE__,
3564 fcport->port_name);
3566 qlt_schedule_sess_for_deletion(fcport);
3567 continue;
3570 } else {
3571 if (fcport->scan_needed ||
3572 fcport->disc_state != DSC_LOGIN_COMPLETE) {
3573 if (fcport->login_retry == 0) {
3574 fcport->login_retry =
3575 vha->hw->login_retry_count;
3576 ql_dbg(ql_dbg_disc, vha, 0x20a3,
3577 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
3578 fcport->port_name, fcport->loop_id,
3579 fcport->login_retry);
3581 fcport->scan_needed = 0;
3582 qla24xx_fcport_handle_login(vha, fcport);
3587 recheck = 1;
3588 out:
3589 qla24xx_sp_unmap(vha, sp);
3590 spin_lock_irqsave(&vha->work_lock, flags);
3591 vha->scan.scan_flags &= ~SF_SCANNING;
3592 spin_unlock_irqrestore(&vha->work_lock, flags);
3594 if (recheck) {
3595 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3596 if (fcport->scan_needed) {
3597 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3598 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3599 break;
3605 static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
3606 srb_t *sp, int cmd)
3608 struct qla_work_evt *e;
3610 if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
3611 return QLA_PARAMETER_ERROR;
3613 e = qla2x00_alloc_work(vha, cmd);
3614 if (!e)
3615 return QLA_FUNCTION_FAILED;
3617 e->u.iosb.sp = sp;
3619 return qla2x00_post_work(vha, e);
3622 static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
3623 srb_t *sp, int cmd)
3625 struct qla_work_evt *e;
3627 if (cmd != QLA_EVT_GPNFT)
3628 return QLA_PARAMETER_ERROR;
3630 e = qla2x00_alloc_work(vha, cmd);
3631 if (!e)
3632 return QLA_FUNCTION_FAILED;
3634 e->u.gpnft.fc4_type = FC4_TYPE_NVME;
3635 e->u.gpnft.sp = sp;
3637 return qla2x00_post_work(vha, e);
3640 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
3641 struct srb *sp)
3643 struct qla_hw_data *ha = vha->hw;
3644 int num_fibre_dev = ha->max_fibre_devices;
3645 struct ct_sns_req *ct_req =
3646 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3647 struct ct_sns_gpnft_rsp *ct_rsp =
3648 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3649 struct ct_sns_gpn_ft_data *d;
3650 struct fab_scan_rp *rp;
3651 u16 cmd = be16_to_cpu(ct_req->command);
3652 u8 fc4_type = sp->gen2;
3653 int i, j, k;
3654 port_id_t id;
3655 u8 found;
3656 u64 wwn;
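/*
 * Fold the GPN_FT/GNN_FT response into vha->scan.l: the FCP pass
 * fills new slots from GPN_FT and copies node names from GNN_FT;
 * the NVMe pass tags matching entries with FS_FC4TYPE_NVME or
 * claims a free slot for an NVMe-only port.
 */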
3658 j = 0;
3659 for (i = 0; i < num_fibre_dev; i++) {
3660 d = &ct_rsp->entries[i];
3662 id.b.rsvd_1 = 0;
3663 id.b.domain = d->port_id[0];
3664 id.b.area = d->port_id[1];
3665 id.b.al_pa = d->port_id[2];
3666 wwn = wwn_to_u64(d->port_name);
3668 if (id.b24 == 0 || wwn == 0)
3669 continue;
3671 if (fc4_type == FC4_TYPE_FCP_SCSI) {
3672 if (cmd == GPN_FT_CMD) {
3673 rp = &vha->scan.l[j];
3674 rp->id = id;
3675 memcpy(rp->port_name, d->port_name, 8);
3676 j++;
3677 rp->fc4type = FS_FC4TYPE_FCP;
3678 } else {
3679 for (k = 0; k < num_fibre_dev; k++) {
3680 rp = &vha->scan.l[k];
3681 if (id.b24 == rp->id.b24) {
3682 memcpy(rp->node_name,
3683 d->port_name, 8);
3684 break;
3688 } else {
3689 /* Search if the fibre device supports FC4_TYPE_NVME */
3690 if (cmd == GPN_FT_CMD) {
3691 found = 0;
3693 for (k = 0; k < num_fibre_dev; k++) {
3694 rp = &vha->scan.l[k];
3695 if (!memcmp(rp->port_name,
3696 d->port_name, 8)) {
3698 * Supports FC-NVMe & FCP
3700 rp->fc4type |= FS_FC4TYPE_NVME;
3701 found = 1;
3702 break;
3706 /* We found new FC-NVMe only port */
3707 if (!found) {
3708 for (k = 0; k < num_fibre_dev; k++) {
3709 rp = &vha->scan.l[k];
3710 if (wwn_to_u64(rp->port_name)) {
3711 continue;
3712 } else {
3713 rp->id = id;
3714 memcpy(rp->port_name,
3715 d->port_name, 8);
3716 rp->fc4type =
3717 FS_FC4TYPE_NVME;
3718 break;
3722 } else {
3723 for (k = 0; k < num_fibre_dev; k++) {
3724 rp = &vha->scan.l[k];
3725 if (id.b24 == rp->id.b24) {
3726 memcpy(rp->node_name,
3727 d->port_name, 8);
3728 break;
3736 static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
3738 struct scsi_qla_host *vha = sp->vha;
3739 struct ct_sns_req *ct_req =
3740 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3741 u16 cmd = be16_to_cpu(ct_req->command);
3742 u8 fc4_type = sp->gen2;
3743 unsigned long flags;
3744 int rc;
3746 /* gen2 field is holding the fc4type */
3747 ql_dbg(ql_dbg_disc, vha, 0xffff,
3748 "Async done-%s res %x FC4Type %x\n",
3749 sp->name, res, sp->gen2);
3751 del_timer(&sp->u.iocb_cmd.timer);
3752 sp->rc = res;
3753 if (res) {
3754 unsigned long flags;
3755 const char *name = sp->name;
3757 if (res == QLA_OS_TIMER_EXPIRED) {
3758 /* switch is ignoring all commands.
3759 * This might be a zone disable behavior.
3760 * This means we hit 64s timeout.
3761 * 22s GPNFT + 44s Abort = 64s
3763 ql_dbg(ql_dbg_disc, vha, 0xffff,
3764 "%s: Switch Zone check please .\n",
3765 name);
3766 qla2x00_mark_all_devices_lost(vha);
3770 * We are in an Interrupt context, queue up this
3771 * sp for GNNFT_DONE work. This will allow all
3772 * the resource to get freed up.
3774 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3775 QLA_EVT_GNNFT_DONE);
3776 if (rc) {
3777 /* Cleanup here to prevent memory leak */
3778 qla24xx_sp_unmap(vha, sp);
3780 spin_lock_irqsave(&vha->work_lock, flags);
3781 vha->scan.scan_flags &= ~SF_SCANNING;
3782 vha->scan.scan_retry++;
3783 spin_unlock_irqrestore(&vha->work_lock, flags);
3785 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3786 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3787 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3788 qla2xxx_wake_dpc(vha);
3789 } else {
3790 ql_dbg(ql_dbg_disc, vha, 0xffff,
3791 "Async done-%s rescan failed on all retries.\n",
3792 name);
3795 return;
3798 qla2x00_find_free_fcp_nvme_slot(vha, sp);
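/*
 * The FCP pass runs GPN_FT followed by GNN_FT; once the FCP GNN_FT
 * completes and NVMe is enabled, chain a second GPN_FT scan for the
 * NVMe FC-4 type, reusing this srb and its DMA buffers.
 */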
3800 if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
3801 cmd == GNN_FT_CMD) {
3802 spin_lock_irqsave(&vha->work_lock, flags);
3803 vha->scan.scan_flags &= ~SF_SCANNING;
3804 spin_unlock_irqrestore(&vha->work_lock, flags);
3806 sp->rc = res;
3807 rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
3808 if (rc) {
3809 qla24xx_sp_unmap(vha, sp);
3810 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3811 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3813 return;
3816 if (cmd == GPN_FT_CMD) {
3817 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3818 QLA_EVT_GPNFT_DONE);
3819 } else {
3820 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3821 QLA_EVT_GNNFT_DONE);
3824 if (rc) {
3825 qla24xx_sp_unmap(vha, sp);
3826 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3827 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3828 return;
3833 * Get WWNN list for fc4_type
3835 * It is assumed the same SRB is re-used from GPNFT to avoid
3836 * mem free & re-alloc
3838 static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
3839 u8 fc4_type)
3841 int rval = QLA_FUNCTION_FAILED;
3842 struct ct_sns_req *ct_req;
3843 struct ct_sns_pkt *ct_sns;
3844 unsigned long flags;
3846 if (!vha->flags.online) {
3847 spin_lock_irqsave(&vha->work_lock, flags);
3848 vha->scan.scan_flags &= ~SF_SCANNING;
3849 spin_unlock_irqrestore(&vha->work_lock, flags);
3850 goto done_free_sp;
3853 if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
3854 ql_log(ql_log_warn, vha, 0xffff,
3855 "%s: req %p rsp %p are not setup\n",
3856 __func__, sp->u.iocb_cmd.u.ctarg.req,
3857 sp->u.iocb_cmd.u.ctarg.rsp);
3858 spin_lock_irqsave(&vha->work_lock, flags);
3859 vha->scan.scan_flags &= ~SF_SCANNING;
3860 spin_unlock_irqrestore(&vha->work_lock, flags);
3861 WARN_ON(1);
3862 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3863 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3864 goto done_free_sp;
3867 ql_dbg(ql_dbg_disc, vha, 0xffff,
3868 "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
3869 __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
3870 sp->u.iocb_cmd.u.ctarg.req_size);
3872 sp->type = SRB_CT_PTHRU_CMD;
3873 sp->name = "gnnft";
3874 sp->gen1 = vha->hw->base_qpair->chip_reset;
3875 sp->gen2 = fc4_type;
3877 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3878 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3880 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
3881 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
3883 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3884 /* CT_IU preamble */
3885 ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
3886 sp->u.iocb_cmd.u.ctarg.rsp_size);
3888 /* GPN_FT req */
3889 ct_req->req.gpn_ft.port_type = fc4_type;
3891 sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
3892 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3894 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
3896 ql_dbg(ql_dbg_disc, vha, 0xffff,
3897 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
3898 sp->handle, ct_req->req.gpn_ft.port_type);
3900 rval = qla2x00_start_sp(sp);
3901 if (rval != QLA_SUCCESS) {
3902 goto done_free_sp;
3905 return rval;
3907 done_free_sp:
3908 if (sp->u.iocb_cmd.u.ctarg.req) {
3909 dma_free_coherent(&vha->hw->pdev->dev,
3910 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3911 sp->u.iocb_cmd.u.ctarg.req,
3912 sp->u.iocb_cmd.u.ctarg.req_dma);
3913 sp->u.iocb_cmd.u.ctarg.req = NULL;
3915 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3916 dma_free_coherent(&vha->hw->pdev->dev,
3917 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3918 sp->u.iocb_cmd.u.ctarg.rsp,
3919 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3920 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3923 sp->free(sp);
3925 spin_lock_irqsave(&vha->work_lock, flags);
3926 vha->scan.scan_flags &= ~SF_SCANNING;
3927 if (vha->scan.scan_flags == 0) {
3928 ql_dbg(ql_dbg_disc, vha, 0xffff,
3929 "%s: schedule\n", __func__);
3930 vha->scan.scan_flags |= SF_QUEUED;
3931 schedule_delayed_work(&vha->scan.scan_work, 5);
3933 spin_unlock_irqrestore(&vha->work_lock, flags);
3936 return rval;
3937 } /* GNNFT */
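/**
 * qla24xx_async_gpnft_done() - Follow a completed GPN_FT with GNN_FT.
 * @vha: HA context
 * @sp: SRB re-used from the GPN_FT command; sp->gen2 carries the FC4 type
 */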
3939 void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
3941 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3942 "%s enter\n", __func__);
3943 qla24xx_async_gnnft(vha, sp, sp->gen2);
3946 /* Get WWPN list for certain fc4_type */
3947 int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
3949 int rval = QLA_FUNCTION_FAILED;
3950 struct ct_sns_req *ct_req;
3951 struct ct_sns_pkt *ct_sns;
3952 u32 rspsz;
3953 unsigned long flags;
3955 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3956 "%s enter\n", __func__);
3958 if (!vha->flags.online)
3959 return rval;
3961 spin_lock_irqsave(&vha->work_lock, flags);
3962 if (vha->scan.scan_flags & SF_SCANNING) {
3963 spin_unlock_irqrestore(&vha->work_lock, flags);
3964 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3965 "%s: scan active\n", __func__);
3966 return rval;
3968 vha->scan.scan_flags |= SF_SCANNING;
3969 spin_unlock_irqrestore(&vha->work_lock, flags);
3971 if (fc4_type == FC4_TYPE_FCP_SCSI) {
3972 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3973 "%s: Performing FCP Scan\n", __func__);
3975 if (sp)
3976 sp->free(sp); /* should not happen */
3978 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3979 if (!sp) {
3980 spin_lock_irqsave(&vha->work_lock, flags);
3981 vha->scan.scan_flags &= ~SF_SCANNING;
3982 spin_unlock_irqrestore(&vha->work_lock, flags);
3983 return rval;
3986 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3987 sizeof(struct ct_sns_pkt),
3988 &sp->u.iocb_cmd.u.ctarg.req_dma,
3989 GFP_KERNEL);
3990 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3991 if (!sp->u.iocb_cmd.u.ctarg.req) {
3992 ql_log(ql_log_warn, vha, 0xffff,
3993 "Failed to allocate ct_sns request.\n");
3994 spin_lock_irqsave(&vha->work_lock, flags);
3995 vha->scan.scan_flags &= ~SF_SCANNING;
3996 spin_unlock_irqrestore(&vha->work_lock, flags);
3997 qla2x00_rel_sp(sp);
3998 return rval;
4000 sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
4002 rspsz = sizeof(struct ct_sns_gpnft_rsp) +
4003 ((vha->hw->max_fibre_devices - 1) *
4004 sizeof(struct ct_sns_gpn_ft_data));
4006 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
4007 rspsz,
4008 &sp->u.iocb_cmd.u.ctarg.rsp_dma,
4009 GFP_KERNEL);
4010 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
4011 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4012 ql_log(ql_log_warn, vha, 0xffff,
4013 "Failed to allocate ct_sns request.\n");
4014 spin_lock_irqsave(&vha->work_lock, flags);
4015 vha->scan.scan_flags &= ~SF_SCANNING;
4016 spin_unlock_irqrestore(&vha->work_lock, flags);
4017 dma_free_coherent(&vha->hw->pdev->dev,
4018 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4019 sp->u.iocb_cmd.u.ctarg.req,
4020 sp->u.iocb_cmd.u.ctarg.req_dma);
4021 sp->u.iocb_cmd.u.ctarg.req = NULL;
4022 qla2x00_rel_sp(sp);
4023 return rval;
4025 sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
4027 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4028 "%s scan list size %d\n", __func__, vha->scan.size);
4030 memset(vha->scan.l, 0, vha->scan.size);
4031 } else if (!sp) {
4032 ql_dbg(ql_dbg_disc, vha, 0xffff,
4033 "NVME scan did not provide SP\n");
4034 return rval;
4037 sp->type = SRB_CT_PTHRU_CMD;
4038 sp->name = "gpnft";
4039 sp->gen1 = vha->hw->base_qpair->chip_reset;
4040 sp->gen2 = fc4_type;
4042 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4043 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4045 rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
4046 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4047 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4049 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4050 /* CT_IU preamble */
4051 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
4053 /* GPN_FT req */
4054 ct_req->req.gpn_ft.port_type = fc4_type;
4056 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4058 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4060 ql_dbg(ql_dbg_disc, vha, 0xffff,
4061 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4062 sp->handle, ct_req->req.gpn_ft.port_type);
4064 rval = qla2x00_start_sp(sp);
4065 if (rval != QLA_SUCCESS) {
4066 goto done_free_sp;
4069 return rval;
4071 done_free_sp:
4072 if (sp->u.iocb_cmd.u.ctarg.req) {
4073 dma_free_coherent(&vha->hw->pdev->dev,
4074 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4075 sp->u.iocb_cmd.u.ctarg.req,
4076 sp->u.iocb_cmd.u.ctarg.req_dma);
4077 sp->u.iocb_cmd.u.ctarg.req = NULL;
4079 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4080 dma_free_coherent(&vha->hw->pdev->dev,
4081 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4082 sp->u.iocb_cmd.u.ctarg.rsp,
4083 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4084 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
4087 sp->free(sp);
4089 spin_lock_irqsave(&vha->work_lock, flags);
4090 vha->scan.scan_flags &= ~SF_SCANNING;
4091 if (vha->scan.scan_flags == 0) {
4092 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4093 "%s: Scan scheduled.\n", __func__);
4094 vha->scan.scan_flags |= SF_QUEUED;
4095 schedule_delayed_work(&vha->scan.scan_work, 5);
4097 spin_unlock_irqrestore(&vha->work_lock, flags);
4100 return rval;
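/**
 * qla_scan_work_fn() - Delayed-work handler for a deferred fabric rescan.
 * @work: work_struct embedded in struct fab_scan
 *
 * Schedules a loop resync via the DPC thread and clears SF_QUEUED.
 */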
4103 void qla_scan_work_fn(struct work_struct *work)
4105 struct fab_scan *s = container_of(to_delayed_work(work),
4106 struct fab_scan, scan_work);
4107 struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
4108 scan);
4109 unsigned long flags;
4111 ql_dbg(ql_dbg_disc, vha, 0xffff,
4112 "%s: schedule loop resync\n", __func__);
4113 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4114 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4115 qla2xxx_wake_dpc(vha);
4116 spin_lock_irqsave(&vha->work_lock, flags);
4117 vha->scan.scan_flags &= ~SF_QUEUED;
4118 spin_unlock_irqrestore(&vha->work_lock, flags);
4121 /* GNN_ID */
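/**
 * qla24xx_handle_gnnid_event() - Process a completed GNN_ID query.
 * @vha: HA context
 * @ea: event argument carrying the fcport
 *
 * Queues GNL work for the port.
 */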
4122 void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4124 qla24xx_post_gnl_work(vha, ea->fcport);
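/**
 * qla2x00_async_gnnid_sp_done() - Completion handler for an async GNN_ID.
 * @sp: SRB of the completed command
 * @res: completion status
 *
 * Copies the returned node name into the fcport (when non-zero) and
 * forwards the result to qla24xx_handle_gnnid_event().
 */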
4127 static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
4129 struct scsi_qla_host *vha = sp->vha;
4130 fc_port_t *fcport = sp->fcport;
4131 u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
4132 struct event_arg ea;
4133 u64 wwnn;
4135 fcport->flags &= ~FCF_ASYNC_SENT;
4136 wwnn = wwn_to_u64(node_name);
4137 if (wwnn)
4138 memcpy(fcport->node_name, node_name, WWN_SIZE);
4140 memset(&ea, 0, sizeof(ea));
4141 ea.fcport = fcport;
4142 ea.sp = sp;
4143 ea.rc = res;
4145 ql_dbg(ql_dbg_disc, vha, 0x204f,
4146 "Async done-%s res %x, WWPN %8phC %8phC\n",
4147 sp->name, res, fcport->port_name, fcport->node_name);
4149 qla24xx_handle_gnnid_event(vha, &ea);
4151 sp->free(sp);
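/**
 * qla24xx_async_gnnid() - Issue an async GNN_ID (Get Node Name) query to
 *	the fabric name server for @fcport.
 * @vha: HA context
 * @fcport: port to query
 *
 * The request and response share the port's ct_desc.ct_sns buffer.
 *
 * Returns QLA_SUCCESS if the command was queued, else an error status.
 */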
4154 int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4156 int rval = QLA_FUNCTION_FAILED;
4157 struct ct_sns_req *ct_req;
4158 srb_t *sp;
4160 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4161 return rval;
4163 qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
4164 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4165 if (!sp)
4166 goto done;
4168 fcport->flags |= FCF_ASYNC_SENT;
4169 sp->type = SRB_CT_PTHRU_CMD;
4170 sp->name = "gnnid";
4171 sp->gen1 = fcport->rscn_gen;
4172 sp->gen2 = fcport->login_gen;
4174 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4175 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4177 /* CT_IU preamble */
4178 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
4179 GNN_ID_RSP_SIZE);
4181 /* GNN_ID req */
4182 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4185 /* req & rsp use the same buffer */
4186 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4187 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4188 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4189 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4190 sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
4191 sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
4192 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4194 sp->done = qla2x00_async_gnnid_sp_done;
4196 ql_dbg(ql_dbg_disc, vha, 0xffff,
4197 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4198 sp->name, fcport->port_name,
4199 sp->handle, fcport->loop_id, fcport->d_id.b24);
4201 rval = qla2x00_start_sp(sp);
4202 if (rval != QLA_SUCCESS)
4203 goto done_free_sp;
4204 return rval;
4206 done_free_sp:
4207 sp->free(sp);
4208 fcport->flags &= ~FCF_ASYNC_SENT;
4209 done:
4210 return rval;
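/**
 * qla24xx_post_gnnid_work() - Queue a QLA_EVT_GNNID work item for @fcport.
 * @vha: HA context
 * @fcport: port to query
 *
 * Returns 0 when the loop is not up/ready or the driver is unloading,
 * QLA_FUNCTION_FAILED if no work element could be allocated, otherwise
 * the result of qla2x00_post_work().
 */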
4213 int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4215 struct qla_work_evt *e;
4216 int ls;
4218 ls = atomic_read(&vha->loop_state);
4219 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4220 test_bit(UNLOADING, &vha->dpc_flags))
4221 return 0;
4223 e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
4224 if (!e)
4225 return QLA_FUNCTION_FAILED;
4227 e->u.fcport.fcport = fcport;
4228 return qla2x00_post_work(vha, e);
4231 /* GFPN_ID */
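/**
 * qla24xx_handle_gfpnid_event() - Process a completed GFPN_ID query.
 * @vha: HA context
 * @ea: event argument carrying the fcport and originating SRB
 *
 * Stale results (login/RSCN generation mismatch or a pending delete) are
 * dropped; otherwise GPSC work is queued for the port.
 */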
4232 void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4234 fc_port_t *fcport = ea->fcport;
4236 ql_dbg(ql_dbg_disc, vha, 0xffff,
4237 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
4238 __func__, fcport->port_name, fcport->disc_state,
4239 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
4240 fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
4242 if (fcport->disc_state == DSC_DELETE_PEND)
4243 return;
4245 if (ea->sp->gen2 != fcport->login_gen) {
4246 /* target side must have changed it. */
4247 ql_dbg(ql_dbg_disc, vha, 0x20d3,
4248 "%s %8phC generation changed\n",
4249 __func__, fcport->port_name);
4250 return;
4251 } else if (ea->sp->gen1 != fcport->rscn_gen) {
4252 return;
4255 qla24xx_post_gpsc_work(vha, fcport);
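/**
 * qla2x00_async_gfpnid_sp_done() - Completion handler for an async GFPN_ID.
 * @sp: SRB of the completed command
 * @res: completion status
 *
 * Copies the returned fabric port name into the fcport (when non-zero)
 * and forwards the result to qla24xx_handle_gfpnid_event().
 */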
4258 static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
4260 struct scsi_qla_host *vha = sp->vha;
4261 fc_port_t *fcport = sp->fcport;
4262 u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
4263 struct event_arg ea;
4264 u64 wwn;
4266 wwn = wwn_to_u64(fpn);
4267 if (wwn)
4268 memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
4270 memset(&ea, 0, sizeof(ea));
4271 ea.fcport = fcport;
4272 ea.sp = sp;
4273 ea.rc = res;
4275 ql_dbg(ql_dbg_disc, vha, 0x204f,
4276 "Async done-%s res %x, WWPN %8phC %8phC\n",
4277 sp->name, res, fcport->port_name, fcport->fabric_port_name);
4279 qla24xx_handle_gfpnid_event(vha, &ea);
4281 sp->free(sp);
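/**
 * qla24xx_async_gfpnid() - Issue an async GFPN_ID (Get Fabric Port Name)
 *	query to the fabric name server for @fcport.
 * @vha: HA context
 * @fcport: port to query
 *
 * The request and response share the port's ct_desc.ct_sns buffer.
 *
 * Returns QLA_SUCCESS if the command was queued, else an error status.
 */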
4284 int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4286 int rval = QLA_FUNCTION_FAILED;
4287 struct ct_sns_req *ct_req;
4288 srb_t *sp;
4290 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4291 return rval;
4293 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4294 if (!sp)
4295 goto done;
4297 sp->type = SRB_CT_PTHRU_CMD;
4298 sp->name = "gfpnid";
4299 sp->gen1 = fcport->rscn_gen;
4300 sp->gen2 = fcport->login_gen;
4302 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4303 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4305 /* CT_IU preamble */
4306 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
4307 GFPN_ID_RSP_SIZE);
4309 /* GFPN_ID req */
4310 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4313 /* req & rsp use the same buffer */
4314 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4315 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4316 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4317 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4318 sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
4319 sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
4320 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4322 sp->done = qla2x00_async_gfpnid_sp_done;
4324 ql_dbg(ql_dbg_disc, vha, 0xffff,
4325 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4326 sp->name, fcport->port_name,
4327 sp->handle, fcport->loop_id, fcport->d_id.b24);
4329 rval = qla2x00_start_sp(sp);
4330 if (rval != QLA_SUCCESS)
4331 goto done_free_sp;
4333 return rval;
4335 done_free_sp:
4336 sp->free(sp);
4337 done:
4338 return rval;
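/**
 * qla24xx_post_gfpnid_work() - Queue a QLA_EVT_GFPNID work item for @fcport.
 * @vha: HA context
 * @fcport: port to query
 *
 * Returns 0 when the loop is not up/ready or the driver is unloading,
 * QLA_FUNCTION_FAILED if no work element could be allocated, otherwise
 * the result of qla2x00_post_work().
 */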
4341 int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4343 struct qla_work_evt *e;
4344 int ls;
4346 ls = atomic_read(&vha->loop_state);
4347 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4348 test_bit(UNLOADING, &vha->dpc_flags))
4349 return 0;
4351 e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
4352 if (!e)
4353 return QLA_FUNCTION_FAILED;
4355 e->u.fcport.fcport = fcport;
4356 return qla2x00_post_work(vha, e);