2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
18 #include <linux/uaccess.h>
23 BFA_TRC_FILE(LDRV
, BSG
);
26 bfad_iocmd_ioc_enable(struct bfad_s
*bfad
, void *cmd
)
28 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
31 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
32 /* If IOC is not in disabled state - return */
33 if (!bfa_ioc_is_disabled(&bfad
->bfa
.ioc
)) {
34 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
35 iocmd
->status
= BFA_STATUS_OK
;
39 init_completion(&bfad
->enable_comp
);
40 bfa_iocfc_enable(&bfad
->bfa
);
41 iocmd
->status
= BFA_STATUS_OK
;
42 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
43 wait_for_completion(&bfad
->enable_comp
);
49 bfad_iocmd_ioc_disable(struct bfad_s
*bfad
, void *cmd
)
51 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
54 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
55 if (bfa_ioc_is_disabled(&bfad
->bfa
.ioc
)) {
56 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
57 iocmd
->status
= BFA_STATUS_OK
;
61 if (bfad
->disable_active
) {
62 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
66 bfad
->disable_active
= BFA_TRUE
;
67 init_completion(&bfad
->disable_comp
);
68 bfa_iocfc_disable(&bfad
->bfa
);
69 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
71 wait_for_completion(&bfad
->disable_comp
);
72 bfad
->disable_active
= BFA_FALSE
;
73 iocmd
->status
= BFA_STATUS_OK
;
79 bfad_iocmd_ioc_get_info(struct bfad_s
*bfad
, void *cmd
)
82 struct bfa_bsg_ioc_info_s
*iocmd
= (struct bfa_bsg_ioc_info_s
*)cmd
;
83 struct bfad_im_port_s
*im_port
;
84 struct bfa_port_attr_s pattr
;
87 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
88 bfa_fcport_get_attr(&bfad
->bfa
, &pattr
);
89 iocmd
->nwwn
= pattr
.nwwn
;
90 iocmd
->pwwn
= pattr
.pwwn
;
91 iocmd
->ioc_type
= bfa_get_type(&bfad
->bfa
);
92 iocmd
->mac
= bfa_get_mac(&bfad
->bfa
);
93 iocmd
->factory_mac
= bfa_get_mfg_mac(&bfad
->bfa
);
94 bfa_get_adapter_serial_num(&bfad
->bfa
, iocmd
->serialnum
);
95 iocmd
->factorynwwn
= pattr
.factorynwwn
;
96 iocmd
->factorypwwn
= pattr
.factorypwwn
;
97 iocmd
->bfad_num
= bfad
->inst_no
;
98 im_port
= bfad
->pport
.im_port
;
99 iocmd
->host
= im_port
->shost
->host_no
;
100 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
102 strcpy(iocmd
->name
, bfad
->adapter_name
);
103 strcpy(iocmd
->port_name
, bfad
->port_name
);
104 strcpy(iocmd
->hwpath
, bfad
->pci_name
);
106 /* set adapter hw path */
107 strcpy(iocmd
->adapter_hwpath
, bfad
->pci_name
);
108 for (i
= 0; iocmd
->adapter_hwpath
[i
] != ':' && i
< BFA_STRING_32
; i
++)
110 for (; iocmd
->adapter_hwpath
[++i
] != ':' && i
< BFA_STRING_32
; )
112 iocmd
->adapter_hwpath
[i
] = '\0';
113 iocmd
->status
= BFA_STATUS_OK
;
118 bfad_iocmd_ioc_get_attr(struct bfad_s
*bfad
, void *cmd
)
120 struct bfa_bsg_ioc_attr_s
*iocmd
= (struct bfa_bsg_ioc_attr_s
*)cmd
;
123 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
124 bfa_ioc_get_attr(&bfad
->bfa
.ioc
, &iocmd
->ioc_attr
);
125 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
127 /* fill in driver attr info */
128 strcpy(iocmd
->ioc_attr
.driver_attr
.driver
, BFAD_DRIVER_NAME
);
129 strncpy(iocmd
->ioc_attr
.driver_attr
.driver_ver
,
130 BFAD_DRIVER_VERSION
, BFA_VERSION_LEN
);
131 strcpy(iocmd
->ioc_attr
.driver_attr
.fw_ver
,
132 iocmd
->ioc_attr
.adapter_attr
.fw_ver
);
133 strcpy(iocmd
->ioc_attr
.driver_attr
.bios_ver
,
134 iocmd
->ioc_attr
.adapter_attr
.optrom_ver
);
136 /* copy chip rev info first otherwise it will be overwritten */
137 memcpy(bfad
->pci_attr
.chip_rev
, iocmd
->ioc_attr
.pci_attr
.chip_rev
,
138 sizeof(bfad
->pci_attr
.chip_rev
));
139 memcpy(&iocmd
->ioc_attr
.pci_attr
, &bfad
->pci_attr
,
140 sizeof(struct bfa_ioc_pci_attr_s
));
142 iocmd
->status
= BFA_STATUS_OK
;
147 bfad_iocmd_ioc_get_stats(struct bfad_s
*bfad
, void *cmd
)
149 struct bfa_bsg_ioc_stats_s
*iocmd
= (struct bfa_bsg_ioc_stats_s
*)cmd
;
151 bfa_ioc_get_stats(&bfad
->bfa
, &iocmd
->ioc_stats
);
152 iocmd
->status
= BFA_STATUS_OK
;
157 bfad_iocmd_ioc_get_fwstats(struct bfad_s
*bfad
, void *cmd
,
158 unsigned int payload_len
)
160 struct bfa_bsg_ioc_fwstats_s
*iocmd
=
161 (struct bfa_bsg_ioc_fwstats_s
*)cmd
;
165 if (bfad_chk_iocmd_sz(payload_len
,
166 sizeof(struct bfa_bsg_ioc_fwstats_s
),
167 sizeof(struct bfa_fw_stats_s
)) != BFA_STATUS_OK
) {
168 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
172 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_ioc_fwstats_s
);
173 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
174 iocmd
->status
= bfa_ioc_fw_stats_get(&bfad
->bfa
.ioc
, iocmd_bufptr
);
175 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
177 if (iocmd
->status
!= BFA_STATUS_OK
) {
178 bfa_trc(bfad
, iocmd
->status
);
182 bfa_trc(bfad
, 0x6666);
187 bfad_iocmd_ioc_reset_stats(struct bfad_s
*bfad
, void *cmd
, unsigned int v_cmd
)
189 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
192 if (v_cmd
== IOCMD_IOC_RESET_STATS
) {
193 bfa_ioc_clear_stats(&bfad
->bfa
);
194 iocmd
->status
= BFA_STATUS_OK
;
195 } else if (v_cmd
== IOCMD_IOC_RESET_FWSTATS
) {
196 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
197 iocmd
->status
= bfa_ioc_fw_stats_clear(&bfad
->bfa
.ioc
);
198 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
205 bfad_iocmd_ioc_set_name(struct bfad_s
*bfad
, void *cmd
, unsigned int v_cmd
)
207 struct bfa_bsg_ioc_name_s
*iocmd
= (struct bfa_bsg_ioc_name_s
*) cmd
;
209 if (v_cmd
== IOCMD_IOC_SET_ADAPTER_NAME
)
210 strcpy(bfad
->adapter_name
, iocmd
->name
);
211 else if (v_cmd
== IOCMD_IOC_SET_PORT_NAME
)
212 strcpy(bfad
->port_name
, iocmd
->name
);
214 iocmd
->status
= BFA_STATUS_OK
;
219 bfad_iocmd_iocfc_get_attr(struct bfad_s
*bfad
, void *cmd
)
221 struct bfa_bsg_iocfc_attr_s
*iocmd
= (struct bfa_bsg_iocfc_attr_s
*)cmd
;
223 iocmd
->status
= BFA_STATUS_OK
;
224 bfa_iocfc_get_attr(&bfad
->bfa
, &iocmd
->iocfc_attr
);
230 bfad_iocmd_ioc_fw_sig_inv(struct bfad_s
*bfad
, void *cmd
)
232 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
235 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
236 iocmd
->status
= bfa_ioc_fwsig_invalidate(&bfad
->bfa
.ioc
);
237 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
242 bfad_iocmd_iocfc_set_intr(struct bfad_s
*bfad
, void *cmd
)
244 struct bfa_bsg_iocfc_intr_s
*iocmd
= (struct bfa_bsg_iocfc_intr_s
*)cmd
;
247 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
248 iocmd
->status
= bfa_iocfc_israttr_set(&bfad
->bfa
, &iocmd
->attr
);
249 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
255 bfad_iocmd_port_enable(struct bfad_s
*bfad
, void *cmd
)
257 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
258 struct bfad_hal_comp fcomp
;
261 init_completion(&fcomp
.comp
);
262 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
263 iocmd
->status
= bfa_port_enable(&bfad
->bfa
.modules
.port
,
264 bfad_hcb_comp
, &fcomp
);
265 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
266 if (iocmd
->status
!= BFA_STATUS_OK
) {
267 bfa_trc(bfad
, iocmd
->status
);
270 wait_for_completion(&fcomp
.comp
);
271 iocmd
->status
= fcomp
.status
;
276 bfad_iocmd_port_disable(struct bfad_s
*bfad
, void *cmd
)
278 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
279 struct bfad_hal_comp fcomp
;
282 init_completion(&fcomp
.comp
);
283 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
284 iocmd
->status
= bfa_port_disable(&bfad
->bfa
.modules
.port
,
285 bfad_hcb_comp
, &fcomp
);
286 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
288 if (iocmd
->status
!= BFA_STATUS_OK
) {
289 bfa_trc(bfad
, iocmd
->status
);
292 wait_for_completion(&fcomp
.comp
);
293 iocmd
->status
= fcomp
.status
;
298 bfad_iocmd_port_get_attr(struct bfad_s
*bfad
, void *cmd
)
300 struct bfa_bsg_port_attr_s
*iocmd
= (struct bfa_bsg_port_attr_s
*)cmd
;
301 struct bfa_lport_attr_s port_attr
;
304 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
305 bfa_fcport_get_attr(&bfad
->bfa
, &iocmd
->attr
);
306 bfa_fcs_lport_get_attr(&bfad
->bfa_fcs
.fabric
.bport
, &port_attr
);
307 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
309 if (iocmd
->attr
.topology
!= BFA_PORT_TOPOLOGY_NONE
)
310 iocmd
->attr
.pid
= port_attr
.pid
;
314 iocmd
->attr
.port_type
= port_attr
.port_type
;
315 iocmd
->attr
.loopback
= port_attr
.loopback
;
316 iocmd
->attr
.authfail
= port_attr
.authfail
;
317 strncpy(iocmd
->attr
.port_symname
.symname
,
318 port_attr
.port_cfg
.sym_name
.symname
,
319 sizeof(port_attr
.port_cfg
.sym_name
.symname
));
321 iocmd
->status
= BFA_STATUS_OK
;
326 bfad_iocmd_port_get_stats(struct bfad_s
*bfad
, void *cmd
,
327 unsigned int payload_len
)
329 struct bfa_bsg_port_stats_s
*iocmd
= (struct bfa_bsg_port_stats_s
*)cmd
;
330 struct bfad_hal_comp fcomp
;
334 if (bfad_chk_iocmd_sz(payload_len
,
335 sizeof(struct bfa_bsg_port_stats_s
),
336 sizeof(union bfa_port_stats_u
)) != BFA_STATUS_OK
) {
337 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
341 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_port_stats_s
);
343 init_completion(&fcomp
.comp
);
344 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
345 iocmd
->status
= bfa_port_get_stats(&bfad
->bfa
.modules
.port
,
346 iocmd_bufptr
, bfad_hcb_comp
, &fcomp
);
347 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
348 if (iocmd
->status
!= BFA_STATUS_OK
) {
349 bfa_trc(bfad
, iocmd
->status
);
353 wait_for_completion(&fcomp
.comp
);
354 iocmd
->status
= fcomp
.status
;
360 bfad_iocmd_port_reset_stats(struct bfad_s
*bfad
, void *cmd
)
362 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
363 struct bfad_hal_comp fcomp
;
366 init_completion(&fcomp
.comp
);
367 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
368 iocmd
->status
= bfa_port_clear_stats(&bfad
->bfa
.modules
.port
,
369 bfad_hcb_comp
, &fcomp
);
370 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
371 if (iocmd
->status
!= BFA_STATUS_OK
) {
372 bfa_trc(bfad
, iocmd
->status
);
375 wait_for_completion(&fcomp
.comp
);
376 iocmd
->status
= fcomp
.status
;
381 bfad_iocmd_set_port_cfg(struct bfad_s
*bfad
, void *iocmd
, unsigned int v_cmd
)
383 struct bfa_bsg_port_cfg_s
*cmd
= (struct bfa_bsg_port_cfg_s
*)iocmd
;
386 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
387 if (v_cmd
== IOCMD_PORT_CFG_TOPO
)
388 cmd
->status
= bfa_fcport_cfg_topology(&bfad
->bfa
, cmd
->param
);
389 else if (v_cmd
== IOCMD_PORT_CFG_SPEED
)
390 cmd
->status
= bfa_fcport_cfg_speed(&bfad
->bfa
, cmd
->param
);
391 else if (v_cmd
== IOCMD_PORT_CFG_ALPA
)
392 cmd
->status
= bfa_fcport_cfg_hardalpa(&bfad
->bfa
, cmd
->param
);
393 else if (v_cmd
== IOCMD_PORT_CLR_ALPA
)
394 cmd
->status
= bfa_fcport_clr_hardalpa(&bfad
->bfa
);
395 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
401 bfad_iocmd_port_cfg_maxfrsize(struct bfad_s
*bfad
, void *cmd
)
403 struct bfa_bsg_port_cfg_maxfrsize_s
*iocmd
=
404 (struct bfa_bsg_port_cfg_maxfrsize_s
*)cmd
;
407 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
408 iocmd
->status
= bfa_fcport_cfg_maxfrsize(&bfad
->bfa
, iocmd
->maxfrsize
);
409 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
415 bfad_iocmd_port_cfg_bbcr(struct bfad_s
*bfad
, unsigned int cmd
, void *pcmd
)
417 struct bfa_bsg_bbcr_enable_s
*iocmd
=
418 (struct bfa_bsg_bbcr_enable_s
*)pcmd
;
422 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
423 if (cmd
== IOCMD_PORT_BBCR_ENABLE
)
424 rc
= bfa_fcport_cfg_bbcr(&bfad
->bfa
, BFA_TRUE
, iocmd
->bb_scn
);
425 else if (cmd
== IOCMD_PORT_BBCR_DISABLE
)
426 rc
= bfa_fcport_cfg_bbcr(&bfad
->bfa
, BFA_FALSE
, 0);
428 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
431 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
438 bfad_iocmd_port_get_bbcr_attr(struct bfad_s
*bfad
, void *pcmd
)
440 struct bfa_bsg_bbcr_attr_s
*iocmd
= (struct bfa_bsg_bbcr_attr_s
*) pcmd
;
443 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
445 bfa_fcport_get_bbcr_attr(&bfad
->bfa
, &iocmd
->attr
);
446 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
453 bfad_iocmd_lport_get_attr(struct bfad_s
*bfad
, void *cmd
)
455 struct bfa_fcs_lport_s
*fcs_port
;
456 struct bfa_bsg_lport_attr_s
*iocmd
= (struct bfa_bsg_lport_attr_s
*)cmd
;
459 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
460 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
461 iocmd
->vf_id
, iocmd
->pwwn
);
462 if (fcs_port
== NULL
) {
463 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
464 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
468 bfa_fcs_lport_get_attr(fcs_port
, &iocmd
->port_attr
);
469 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
470 iocmd
->status
= BFA_STATUS_OK
;
476 bfad_iocmd_lport_get_stats(struct bfad_s
*bfad
, void *cmd
)
478 struct bfa_fcs_lport_s
*fcs_port
;
479 struct bfa_bsg_lport_stats_s
*iocmd
=
480 (struct bfa_bsg_lport_stats_s
*)cmd
;
483 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
484 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
485 iocmd
->vf_id
, iocmd
->pwwn
);
486 if (fcs_port
== NULL
) {
487 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
488 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
492 bfa_fcs_lport_get_stats(fcs_port
, &iocmd
->port_stats
);
493 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
494 iocmd
->status
= BFA_STATUS_OK
;
500 bfad_iocmd_lport_reset_stats(struct bfad_s
*bfad
, void *cmd
)
502 struct bfa_fcs_lport_s
*fcs_port
;
503 struct bfa_bsg_reset_stats_s
*iocmd
=
504 (struct bfa_bsg_reset_stats_s
*)cmd
;
505 struct bfa_fcpim_s
*fcpim
= BFA_FCPIM(&bfad
->bfa
);
506 struct list_head
*qe
, *qen
;
507 struct bfa_itnim_s
*itnim
;
510 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
511 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
512 iocmd
->vf_id
, iocmd
->vpwwn
);
513 if (fcs_port
== NULL
) {
514 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
515 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
519 bfa_fcs_lport_clear_stats(fcs_port
);
520 /* clear IO stats from all active itnims */
521 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
522 itnim
= (struct bfa_itnim_s
*) qe
;
523 if (itnim
->rport
->rport_info
.lp_tag
!= fcs_port
->lp_tag
)
525 bfa_itnim_clear_stats(itnim
);
527 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
528 iocmd
->status
= BFA_STATUS_OK
;
534 bfad_iocmd_lport_get_iostats(struct bfad_s
*bfad
, void *cmd
)
536 struct bfa_fcs_lport_s
*fcs_port
;
537 struct bfa_bsg_lport_iostats_s
*iocmd
=
538 (struct bfa_bsg_lport_iostats_s
*)cmd
;
541 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
542 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
543 iocmd
->vf_id
, iocmd
->pwwn
);
544 if (fcs_port
== NULL
) {
545 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
546 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
550 bfa_fcpim_port_iostats(&bfad
->bfa
, &iocmd
->iostats
,
552 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
553 iocmd
->status
= BFA_STATUS_OK
;
559 bfad_iocmd_lport_get_rports(struct bfad_s
*bfad
, void *cmd
,
560 unsigned int payload_len
)
562 struct bfa_bsg_lport_get_rports_s
*iocmd
=
563 (struct bfa_bsg_lport_get_rports_s
*)cmd
;
564 struct bfa_fcs_lport_s
*fcs_port
;
568 if (iocmd
->nrports
== 0)
571 if (bfad_chk_iocmd_sz(payload_len
,
572 sizeof(struct bfa_bsg_lport_get_rports_s
),
573 sizeof(struct bfa_rport_qualifier_s
) * iocmd
->nrports
)
575 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
579 iocmd_bufptr
= (char *)iocmd
+
580 sizeof(struct bfa_bsg_lport_get_rports_s
);
581 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
582 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
583 iocmd
->vf_id
, iocmd
->pwwn
);
584 if (fcs_port
== NULL
) {
585 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
587 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
591 bfa_fcs_lport_get_rport_quals(fcs_port
,
592 (struct bfa_rport_qualifier_s
*)iocmd_bufptr
,
594 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
595 iocmd
->status
= BFA_STATUS_OK
;
601 bfad_iocmd_rport_get_attr(struct bfad_s
*bfad
, void *cmd
)
603 struct bfa_bsg_rport_attr_s
*iocmd
= (struct bfa_bsg_rport_attr_s
*)cmd
;
604 struct bfa_fcs_lport_s
*fcs_port
;
605 struct bfa_fcs_rport_s
*fcs_rport
;
608 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
609 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
610 iocmd
->vf_id
, iocmd
->pwwn
);
611 if (fcs_port
== NULL
) {
613 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
614 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
619 fcs_rport
= bfa_fcs_lport_get_rport_by_qualifier(fcs_port
,
620 iocmd
->rpwwn
, iocmd
->pid
);
622 fcs_rport
= bfa_fcs_rport_lookup(fcs_port
, iocmd
->rpwwn
);
623 if (fcs_rport
== NULL
) {
625 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
626 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
630 bfa_fcs_rport_get_attr(fcs_rport
, &iocmd
->attr
);
631 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
632 iocmd
->status
= BFA_STATUS_OK
;
638 bfad_iocmd_rport_get_addr(struct bfad_s
*bfad
, void *cmd
)
640 struct bfa_bsg_rport_scsi_addr_s
*iocmd
=
641 (struct bfa_bsg_rport_scsi_addr_s
*)cmd
;
642 struct bfa_fcs_lport_s
*fcs_port
;
643 struct bfa_fcs_itnim_s
*fcs_itnim
;
644 struct bfad_itnim_s
*drv_itnim
;
647 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
648 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
649 iocmd
->vf_id
, iocmd
->pwwn
);
650 if (fcs_port
== NULL
) {
652 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
653 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
657 fcs_itnim
= bfa_fcs_itnim_lookup(fcs_port
, iocmd
->rpwwn
);
658 if (fcs_itnim
== NULL
) {
660 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
661 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
665 drv_itnim
= fcs_itnim
->itnim_drv
;
667 if (drv_itnim
&& drv_itnim
->im_port
)
668 iocmd
->host
= drv_itnim
->im_port
->shost
->host_no
;
671 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
672 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
676 iocmd
->target
= drv_itnim
->scsi_tgt_id
;
677 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
681 iocmd
->status
= BFA_STATUS_OK
;
687 bfad_iocmd_rport_get_stats(struct bfad_s
*bfad
, void *cmd
)
689 struct bfa_bsg_rport_stats_s
*iocmd
=
690 (struct bfa_bsg_rport_stats_s
*)cmd
;
691 struct bfa_fcs_lport_s
*fcs_port
;
692 struct bfa_fcs_rport_s
*fcs_rport
;
695 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
696 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
697 iocmd
->vf_id
, iocmd
->pwwn
);
698 if (fcs_port
== NULL
) {
700 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
701 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
705 fcs_rport
= bfa_fcs_rport_lookup(fcs_port
, iocmd
->rpwwn
);
706 if (fcs_rport
== NULL
) {
708 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
709 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
713 memcpy((void *)&iocmd
->stats
, (void *)&fcs_rport
->stats
,
714 sizeof(struct bfa_rport_stats_s
));
715 if (bfa_fcs_rport_get_halrport(fcs_rport
)) {
716 memcpy((void *)&iocmd
->stats
.hal_stats
,
717 (void *)&(bfa_fcs_rport_get_halrport(fcs_rport
)->stats
),
718 sizeof(struct bfa_rport_hal_stats_s
));
721 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
722 iocmd
->status
= BFA_STATUS_OK
;
728 bfad_iocmd_rport_clr_stats(struct bfad_s
*bfad
, void *cmd
)
730 struct bfa_bsg_rport_reset_stats_s
*iocmd
=
731 (struct bfa_bsg_rport_reset_stats_s
*)cmd
;
732 struct bfa_fcs_lport_s
*fcs_port
;
733 struct bfa_fcs_rport_s
*fcs_rport
;
734 struct bfa_rport_s
*rport
;
737 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
738 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
739 iocmd
->vf_id
, iocmd
->pwwn
);
740 if (fcs_port
== NULL
) {
741 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
742 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
746 fcs_rport
= bfa_fcs_rport_lookup(fcs_port
, iocmd
->rpwwn
);
747 if (fcs_rport
== NULL
) {
748 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
749 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
753 memset((char *)&fcs_rport
->stats
, 0, sizeof(struct bfa_rport_stats_s
));
754 rport
= bfa_fcs_rport_get_halrport(fcs_rport
);
756 memset(&rport
->stats
, 0, sizeof(rport
->stats
));
757 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
758 iocmd
->status
= BFA_STATUS_OK
;
764 bfad_iocmd_rport_set_speed(struct bfad_s
*bfad
, void *cmd
)
766 struct bfa_bsg_rport_set_speed_s
*iocmd
=
767 (struct bfa_bsg_rport_set_speed_s
*)cmd
;
768 struct bfa_fcs_lport_s
*fcs_port
;
769 struct bfa_fcs_rport_s
*fcs_rport
;
772 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
773 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
774 iocmd
->vf_id
, iocmd
->pwwn
);
775 if (fcs_port
== NULL
) {
776 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
777 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
781 fcs_rport
= bfa_fcs_rport_lookup(fcs_port
, iocmd
->rpwwn
);
782 if (fcs_rport
== NULL
) {
783 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
784 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
788 fcs_rport
->rpf
.assigned_speed
= iocmd
->speed
;
789 /* Set this speed in f/w only if the RPSC speed is not available */
790 if (fcs_rport
->rpf
.rpsc_speed
== BFA_PORT_SPEED_UNKNOWN
)
791 if (fcs_rport
->bfa_rport
)
792 bfa_rport_speed(fcs_rport
->bfa_rport
, iocmd
->speed
);
793 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
794 iocmd
->status
= BFA_STATUS_OK
;
800 bfad_iocmd_vport_get_attr(struct bfad_s
*bfad
, void *cmd
)
802 struct bfa_fcs_vport_s
*fcs_vport
;
803 struct bfa_bsg_vport_attr_s
*iocmd
= (struct bfa_bsg_vport_attr_s
*)cmd
;
806 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
807 fcs_vport
= bfa_fcs_vport_lookup(&bfad
->bfa_fcs
,
808 iocmd
->vf_id
, iocmd
->vpwwn
);
809 if (fcs_vport
== NULL
) {
810 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
811 iocmd
->status
= BFA_STATUS_UNKNOWN_VWWN
;
815 bfa_fcs_vport_get_attr(fcs_vport
, &iocmd
->vport_attr
);
816 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
817 iocmd
->status
= BFA_STATUS_OK
;
823 bfad_iocmd_vport_get_stats(struct bfad_s
*bfad
, void *cmd
)
825 struct bfa_fcs_vport_s
*fcs_vport
;
826 struct bfa_bsg_vport_stats_s
*iocmd
=
827 (struct bfa_bsg_vport_stats_s
*)cmd
;
830 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
831 fcs_vport
= bfa_fcs_vport_lookup(&bfad
->bfa_fcs
,
832 iocmd
->vf_id
, iocmd
->vpwwn
);
833 if (fcs_vport
== NULL
) {
834 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
835 iocmd
->status
= BFA_STATUS_UNKNOWN_VWWN
;
839 memcpy((void *)&iocmd
->vport_stats
, (void *)&fcs_vport
->vport_stats
,
840 sizeof(struct bfa_vport_stats_s
));
841 memcpy((void *)&iocmd
->vport_stats
.port_stats
,
842 (void *)&fcs_vport
->lport
.stats
,
843 sizeof(struct bfa_lport_stats_s
));
844 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
845 iocmd
->status
= BFA_STATUS_OK
;
851 bfad_iocmd_vport_clr_stats(struct bfad_s
*bfad
, void *cmd
)
853 struct bfa_fcs_vport_s
*fcs_vport
;
854 struct bfa_bsg_reset_stats_s
*iocmd
=
855 (struct bfa_bsg_reset_stats_s
*)cmd
;
858 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
859 fcs_vport
= bfa_fcs_vport_lookup(&bfad
->bfa_fcs
,
860 iocmd
->vf_id
, iocmd
->vpwwn
);
861 if (fcs_vport
== NULL
) {
862 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
863 iocmd
->status
= BFA_STATUS_UNKNOWN_VWWN
;
867 memset(&fcs_vport
->vport_stats
, 0, sizeof(struct bfa_vport_stats_s
));
868 memset(&fcs_vport
->lport
.stats
, 0, sizeof(struct bfa_lport_stats_s
));
869 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
870 iocmd
->status
= BFA_STATUS_OK
;
876 bfad_iocmd_fabric_get_lports(struct bfad_s
*bfad
, void *cmd
,
877 unsigned int payload_len
)
879 struct bfa_bsg_fabric_get_lports_s
*iocmd
=
880 (struct bfa_bsg_fabric_get_lports_s
*)cmd
;
881 bfa_fcs_vf_t
*fcs_vf
;
882 uint32_t nports
= iocmd
->nports
;
887 iocmd
->status
= BFA_STATUS_EINVAL
;
891 if (bfad_chk_iocmd_sz(payload_len
,
892 sizeof(struct bfa_bsg_fabric_get_lports_s
),
893 sizeof(wwn_t
[iocmd
->nports
])) != BFA_STATUS_OK
) {
894 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
898 iocmd_bufptr
= (char *)iocmd
+
899 sizeof(struct bfa_bsg_fabric_get_lports_s
);
901 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
902 fcs_vf
= bfa_fcs_vf_lookup(&bfad
->bfa_fcs
, iocmd
->vf_id
);
903 if (fcs_vf
== NULL
) {
904 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
905 iocmd
->status
= BFA_STATUS_UNKNOWN_VFID
;
908 bfa_fcs_vf_get_ports(fcs_vf
, (wwn_t
*)iocmd_bufptr
, &nports
);
909 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
911 iocmd
->nports
= nports
;
912 iocmd
->status
= BFA_STATUS_OK
;
918 bfad_iocmd_qos_set_bw(struct bfad_s
*bfad
, void *pcmd
)
920 struct bfa_bsg_qos_bw_s
*iocmd
= (struct bfa_bsg_qos_bw_s
*)pcmd
;
923 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
924 iocmd
->status
= bfa_fcport_set_qos_bw(&bfad
->bfa
, &iocmd
->qos_bw
);
925 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
931 bfad_iocmd_ratelim(struct bfad_s
*bfad
, unsigned int cmd
, void *pcmd
)
933 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)pcmd
;
934 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(&bfad
->bfa
);
937 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
939 if ((fcport
->cfg
.topology
== BFA_PORT_TOPOLOGY_LOOP
) &&
940 (fcport
->topology
== BFA_PORT_TOPOLOGY_LOOP
))
941 iocmd
->status
= BFA_STATUS_TOPOLOGY_LOOP
;
943 if (cmd
== IOCMD_RATELIM_ENABLE
)
944 fcport
->cfg
.ratelimit
= BFA_TRUE
;
945 else if (cmd
== IOCMD_RATELIM_DISABLE
)
946 fcport
->cfg
.ratelimit
= BFA_FALSE
;
948 if (fcport
->cfg
.trl_def_speed
== BFA_PORT_SPEED_UNKNOWN
)
949 fcport
->cfg
.trl_def_speed
= BFA_PORT_SPEED_1GBPS
;
951 iocmd
->status
= BFA_STATUS_OK
;
954 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
960 bfad_iocmd_ratelim_speed(struct bfad_s
*bfad
, unsigned int cmd
, void *pcmd
)
962 struct bfa_bsg_trl_speed_s
*iocmd
= (struct bfa_bsg_trl_speed_s
*)pcmd
;
963 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(&bfad
->bfa
);
966 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
968 /* Auto and speeds greater than the supported speed, are invalid */
969 if ((iocmd
->speed
== BFA_PORT_SPEED_AUTO
) ||
970 (iocmd
->speed
> fcport
->speed_sup
)) {
971 iocmd
->status
= BFA_STATUS_UNSUPP_SPEED
;
972 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
976 if ((fcport
->cfg
.topology
== BFA_PORT_TOPOLOGY_LOOP
) &&
977 (fcport
->topology
== BFA_PORT_TOPOLOGY_LOOP
))
978 iocmd
->status
= BFA_STATUS_TOPOLOGY_LOOP
;
980 fcport
->cfg
.trl_def_speed
= iocmd
->speed
;
981 iocmd
->status
= BFA_STATUS_OK
;
983 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
989 bfad_iocmd_cfg_fcpim(struct bfad_s
*bfad
, void *cmd
)
991 struct bfa_bsg_fcpim_s
*iocmd
= (struct bfa_bsg_fcpim_s
*)cmd
;
994 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
995 bfa_fcpim_path_tov_set(&bfad
->bfa
, iocmd
->param
);
996 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
997 iocmd
->status
= BFA_STATUS_OK
;
1002 bfad_iocmd_fcpim_get_modstats(struct bfad_s
*bfad
, void *cmd
)
1004 struct bfa_bsg_fcpim_modstats_s
*iocmd
=
1005 (struct bfa_bsg_fcpim_modstats_s
*)cmd
;
1006 struct bfa_fcpim_s
*fcpim
= BFA_FCPIM(&bfad
->bfa
);
1007 struct list_head
*qe
, *qen
;
1008 struct bfa_itnim_s
*itnim
;
1009 unsigned long flags
;
1011 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1012 /* accumulate IO stats from itnim */
1013 memset((void *)&iocmd
->modstats
, 0, sizeof(struct bfa_itnim_iostats_s
));
1014 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
1015 itnim
= (struct bfa_itnim_s
*) qe
;
1016 bfa_fcpim_add_stats(&iocmd
->modstats
, &(itnim
->stats
));
1018 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1019 iocmd
->status
= BFA_STATUS_OK
;
1024 bfad_iocmd_fcpim_clr_modstats(struct bfad_s
*bfad
, void *cmd
)
1026 struct bfa_bsg_fcpim_modstatsclr_s
*iocmd
=
1027 (struct bfa_bsg_fcpim_modstatsclr_s
*)cmd
;
1028 struct bfa_fcpim_s
*fcpim
= BFA_FCPIM(&bfad
->bfa
);
1029 struct list_head
*qe
, *qen
;
1030 struct bfa_itnim_s
*itnim
;
1031 unsigned long flags
;
1033 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1034 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
1035 itnim
= (struct bfa_itnim_s
*) qe
;
1036 bfa_itnim_clear_stats(itnim
);
1038 memset(&fcpim
->del_itn_stats
, 0,
1039 sizeof(struct bfa_fcpim_del_itn_stats_s
));
1040 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1041 iocmd
->status
= BFA_STATUS_OK
;
1046 bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s
*bfad
, void *cmd
)
1048 struct bfa_bsg_fcpim_del_itn_stats_s
*iocmd
=
1049 (struct bfa_bsg_fcpim_del_itn_stats_s
*)cmd
;
1050 struct bfa_fcpim_s
*fcpim
= BFA_FCPIM(&bfad
->bfa
);
1051 unsigned long flags
;
1053 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1054 memcpy((void *)&iocmd
->modstats
, (void *)&fcpim
->del_itn_stats
,
1055 sizeof(struct bfa_fcpim_del_itn_stats_s
));
1056 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1058 iocmd
->status
= BFA_STATUS_OK
;
1063 bfad_iocmd_itnim_get_attr(struct bfad_s
*bfad
, void *cmd
)
1065 struct bfa_bsg_itnim_attr_s
*iocmd
= (struct bfa_bsg_itnim_attr_s
*)cmd
;
1066 struct bfa_fcs_lport_s
*fcs_port
;
1067 unsigned long flags
;
1069 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1070 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
1071 iocmd
->vf_id
, iocmd
->lpwwn
);
1073 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
1075 iocmd
->status
= bfa_fcs_itnim_attr_get(fcs_port
,
1076 iocmd
->rpwwn
, &iocmd
->attr
);
1077 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1082 bfad_iocmd_itnim_get_iostats(struct bfad_s
*bfad
, void *cmd
)
1084 struct bfa_bsg_itnim_iostats_s
*iocmd
=
1085 (struct bfa_bsg_itnim_iostats_s
*)cmd
;
1086 struct bfa_fcs_lport_s
*fcs_port
;
1087 struct bfa_fcs_itnim_s
*itnim
;
1088 unsigned long flags
;
1090 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1091 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
1092 iocmd
->vf_id
, iocmd
->lpwwn
);
1094 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
1097 itnim
= bfa_fcs_itnim_lookup(fcs_port
, iocmd
->rpwwn
);
1099 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
1101 iocmd
->status
= BFA_STATUS_OK
;
1102 if (bfa_fcs_itnim_get_halitn(itnim
))
1103 memcpy((void *)&iocmd
->iostats
, (void *)
1104 &(bfa_fcs_itnim_get_halitn(itnim
)->stats
),
1105 sizeof(struct bfa_itnim_iostats_s
));
1108 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1113 bfad_iocmd_itnim_reset_stats(struct bfad_s
*bfad
, void *cmd
)
1115 struct bfa_bsg_rport_reset_stats_s
*iocmd
=
1116 (struct bfa_bsg_rport_reset_stats_s
*)cmd
;
1117 struct bfa_fcs_lport_s
*fcs_port
;
1118 struct bfa_fcs_itnim_s
*itnim
;
1119 unsigned long flags
;
1121 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1122 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
1123 iocmd
->vf_id
, iocmd
->pwwn
);
1125 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
1127 itnim
= bfa_fcs_itnim_lookup(fcs_port
, iocmd
->rpwwn
);
1129 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
1131 iocmd
->status
= BFA_STATUS_OK
;
1132 bfa_fcs_itnim_stats_clear(fcs_port
, iocmd
->rpwwn
);
1133 bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim
));
1136 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1142 bfad_iocmd_itnim_get_itnstats(struct bfad_s
*bfad
, void *cmd
)
1144 struct bfa_bsg_itnim_itnstats_s
*iocmd
=
1145 (struct bfa_bsg_itnim_itnstats_s
*)cmd
;
1146 struct bfa_fcs_lport_s
*fcs_port
;
1147 struct bfa_fcs_itnim_s
*itnim
;
1148 unsigned long flags
;
1150 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1151 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
1152 iocmd
->vf_id
, iocmd
->lpwwn
);
1154 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
1157 itnim
= bfa_fcs_itnim_lookup(fcs_port
, iocmd
->rpwwn
);
1159 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
1161 iocmd
->status
= BFA_STATUS_OK
;
1162 bfa_fcs_itnim_stats_get(fcs_port
, iocmd
->rpwwn
,
1166 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1171 bfad_iocmd_fcport_enable(struct bfad_s
*bfad
, void *cmd
)
1173 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
1174 unsigned long flags
;
1176 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1177 iocmd
->status
= bfa_fcport_enable(&bfad
->bfa
);
1178 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1184 bfad_iocmd_fcport_disable(struct bfad_s
*bfad
, void *cmd
)
1186 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
1187 unsigned long flags
;
1189 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1190 iocmd
->status
= bfa_fcport_disable(&bfad
->bfa
);
1191 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1197 bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s
*bfad
, void *cmd
)
1199 struct bfa_bsg_pcifn_cfg_s
*iocmd
= (struct bfa_bsg_pcifn_cfg_s
*)cmd
;
1200 struct bfad_hal_comp fcomp
;
1201 unsigned long flags
;
1203 init_completion(&fcomp
.comp
);
1204 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1205 iocmd
->status
= bfa_ablk_query(&bfad
->bfa
.modules
.ablk
,
1207 bfad_hcb_comp
, &fcomp
);
1208 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1209 if (iocmd
->status
!= BFA_STATUS_OK
)
1212 wait_for_completion(&fcomp
.comp
);
1213 iocmd
->status
= fcomp
.status
;
1219 bfad_iocmd_pcifn_create(struct bfad_s
*bfad
, void *cmd
)
1221 struct bfa_bsg_pcifn_s
*iocmd
= (struct bfa_bsg_pcifn_s
*)cmd
;
1222 struct bfad_hal_comp fcomp
;
1223 unsigned long flags
;
1225 init_completion(&fcomp
.comp
);
1226 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1227 iocmd
->status
= bfa_ablk_pf_create(&bfad
->bfa
.modules
.ablk
,
1228 &iocmd
->pcifn_id
, iocmd
->port
,
1229 iocmd
->pcifn_class
, iocmd
->bw_min
,
1230 iocmd
->bw_max
, bfad_hcb_comp
, &fcomp
);
1231 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1232 if (iocmd
->status
!= BFA_STATUS_OK
)
1235 wait_for_completion(&fcomp
.comp
);
1236 iocmd
->status
= fcomp
.status
;
1242 bfad_iocmd_pcifn_delete(struct bfad_s
*bfad
, void *cmd
)
1244 struct bfa_bsg_pcifn_s
*iocmd
= (struct bfa_bsg_pcifn_s
*)cmd
;
1245 struct bfad_hal_comp fcomp
;
1246 unsigned long flags
;
1248 init_completion(&fcomp
.comp
);
1249 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1250 iocmd
->status
= bfa_ablk_pf_delete(&bfad
->bfa
.modules
.ablk
,
1252 bfad_hcb_comp
, &fcomp
);
1253 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1254 if (iocmd
->status
!= BFA_STATUS_OK
)
1257 wait_for_completion(&fcomp
.comp
);
1258 iocmd
->status
= fcomp
.status
;
1264 bfad_iocmd_pcifn_bw(struct bfad_s
*bfad
, void *cmd
)
1266 struct bfa_bsg_pcifn_s
*iocmd
= (struct bfa_bsg_pcifn_s
*)cmd
;
1267 struct bfad_hal_comp fcomp
;
1268 unsigned long flags
;
1270 init_completion(&fcomp
.comp
);
1271 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1272 iocmd
->status
= bfa_ablk_pf_update(&bfad
->bfa
.modules
.ablk
,
1273 iocmd
->pcifn_id
, iocmd
->bw_min
,
1274 iocmd
->bw_max
, bfad_hcb_comp
, &fcomp
);
1275 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1276 bfa_trc(bfad
, iocmd
->status
);
1277 if (iocmd
->status
!= BFA_STATUS_OK
)
1280 wait_for_completion(&fcomp
.comp
);
1281 iocmd
->status
= fcomp
.status
;
1282 bfa_trc(bfad
, iocmd
->status
);
1288 bfad_iocmd_adapter_cfg_mode(struct bfad_s
*bfad
, void *cmd
)
1290 struct bfa_bsg_adapter_cfg_mode_s
*iocmd
=
1291 (struct bfa_bsg_adapter_cfg_mode_s
*)cmd
;
1292 struct bfad_hal_comp fcomp
;
1293 unsigned long flags
= 0;
1295 init_completion(&fcomp
.comp
);
1296 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1297 iocmd
->status
= bfa_ablk_adapter_config(&bfad
->bfa
.modules
.ablk
,
1298 iocmd
->cfg
.mode
, iocmd
->cfg
.max_pf
,
1299 iocmd
->cfg
.max_vf
, bfad_hcb_comp
, &fcomp
);
1300 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1301 if (iocmd
->status
!= BFA_STATUS_OK
)
1304 wait_for_completion(&fcomp
.comp
);
1305 iocmd
->status
= fcomp
.status
;
1311 bfad_iocmd_port_cfg_mode(struct bfad_s
*bfad
, void *cmd
)
1313 struct bfa_bsg_port_cfg_mode_s
*iocmd
=
1314 (struct bfa_bsg_port_cfg_mode_s
*)cmd
;
1315 struct bfad_hal_comp fcomp
;
1316 unsigned long flags
= 0;
1318 init_completion(&fcomp
.comp
);
1319 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1320 iocmd
->status
= bfa_ablk_port_config(&bfad
->bfa
.modules
.ablk
,
1321 iocmd
->instance
, iocmd
->cfg
.mode
,
1322 iocmd
->cfg
.max_pf
, iocmd
->cfg
.max_vf
,
1323 bfad_hcb_comp
, &fcomp
);
1324 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1325 if (iocmd
->status
!= BFA_STATUS_OK
)
1328 wait_for_completion(&fcomp
.comp
);
1329 iocmd
->status
= fcomp
.status
;
1335 bfad_iocmd_ablk_optrom(struct bfad_s
*bfad
, unsigned int cmd
, void *pcmd
)
1337 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)pcmd
;
1338 struct bfad_hal_comp fcomp
;
1339 unsigned long flags
;
1341 init_completion(&fcomp
.comp
);
1342 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1343 if (cmd
== IOCMD_FLASH_ENABLE_OPTROM
)
1344 iocmd
->status
= bfa_ablk_optrom_en(&bfad
->bfa
.modules
.ablk
,
1345 bfad_hcb_comp
, &fcomp
);
1347 iocmd
->status
= bfa_ablk_optrom_dis(&bfad
->bfa
.modules
.ablk
,
1348 bfad_hcb_comp
, &fcomp
);
1349 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1351 if (iocmd
->status
!= BFA_STATUS_OK
)
1354 wait_for_completion(&fcomp
.comp
);
1355 iocmd
->status
= fcomp
.status
;
1361 bfad_iocmd_faa_query(struct bfad_s
*bfad
, void *cmd
)
1363 struct bfa_bsg_faa_attr_s
*iocmd
= (struct bfa_bsg_faa_attr_s
*)cmd
;
1364 struct bfad_hal_comp fcomp
;
1365 unsigned long flags
;
1367 init_completion(&fcomp
.comp
);
1368 iocmd
->status
= BFA_STATUS_OK
;
1369 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1370 iocmd
->status
= bfa_faa_query(&bfad
->bfa
, &iocmd
->faa_attr
,
1371 bfad_hcb_comp
, &fcomp
);
1372 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1374 if (iocmd
->status
!= BFA_STATUS_OK
)
1377 wait_for_completion(&fcomp
.comp
);
1378 iocmd
->status
= fcomp
.status
;
1384 bfad_iocmd_cee_attr(struct bfad_s
*bfad
, void *cmd
, unsigned int payload_len
)
1386 struct bfa_bsg_cee_attr_s
*iocmd
=
1387 (struct bfa_bsg_cee_attr_s
*)cmd
;
1389 struct bfad_hal_comp cee_comp
;
1390 unsigned long flags
;
1392 if (bfad_chk_iocmd_sz(payload_len
,
1393 sizeof(struct bfa_bsg_cee_attr_s
),
1394 sizeof(struct bfa_cee_attr_s
)) != BFA_STATUS_OK
) {
1395 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1399 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_cee_attr_s
);
1401 cee_comp
.status
= 0;
1402 init_completion(&cee_comp
.comp
);
1403 mutex_lock(&bfad_mutex
);
1404 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1405 iocmd
->status
= bfa_cee_get_attr(&bfad
->bfa
.modules
.cee
, iocmd_bufptr
,
1406 bfad_hcb_comp
, &cee_comp
);
1407 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1408 if (iocmd
->status
!= BFA_STATUS_OK
) {
1409 mutex_unlock(&bfad_mutex
);
1410 bfa_trc(bfad
, 0x5555);
1413 wait_for_completion(&cee_comp
.comp
);
1414 mutex_unlock(&bfad_mutex
);
1420 bfad_iocmd_cee_get_stats(struct bfad_s
*bfad
, void *cmd
,
1421 unsigned int payload_len
)
1423 struct bfa_bsg_cee_stats_s
*iocmd
=
1424 (struct bfa_bsg_cee_stats_s
*)cmd
;
1426 struct bfad_hal_comp cee_comp
;
1427 unsigned long flags
;
1429 if (bfad_chk_iocmd_sz(payload_len
,
1430 sizeof(struct bfa_bsg_cee_stats_s
),
1431 sizeof(struct bfa_cee_stats_s
)) != BFA_STATUS_OK
) {
1432 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1436 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_cee_stats_s
);
1438 cee_comp
.status
= 0;
1439 init_completion(&cee_comp
.comp
);
1440 mutex_lock(&bfad_mutex
);
1441 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1442 iocmd
->status
= bfa_cee_get_stats(&bfad
->bfa
.modules
.cee
, iocmd_bufptr
,
1443 bfad_hcb_comp
, &cee_comp
);
1444 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1445 if (iocmd
->status
!= BFA_STATUS_OK
) {
1446 mutex_unlock(&bfad_mutex
);
1447 bfa_trc(bfad
, 0x5555);
1450 wait_for_completion(&cee_comp
.comp
);
1451 mutex_unlock(&bfad_mutex
);
1457 bfad_iocmd_cee_reset_stats(struct bfad_s
*bfad
, void *cmd
)
1459 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
1460 unsigned long flags
;
1462 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1463 iocmd
->status
= bfa_cee_reset_stats(&bfad
->bfa
.modules
.cee
, NULL
, NULL
);
1464 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1465 if (iocmd
->status
!= BFA_STATUS_OK
)
1466 bfa_trc(bfad
, 0x5555);
1471 bfad_iocmd_sfp_media(struct bfad_s
*bfad
, void *cmd
)
1473 struct bfa_bsg_sfp_media_s
*iocmd
= (struct bfa_bsg_sfp_media_s
*)cmd
;
1474 struct bfad_hal_comp fcomp
;
1475 unsigned long flags
;
1477 init_completion(&fcomp
.comp
);
1478 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1479 iocmd
->status
= bfa_sfp_media(BFA_SFP_MOD(&bfad
->bfa
), &iocmd
->media
,
1480 bfad_hcb_comp
, &fcomp
);
1481 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1482 bfa_trc(bfad
, iocmd
->status
);
1483 if (iocmd
->status
!= BFA_STATUS_SFP_NOT_READY
)
1486 wait_for_completion(&fcomp
.comp
);
1487 iocmd
->status
= fcomp
.status
;
1493 bfad_iocmd_sfp_speed(struct bfad_s
*bfad
, void *cmd
)
1495 struct bfa_bsg_sfp_speed_s
*iocmd
= (struct bfa_bsg_sfp_speed_s
*)cmd
;
1496 struct bfad_hal_comp fcomp
;
1497 unsigned long flags
;
1499 init_completion(&fcomp
.comp
);
1500 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1501 iocmd
->status
= bfa_sfp_speed(BFA_SFP_MOD(&bfad
->bfa
), iocmd
->speed
,
1502 bfad_hcb_comp
, &fcomp
);
1503 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1504 bfa_trc(bfad
, iocmd
->status
);
1505 if (iocmd
->status
!= BFA_STATUS_SFP_NOT_READY
)
1507 wait_for_completion(&fcomp
.comp
);
1508 iocmd
->status
= fcomp
.status
;
1514 bfad_iocmd_flash_get_attr(struct bfad_s
*bfad
, void *cmd
)
1516 struct bfa_bsg_flash_attr_s
*iocmd
=
1517 (struct bfa_bsg_flash_attr_s
*)cmd
;
1518 struct bfad_hal_comp fcomp
;
1519 unsigned long flags
;
1521 init_completion(&fcomp
.comp
);
1522 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1523 iocmd
->status
= bfa_flash_get_attr(BFA_FLASH(&bfad
->bfa
), &iocmd
->attr
,
1524 bfad_hcb_comp
, &fcomp
);
1525 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1526 if (iocmd
->status
!= BFA_STATUS_OK
)
1528 wait_for_completion(&fcomp
.comp
);
1529 iocmd
->status
= fcomp
.status
;
1535 bfad_iocmd_flash_erase_part(struct bfad_s
*bfad
, void *cmd
)
1537 struct bfa_bsg_flash_s
*iocmd
= (struct bfa_bsg_flash_s
*)cmd
;
1538 struct bfad_hal_comp fcomp
;
1539 unsigned long flags
;
1541 init_completion(&fcomp
.comp
);
1542 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1543 iocmd
->status
= bfa_flash_erase_part(BFA_FLASH(&bfad
->bfa
), iocmd
->type
,
1544 iocmd
->instance
, bfad_hcb_comp
, &fcomp
);
1545 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1546 if (iocmd
->status
!= BFA_STATUS_OK
)
1548 wait_for_completion(&fcomp
.comp
);
1549 iocmd
->status
= fcomp
.status
;
1555 bfad_iocmd_flash_update_part(struct bfad_s
*bfad
, void *cmd
,
1556 unsigned int payload_len
)
1558 struct bfa_bsg_flash_s
*iocmd
= (struct bfa_bsg_flash_s
*)cmd
;
1560 struct bfad_hal_comp fcomp
;
1561 unsigned long flags
;
1563 if (bfad_chk_iocmd_sz(payload_len
,
1564 sizeof(struct bfa_bsg_flash_s
),
1565 iocmd
->bufsz
) != BFA_STATUS_OK
) {
1566 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1570 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_flash_s
);
1572 init_completion(&fcomp
.comp
);
1573 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1574 iocmd
->status
= bfa_flash_update_part(BFA_FLASH(&bfad
->bfa
),
1575 iocmd
->type
, iocmd
->instance
, iocmd_bufptr
,
1576 iocmd
->bufsz
, 0, bfad_hcb_comp
, &fcomp
);
1577 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1578 if (iocmd
->status
!= BFA_STATUS_OK
)
1580 wait_for_completion(&fcomp
.comp
);
1581 iocmd
->status
= fcomp
.status
;
1587 bfad_iocmd_flash_read_part(struct bfad_s
*bfad
, void *cmd
,
1588 unsigned int payload_len
)
1590 struct bfa_bsg_flash_s
*iocmd
= (struct bfa_bsg_flash_s
*)cmd
;
1591 struct bfad_hal_comp fcomp
;
1593 unsigned long flags
;
1595 if (bfad_chk_iocmd_sz(payload_len
,
1596 sizeof(struct bfa_bsg_flash_s
),
1597 iocmd
->bufsz
) != BFA_STATUS_OK
) {
1598 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1602 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_flash_s
);
1604 init_completion(&fcomp
.comp
);
1605 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1606 iocmd
->status
= bfa_flash_read_part(BFA_FLASH(&bfad
->bfa
), iocmd
->type
,
1607 iocmd
->instance
, iocmd_bufptr
, iocmd
->bufsz
, 0,
1608 bfad_hcb_comp
, &fcomp
);
1609 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1610 if (iocmd
->status
!= BFA_STATUS_OK
)
1612 wait_for_completion(&fcomp
.comp
);
1613 iocmd
->status
= fcomp
.status
;
1619 bfad_iocmd_diag_temp(struct bfad_s
*bfad
, void *cmd
)
1621 struct bfa_bsg_diag_get_temp_s
*iocmd
=
1622 (struct bfa_bsg_diag_get_temp_s
*)cmd
;
1623 struct bfad_hal_comp fcomp
;
1624 unsigned long flags
;
1626 init_completion(&fcomp
.comp
);
1627 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1628 iocmd
->status
= bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad
->bfa
),
1629 &iocmd
->result
, bfad_hcb_comp
, &fcomp
);
1630 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1631 bfa_trc(bfad
, iocmd
->status
);
1632 if (iocmd
->status
!= BFA_STATUS_OK
)
1634 wait_for_completion(&fcomp
.comp
);
1635 iocmd
->status
= fcomp
.status
;
1641 bfad_iocmd_diag_memtest(struct bfad_s
*bfad
, void *cmd
)
1643 struct bfa_bsg_diag_memtest_s
*iocmd
=
1644 (struct bfa_bsg_diag_memtest_s
*)cmd
;
1645 struct bfad_hal_comp fcomp
;
1646 unsigned long flags
;
1648 init_completion(&fcomp
.comp
);
1649 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1650 iocmd
->status
= bfa_diag_memtest(BFA_DIAG_MOD(&bfad
->bfa
),
1651 &iocmd
->memtest
, iocmd
->pat
,
1652 &iocmd
->result
, bfad_hcb_comp
, &fcomp
);
1653 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1654 bfa_trc(bfad
, iocmd
->status
);
1655 if (iocmd
->status
!= BFA_STATUS_OK
)
1657 wait_for_completion(&fcomp
.comp
);
1658 iocmd
->status
= fcomp
.status
;
1664 bfad_iocmd_diag_loopback(struct bfad_s
*bfad
, void *cmd
)
1666 struct bfa_bsg_diag_loopback_s
*iocmd
=
1667 (struct bfa_bsg_diag_loopback_s
*)cmd
;
1668 struct bfad_hal_comp fcomp
;
1669 unsigned long flags
;
1671 init_completion(&fcomp
.comp
);
1672 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1673 iocmd
->status
= bfa_fcdiag_loopback(&bfad
->bfa
, iocmd
->opmode
,
1674 iocmd
->speed
, iocmd
->lpcnt
, iocmd
->pat
,
1675 &iocmd
->result
, bfad_hcb_comp
, &fcomp
);
1676 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1677 bfa_trc(bfad
, iocmd
->status
);
1678 if (iocmd
->status
!= BFA_STATUS_OK
)
1680 wait_for_completion(&fcomp
.comp
);
1681 iocmd
->status
= fcomp
.status
;
1687 bfad_iocmd_diag_fwping(struct bfad_s
*bfad
, void *cmd
)
1689 struct bfa_bsg_diag_fwping_s
*iocmd
=
1690 (struct bfa_bsg_diag_fwping_s
*)cmd
;
1691 struct bfad_hal_comp fcomp
;
1692 unsigned long flags
;
1694 init_completion(&fcomp
.comp
);
1695 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1696 iocmd
->status
= bfa_diag_fwping(BFA_DIAG_MOD(&bfad
->bfa
), iocmd
->cnt
,
1697 iocmd
->pattern
, &iocmd
->result
,
1698 bfad_hcb_comp
, &fcomp
);
1699 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1700 bfa_trc(bfad
, iocmd
->status
);
1701 if (iocmd
->status
!= BFA_STATUS_OK
)
1703 bfa_trc(bfad
, 0x77771);
1704 wait_for_completion(&fcomp
.comp
);
1705 iocmd
->status
= fcomp
.status
;
1711 bfad_iocmd_diag_queuetest(struct bfad_s
*bfad
, void *cmd
)
1713 struct bfa_bsg_diag_qtest_s
*iocmd
= (struct bfa_bsg_diag_qtest_s
*)cmd
;
1714 struct bfad_hal_comp fcomp
;
1715 unsigned long flags
;
1717 init_completion(&fcomp
.comp
);
1718 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1719 iocmd
->status
= bfa_fcdiag_queuetest(&bfad
->bfa
, iocmd
->force
,
1720 iocmd
->queue
, &iocmd
->result
,
1721 bfad_hcb_comp
, &fcomp
);
1722 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1723 if (iocmd
->status
!= BFA_STATUS_OK
)
1725 wait_for_completion(&fcomp
.comp
);
1726 iocmd
->status
= fcomp
.status
;
1732 bfad_iocmd_diag_sfp(struct bfad_s
*bfad
, void *cmd
)
1734 struct bfa_bsg_sfp_show_s
*iocmd
=
1735 (struct bfa_bsg_sfp_show_s
*)cmd
;
1736 struct bfad_hal_comp fcomp
;
1737 unsigned long flags
;
1739 init_completion(&fcomp
.comp
);
1740 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1741 iocmd
->status
= bfa_sfp_show(BFA_SFP_MOD(&bfad
->bfa
), &iocmd
->sfp
,
1742 bfad_hcb_comp
, &fcomp
);
1743 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1744 bfa_trc(bfad
, iocmd
->status
);
1745 if (iocmd
->status
!= BFA_STATUS_OK
)
1747 wait_for_completion(&fcomp
.comp
);
1748 iocmd
->status
= fcomp
.status
;
1749 bfa_trc(bfad
, iocmd
->status
);
1755 bfad_iocmd_diag_led(struct bfad_s
*bfad
, void *cmd
)
1757 struct bfa_bsg_diag_led_s
*iocmd
= (struct bfa_bsg_diag_led_s
*)cmd
;
1758 unsigned long flags
;
1760 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1761 iocmd
->status
= bfa_diag_ledtest(BFA_DIAG_MOD(&bfad
->bfa
),
1763 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1768 bfad_iocmd_diag_beacon_lport(struct bfad_s
*bfad
, void *cmd
)
1770 struct bfa_bsg_diag_beacon_s
*iocmd
=
1771 (struct bfa_bsg_diag_beacon_s
*)cmd
;
1772 unsigned long flags
;
1774 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1775 iocmd
->status
= bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad
->bfa
),
1776 iocmd
->beacon
, iocmd
->link_e2e_beacon
,
1778 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1783 bfad_iocmd_diag_lb_stat(struct bfad_s
*bfad
, void *cmd
)
1785 struct bfa_bsg_diag_lb_stat_s
*iocmd
=
1786 (struct bfa_bsg_diag_lb_stat_s
*)cmd
;
1787 unsigned long flags
;
1789 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1790 iocmd
->status
= bfa_fcdiag_lb_is_running(&bfad
->bfa
);
1791 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1792 bfa_trc(bfad
, iocmd
->status
);
1798 bfad_iocmd_diag_dport_enable(struct bfad_s
*bfad
, void *pcmd
)
1800 struct bfa_bsg_dport_enable_s
*iocmd
=
1801 (struct bfa_bsg_dport_enable_s
*)pcmd
;
1802 unsigned long flags
;
1803 struct bfad_hal_comp fcomp
;
1805 init_completion(&fcomp
.comp
);
1806 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1807 iocmd
->status
= bfa_dport_enable(&bfad
->bfa
, iocmd
->lpcnt
,
1808 iocmd
->pat
, bfad_hcb_comp
, &fcomp
);
1809 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1810 if (iocmd
->status
!= BFA_STATUS_OK
)
1811 bfa_trc(bfad
, iocmd
->status
);
1813 wait_for_completion(&fcomp
.comp
);
1814 iocmd
->status
= fcomp
.status
;
1820 bfad_iocmd_diag_dport_disable(struct bfad_s
*bfad
, void *pcmd
)
1822 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)pcmd
;
1823 unsigned long flags
;
1824 struct bfad_hal_comp fcomp
;
1826 init_completion(&fcomp
.comp
);
1827 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1828 iocmd
->status
= bfa_dport_disable(&bfad
->bfa
, bfad_hcb_comp
, &fcomp
);
1829 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1830 if (iocmd
->status
!= BFA_STATUS_OK
)
1831 bfa_trc(bfad
, iocmd
->status
);
1833 wait_for_completion(&fcomp
.comp
);
1834 iocmd
->status
= fcomp
.status
;
1840 bfad_iocmd_diag_dport_start(struct bfad_s
*bfad
, void *pcmd
)
1842 struct bfa_bsg_dport_enable_s
*iocmd
=
1843 (struct bfa_bsg_dport_enable_s
*)pcmd
;
1844 unsigned long flags
;
1845 struct bfad_hal_comp fcomp
;
1847 init_completion(&fcomp
.comp
);
1848 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1849 iocmd
->status
= bfa_dport_start(&bfad
->bfa
, iocmd
->lpcnt
,
1850 iocmd
->pat
, bfad_hcb_comp
,
1852 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1854 if (iocmd
->status
!= BFA_STATUS_OK
) {
1855 bfa_trc(bfad
, iocmd
->status
);
1857 wait_for_completion(&fcomp
.comp
);
1858 iocmd
->status
= fcomp
.status
;
1865 bfad_iocmd_diag_dport_show(struct bfad_s
*bfad
, void *pcmd
)
1867 struct bfa_bsg_diag_dport_show_s
*iocmd
=
1868 (struct bfa_bsg_diag_dport_show_s
*)pcmd
;
1869 unsigned long flags
;
1871 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1872 iocmd
->status
= bfa_dport_show(&bfad
->bfa
, &iocmd
->result
);
1873 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1880 bfad_iocmd_phy_get_attr(struct bfad_s
*bfad
, void *cmd
)
1882 struct bfa_bsg_phy_attr_s
*iocmd
=
1883 (struct bfa_bsg_phy_attr_s
*)cmd
;
1884 struct bfad_hal_comp fcomp
;
1885 unsigned long flags
;
1887 init_completion(&fcomp
.comp
);
1888 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1889 iocmd
->status
= bfa_phy_get_attr(BFA_PHY(&bfad
->bfa
), iocmd
->instance
,
1890 &iocmd
->attr
, bfad_hcb_comp
, &fcomp
);
1891 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1892 if (iocmd
->status
!= BFA_STATUS_OK
)
1894 wait_for_completion(&fcomp
.comp
);
1895 iocmd
->status
= fcomp
.status
;
1901 bfad_iocmd_phy_get_stats(struct bfad_s
*bfad
, void *cmd
)
1903 struct bfa_bsg_phy_stats_s
*iocmd
=
1904 (struct bfa_bsg_phy_stats_s
*)cmd
;
1905 struct bfad_hal_comp fcomp
;
1906 unsigned long flags
;
1908 init_completion(&fcomp
.comp
);
1909 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1910 iocmd
->status
= bfa_phy_get_stats(BFA_PHY(&bfad
->bfa
), iocmd
->instance
,
1911 &iocmd
->stats
, bfad_hcb_comp
, &fcomp
);
1912 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1913 if (iocmd
->status
!= BFA_STATUS_OK
)
1915 wait_for_completion(&fcomp
.comp
);
1916 iocmd
->status
= fcomp
.status
;
1922 bfad_iocmd_phy_read(struct bfad_s
*bfad
, void *cmd
, unsigned int payload_len
)
1924 struct bfa_bsg_phy_s
*iocmd
= (struct bfa_bsg_phy_s
*)cmd
;
1925 struct bfad_hal_comp fcomp
;
1927 unsigned long flags
;
1929 if (bfad_chk_iocmd_sz(payload_len
,
1930 sizeof(struct bfa_bsg_phy_s
),
1931 iocmd
->bufsz
) != BFA_STATUS_OK
) {
1932 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1936 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_phy_s
);
1937 init_completion(&fcomp
.comp
);
1938 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1939 iocmd
->status
= bfa_phy_read(BFA_PHY(&bfad
->bfa
),
1940 iocmd
->instance
, iocmd_bufptr
, iocmd
->bufsz
,
1941 0, bfad_hcb_comp
, &fcomp
);
1942 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1943 if (iocmd
->status
!= BFA_STATUS_OK
)
1945 wait_for_completion(&fcomp
.comp
);
1946 iocmd
->status
= fcomp
.status
;
1947 if (iocmd
->status
!= BFA_STATUS_OK
)
1954 bfad_iocmd_vhba_query(struct bfad_s
*bfad
, void *cmd
)
1956 struct bfa_bsg_vhba_attr_s
*iocmd
=
1957 (struct bfa_bsg_vhba_attr_s
*)cmd
;
1958 struct bfa_vhba_attr_s
*attr
= &iocmd
->attr
;
1959 unsigned long flags
;
1961 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1962 attr
->pwwn
= bfad
->bfa
.ioc
.attr
->pwwn
;
1963 attr
->nwwn
= bfad
->bfa
.ioc
.attr
->nwwn
;
1964 attr
->plog_enabled
= (bfa_boolean_t
)bfad
->bfa
.plog
->plog_enabled
;
1965 attr
->io_profile
= bfa_fcpim_get_io_profile(&bfad
->bfa
);
1966 attr
->path_tov
= bfa_fcpim_path_tov_get(&bfad
->bfa
);
1967 iocmd
->status
= BFA_STATUS_OK
;
1968 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1973 bfad_iocmd_phy_update(struct bfad_s
*bfad
, void *cmd
, unsigned int payload_len
)
1975 struct bfa_bsg_phy_s
*iocmd
= (struct bfa_bsg_phy_s
*)cmd
;
1977 struct bfad_hal_comp fcomp
;
1978 unsigned long flags
;
1980 if (bfad_chk_iocmd_sz(payload_len
,
1981 sizeof(struct bfa_bsg_phy_s
),
1982 iocmd
->bufsz
) != BFA_STATUS_OK
) {
1983 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1987 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_phy_s
);
1988 init_completion(&fcomp
.comp
);
1989 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1990 iocmd
->status
= bfa_phy_update(BFA_PHY(&bfad
->bfa
),
1991 iocmd
->instance
, iocmd_bufptr
, iocmd
->bufsz
,
1992 0, bfad_hcb_comp
, &fcomp
);
1993 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1994 if (iocmd
->status
!= BFA_STATUS_OK
)
1996 wait_for_completion(&fcomp
.comp
);
1997 iocmd
->status
= fcomp
.status
;
2003 bfad_iocmd_porglog_get(struct bfad_s
*bfad
, void *cmd
)
2005 struct bfa_bsg_debug_s
*iocmd
= (struct bfa_bsg_debug_s
*)cmd
;
2008 if (iocmd
->bufsz
< sizeof(struct bfa_plog_s
)) {
2009 bfa_trc(bfad
, sizeof(struct bfa_plog_s
));
2010 iocmd
->status
= BFA_STATUS_EINVAL
;
2014 iocmd
->status
= BFA_STATUS_OK
;
2015 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_debug_s
);
2016 memcpy(iocmd_bufptr
, (u8
*) &bfad
->plog_buf
, sizeof(struct bfa_plog_s
));
2021 #define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
2023 bfad_iocmd_debug_fw_core(struct bfad_s
*bfad
, void *cmd
,
2024 unsigned int payload_len
)
2026 struct bfa_bsg_debug_s
*iocmd
= (struct bfa_bsg_debug_s
*)cmd
;
2028 unsigned long flags
;
2031 if (bfad_chk_iocmd_sz(payload_len
, sizeof(struct bfa_bsg_debug_s
),
2032 BFA_DEBUG_FW_CORE_CHUNK_SZ
) != BFA_STATUS_OK
) {
2033 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
2037 if (iocmd
->bufsz
< BFA_DEBUG_FW_CORE_CHUNK_SZ
||
2038 !IS_ALIGNED(iocmd
->bufsz
, sizeof(u16
)) ||
2039 !IS_ALIGNED(iocmd
->offset
, sizeof(u32
))) {
2040 bfa_trc(bfad
, BFA_DEBUG_FW_CORE_CHUNK_SZ
);
2041 iocmd
->status
= BFA_STATUS_EINVAL
;
2045 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_debug_s
);
2046 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
2047 offset
= iocmd
->offset
;
2048 iocmd
->status
= bfa_ioc_debug_fwcore(&bfad
->bfa
.ioc
, iocmd_bufptr
,
2049 &offset
, &iocmd
->bufsz
);
2050 iocmd
->offset
= offset
;
2051 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
int
bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
		bfa_trc_init(bfad->trcmod);
	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
		bfa_trc_stop(bfad->trcmod);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
int
bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;

	if (iocmd->ctl == BFA_TRUE)
		bfad->plog_buf.plog_enabled = 1;
	else
		bfad->plog_buf.plog_enabled = 0;

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
int
bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_profile_s *iocmd =
				(struct bfa_bsg_fcpim_profile_s *)cmd;
	struct timeval tv;
	unsigned long flags;

	do_gettimeofday(&tv);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_ioprofile_s *iocmd =
				(struct bfa_bsg_itnim_ioprofile_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else
			iocmd->status = bfa_itnim_get_ioprofile(
				bfa_fcs_itnim_get_halitn(itnim),
				&iocmd->ioprofile);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
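
/*
 * fcport statistics requests go through a bfa_cb_pending_q_s element: the
 * request is queued under bfad_lock and the pending-queue callback
 * (bfad_hcb_comp) wakes the waiting thread once the response arrives.
 */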
int
bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
			(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
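
/*
 * Trunking cannot be reconfigured while the port is in dport (diagnostic)
 * mode or in loop topology; a trunk enable/disable below toggles
 * fcport->cfg.trunked around a port disable/enable cycle so the new setting
 * takes effect.
 */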
int
bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if (bfa_fcport_is_dport(&bfad->bfa)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_DPORT_ERR;
	}

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		if (v_cmd == IOCMD_TRUNK_ENABLE) {
			trunk->attr.state = BFA_TRUNK_OFFLINE;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_TRUE;
		} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
			trunk->attr.state = BFA_TRUNK_DISABLED;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_FALSE;
		}

		if (!bfa_fcport_is_disabled(&bfad->bfa))
			bfa_fcport_enable(&bfad->bfa);

		iocmd->status = BFA_STATUS_OK;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
			sizeof(struct bfa_trunk_attr_s));
		iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
		if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
			(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
			iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
		else {
			if (v_cmd == IOCMD_QOS_ENABLE)
				fcport->cfg.qos_enabled = BFA_TRUE;
			else if (v_cmd == IOCMD_QOS_DISABLE) {
				fcport->cfg.qos_enabled = BFA_FALSE;
				fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
				fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
				fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
			}
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		iocmd->attr.state = fcport->qos_attr.state;
		iocmd->attr.total_bb_cr =
			be32_to_cpu(fcport->qos_attr.total_bb_cr);
		iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
		iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
		iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
		iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_vc_attr_s *iocmd =
			(struct bfa_bsg_qos_vc_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
	unsigned long flags;
	u32 i = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
	iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
	iocmd->attr.elp_opmode_flags =
			be32_to_cpu(bfa_vc_attr->elp_opmode_flags);

	/* Individual VC info */
	while (i < iocmd->attr.total_vc_count) {
		iocmd->attr.vc_info[i].vc_credit =
			bfa_vc_attr->vc_info[i].vc_credit;
		iocmd->attr.vc_info[i].borrow_credit =
			bfa_vc_attr->vc_info[i].borrow_credit;
		iocmd->attr.vc_info[i].priority =
			bfa_vc_attr->vc_info[i].priority;
		i++;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
int
bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
			(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_stats_s *iocmd =
			(struct bfa_bsg_vf_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
		sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
int
bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_reset_stats_s *iocmd =
			(struct bfa_bsg_vf_reset_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
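
/*
 * LUN mask configuration: enabling or disabling the mask also switches the
 * SCSI LUN scan mode for the base port and every vport, see
 * bfad_iocmd_lunmask_reset_lunscan_mode() below.
 */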
/* Function to reset the LUN SCAN mode */
static void
bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
{
	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
	struct bfad_vport_s *vport = NULL;

	/* Set the scsi device LUN SCAN flags for base port */
	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);

	/* Set the scsi device LUN SCAN flags for the vports */
	list_for_each_entry(vport, &bfad->vport_list, list_entry)
		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
}
int
bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
		/* Set the LUN Scanning mode to be Sequential scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
int
bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
			(struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
	struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
int
bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_lunmask_s *iocmd =
			(struct bfa_bsg_fcpim_lunmask_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
		iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
					&iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
		iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
					iocmd->vf_id, &iocmd->pwwn,
					iocmd->rpwwn, iocmd->lun);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
int
bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_throttle_s *iocmd =
			(struct bfa_bsg_fcpim_throttle_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
				(void *)&iocmd->throttle);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_throttle_s *iocmd =
			(struct bfa_bsg_fcpim_throttle_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
				iocmd->throttle.cfg_value);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_tfru_s *iocmd =
			(struct bfa_bsg_tfru_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}
int
bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_tfru_s *iocmd =
			(struct bfa_bsg_tfru_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}
int
bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_s *iocmd =
			(struct bfa_bsg_fruvpd_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}
int
bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_s *iocmd =
			(struct bfa_bsg_fruvpd_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}
int
bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_max_size_s *iocmd =
			(struct bfa_bsg_fruvpd_max_size_s *)cmd;
	unsigned long flags = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
				&iocmd->max_size);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
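
/*
 * Central dispatcher for BSG vendor-specific commands: maps the vendor
 * command code received from userspace to the matching bfad_iocmd_*()
 * handler above; unrecognized commands are rejected with -EINVAL.
 */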
int
bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
		   unsigned int payload_len)
{
	int rc = -EINVAL;

	switch (cmd) {
	case IOCMD_IOC_ENABLE:
		rc = bfad_iocmd_ioc_enable(bfad, iocmd);
		break;
	case IOCMD_IOC_DISABLE:
		rc = bfad_iocmd_ioc_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_INFO:
		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_ATTR:
		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_STATS:
		rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_FWSTATS:
		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
		break;
	case IOCMD_IOC_RESET_STATS:
	case IOCMD_IOC_RESET_FWSTATS:
		rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
		break;
	case IOCMD_IOC_SET_ADAPTER_NAME:
	case IOCMD_IOC_SET_PORT_NAME:
		rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
		break;
	case IOCMD_IOCFC_GET_ATTR:
		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOCFC_SET_INTR:
		rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
		break;
	case IOCMD_PORT_ENABLE:
		rc = bfad_iocmd_port_enable(bfad, iocmd);
		break;
	case IOCMD_PORT_DISABLE:
		rc = bfad_iocmd_port_disable(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_ATTR:
		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_STATS:
		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_PORT_RESET_STATS:
		rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_TOPO:
	case IOCMD_PORT_CFG_SPEED:
	case IOCMD_PORT_CFG_ALPA:
	case IOCMD_PORT_CLR_ALPA:
		rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
		break;
	case IOCMD_PORT_CFG_MAXFRSZ:
		rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
		break;
	case IOCMD_PORT_BBCR_ENABLE:
	case IOCMD_PORT_BBCR_DISABLE:
		rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
		break;
	case IOCMD_PORT_BBCR_GET_ATTR:
		rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_ATTR:
		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_STATS:
		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_RESET_STATS:
		rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_IOSTATS:
		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_RPORTS:
		rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RPORT_GET_ATTR:
		rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_ADDR:
		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_STATS:
		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_RESET_STATS:
		rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_SET_SPEED:
		rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_ATTR:
		rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_STATS:
		rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
		break;
	case IOCMD_VPORT_RESET_STATS:
		rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FABRIC_GET_LPORTS:
		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RATELIM_ENABLE:
	case IOCMD_RATELIM_DISABLE:
		rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
		break;
	case IOCMD_RATELIM_DEF_SPEED:
		rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
		break;
	case IOCMD_FCPIM_FAILOVER:
		rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATS:
		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATSCLR:
		rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_DEL_ITN_STATS:
		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ATTR:
		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_IOSTATS:
		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_RESET_STATS:
		rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ITNSTATS:
		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_ENABLE:
		rc = bfad_iocmd_fcport_enable(bfad, iocmd);
		break;
	case IOCMD_FCPORT_DISABLE:
		rc = bfad_iocmd_fcport_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_PCIFN_CFG:
		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
		break;
	case IOCMD_IOC_FW_SIG_INV:
		rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd);
		break;
	case IOCMD_PCIFN_CREATE:
		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
		break;
	case IOCMD_PCIFN_DELETE:
		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
		break;
	case IOCMD_PCIFN_BW:
		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
		break;
	case IOCMD_ADAPTER_CFG_MODE:
		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_MODE:
		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_FLASH_ENABLE_OPTROM:
	case IOCMD_FLASH_DISABLE_OPTROM:
		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
		break;
	case IOCMD_FAA_QUERY:
		rc = bfad_iocmd_faa_query(bfad, iocmd);
		break;
	case IOCMD_CEE_GET_ATTR:
		rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_GET_STATS:
		rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_RESET_STATS:
		rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
		break;
	case IOCMD_SFP_MEDIA:
		rc = bfad_iocmd_sfp_media(bfad, iocmd);
		break;
	case IOCMD_SFP_SPEED:
		rc = bfad_iocmd_sfp_speed(bfad, iocmd);
		break;
	case IOCMD_FLASH_GET_ATTR:
		rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
		break;
	case IOCMD_FLASH_ERASE_PART:
		rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
		break;
	case IOCMD_FLASH_UPDATE_PART:
		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_FLASH_READ_PART:
		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_DIAG_TEMP:
		rc = bfad_iocmd_diag_temp(bfad, iocmd);
		break;
	case IOCMD_DIAG_MEMTEST:
		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
		break;
	case IOCMD_DIAG_LOOPBACK:
		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
		break;
	case IOCMD_DIAG_FWPING:
		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
		break;
	case IOCMD_DIAG_QUEUETEST:
		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
		break;
	case IOCMD_DIAG_SFP:
		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
		break;
	case IOCMD_DIAG_LED:
		rc = bfad_iocmd_diag_led(bfad, iocmd);
		break;
	case IOCMD_DIAG_BEACON_LPORT:
		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
		break;
	case IOCMD_DIAG_LB_STAT:
		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_ENABLE:
		rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_DISABLE:
		rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_SHOW:
		rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_START:
		rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_ATTR:
		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_STATS:
		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
		break;
	case IOCMD_PHY_UPDATE_FW:
		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
		break;
	case IOCMD_PHY_READ_FW:
		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
		break;
	case IOCMD_VHBA_QUERY:
		rc = bfad_iocmd_vhba_query(bfad, iocmd);
		break;
	case IOCMD_DEBUG_PORTLOG:
		rc = bfad_iocmd_porglog_get(bfad, iocmd);
		break;
	case IOCMD_DEBUG_FW_CORE:
		rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
		break;
	case IOCMD_DEBUG_FW_STATE_CLR:
	case IOCMD_DEBUG_PORTLOG_CLR:
	case IOCMD_DEBUG_START_DTRC:
	case IOCMD_DEBUG_STOP_DTRC:
		rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
		break;
	case IOCMD_DEBUG_PORTLOG_CTL:
		rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
		break;
	case IOCMD_FCPIM_PROFILE_ON:
	case IOCMD_FCPIM_PROFILE_OFF:
		rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
		break;
	case IOCMD_ITNIM_GET_IOPROFILE:
		rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
		break;
	case IOCMD_FCPORT_GET_STATS:
		rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_RESET_STATS:
		rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_BOOT_CFG:
		rc = bfad_iocmd_boot_cfg(bfad, iocmd);
		break;
	case IOCMD_BOOT_QUERY:
		rc = bfad_iocmd_boot_query(bfad, iocmd);
		break;
	case IOCMD_PREBOOT_QUERY:
		rc = bfad_iocmd_preboot_query(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_CFG:
		rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_QUERY:
		rc = bfad_iocmd_ethboot_query(bfad, iocmd);
		break;
	case IOCMD_TRUNK_ENABLE:
	case IOCMD_TRUNK_DISABLE:
		rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
		break;
	case IOCMD_TRUNK_GET_ATTR:
		rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_ENABLE:
	case IOCMD_QOS_DISABLE:
		rc = bfad_iocmd_qos(bfad, iocmd, cmd);
		break;
	case IOCMD_QOS_GET_ATTR:
		rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_VC_ATTR:
		rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_STATS:
		rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_RESET_STATS:
		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_SET_BW:
		rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
		break;
	case IOCMD_VF_GET_STATS:
		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
		break;
	case IOCMD_VF_RESET_STATS:
		rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ENABLE:
	case IOCMD_FCPIM_LUNMASK_DISABLE:
	case IOCMD_FCPIM_LUNMASK_CLEAR:
		rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_LUNMASK_QUERY:
		rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ADD:
	case IOCMD_FCPIM_LUNMASK_DELETE:
		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_THROTTLE_QUERY:
		rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_THROTTLE_SET:
		rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
		break;
	/* TFRU */
	case IOCMD_TFRU_READ:
		rc = bfad_iocmd_tfru_read(bfad, iocmd);
		break;
	case IOCMD_TFRU_WRITE:
		rc = bfad_iocmd_tfru_write(bfad, iocmd);
		break;
	/* FRU */
	case IOCMD_FRUVPD_READ:
		rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_UPDATE:
		rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_GET_MAX_SIZE:
		rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}
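
/*
 * FC_BSG_HST_VENDOR entry point: the request payload is copied out of the
 * bsg scatter/gather list into a flat kernel buffer, handed to
 * bfad_iocmd_handler(), and the updated buffer is copied back into the
 * reply payload.
 */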
int
bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
{
	uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct request_queue *request_q = job->req->q;
	void *payload_kbuf;
	int rc = -EINVAL;

	/*
	 * Set the BSG device request_queue size to 256 to support
	 * payloads larger than 512*1024K bytes.
	 */
	blk_queue_max_segments(request_q, 256);

	/* Allocate a temp buffer to hold the passed in user space command */
	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!payload_kbuf) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, payload_kbuf,
			  job->request_payload.payload_len);

	/* Invoke IOCMD handler - to handle all the vendor command requests */
	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
				job->request_payload.payload_len);
	if (rc != BFA_STATUS_OK)
		goto error;

	/* Copy the response data to the job->reply_payload sg_list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    payload_kbuf,
			    job->reply_payload.payload_len);

	/* free the command buffer */
	kfree(payload_kbuf);

	/* Fill the BSG job reply data */
	job->reply_len = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	job->reply->result = rc;

	job->job_done(job);
	return rc;
error:
	/* free the command buffer */
	kfree(payload_kbuf);
out:
	job->reply->result = rc;
	job->reply_len = sizeof(uint32_t);
	job->reply->reply_payload_rcv_len = 0;
	return rc;
}
/* FC passthru call backs */
u64
bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->req_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->req_sge + sgeid;
	return sge->sg_len;
}

u64
bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->rsp_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->rsp_sge + sgeid;
	return sge->sg_len;
}

void
bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
		struct fchs_s *rsp_fchs)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;

	drv_fcxp->req_status = req_status;
	drv_fcxp->rsp_len = rsp_len;

	/* bfa_fcxp will be automatically freed by BFA */
	drv_fcxp->bfa_fcxp = NULL;
	complete(&drv_fcxp->comp);
}
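
/*
 * bfad_fcxp_map_sg() builds a single-element SG table backed by DMA-coherent
 * memory for the passthru request/response payloads; bfad_fcxp_free_mem()
 * releases it once the exchange completes.
 */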
struct bfad_buf_info *
bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
		 uint32_t payload_len, uint32_t *num_sgles)
{
	struct bfad_buf_info *buf_base, *buf_info;
	struct bfa_sge_s *sg_table;
	int sge_num = 1;

	buf_base = kzalloc((sizeof(struct bfad_buf_info) +
			   sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
	if (!buf_base)
		return NULL;

	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
			(sizeof(struct bfad_buf_info) * sge_num));

	/* Allocate dma coherent memory */
	buf_info = buf_base;
	buf_info->size = payload_len;
	buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev,
					     buf_info->size, &buf_info->phys,
					     GFP_KERNEL);
	if (!buf_info->virt)
		goto out_free_mem;

	/* copy the linear bsg buffer to buf_info */
	memcpy(buf_info->virt, payload_kbuf, buf_info->size);

	/* Setup SG table */
	sg_table->sg_len = buf_info->size;
	sg_table->sg_addr = (void *)(size_t) buf_info->phys;

	*num_sgles = sge_num;

	return buf_base;

out_free_mem:
	kfree(buf_base);
	return NULL;
}
void
bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
		   uint32_t num_sgles)
{
	int i;
	struct bfad_buf_info *buf_info = buf_base;

	if (buf_base) {
		for (i = 0; i < num_sgles; buf_info++, i++) {
			if (buf_info->virt != NULL)
				dma_free_coherent(&bfad->pcidev->dev,
					buf_info->size, buf_info->virt,
					buf_info->phys);
		}
		kfree(buf_base);
	}
}
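
/*
 * Allocate an FCXP from BFA and send the passthru frame; completion is
 * reported through bfad_send_fcpt_cb(), which wakes the waiting
 * bfad_im_bsg_els_ct_request() thread.
 */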
int
bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
		   bfa_bsg_fcpt_t *bsg_fcpt)
{
	struct bfa_fcxp_s *hal_fcxp;
	struct bfad_s *bfad = drv_fcxp->port->bfad;
	unsigned long flags;
	uint8_t lp_tag;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Allocate bfa_fcxp structure */
	hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
				  drv_fcxp->num_req_sgles,
				  drv_fcxp->num_rsp_sgles,
				  bfad_fcxp_get_req_sgaddr_cb,
				  bfad_fcxp_get_req_sglen_cb,
				  bfad_fcxp_get_rsp_sgaddr_cb,
				  bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
	if (!hal_fcxp) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_ENOMEM;
	}

	drv_fcxp->bfa_fcxp = hal_fcxp;

	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);

	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
		      bsg_fcpt->cts, bsg_fcpt->cos,
		      job->request_payload.payload_len,
		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
		      job->reply_payload.payload_len, bsg_fcpt->tsecs);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;
}
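
/*
 * ELS/CT passthru entry point: validates the target lport (and rport for
 * RPT commands), maps the request and response payloads into DMA-able
 * buffers and blocks until the exchange completes before filling in the
 * bsg reply.
 */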
int
bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
{
	struct bfa_bsg_data *bsg_data;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	bfa_bsg_fcpt_t *bsg_fcpt;
	struct bfad_fcxp *drv_fcxp;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	uint32_t command_type = job->request->msgcode;
	unsigned long flags;
	struct bfad_buf_info *rsp_buf_info;
	void *req_kbuf = NULL, *rsp_kbuf = NULL;
	int rc = -EINVAL;

	job->reply_len = sizeof(uint32_t);	/* Atleast uint32_t reply_len */
	job->reply->reply_payload_rcv_len = 0;

	/* Get the payload passed in from userspace */
	bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
				sizeof(struct fc_bsg_request));
	if (bsg_data == NULL)
		goto out;

	/*
	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
	 * buffer of size bsg_data->payload_len
	 */
	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
	if (!bsg_fcpt) {
		rc = -ENOMEM;
		goto out;
	}

	if (copy_from_user((uint8_t *)bsg_fcpt,
				(void *)(unsigned long)bsg_data->payload,
				bsg_data->payload_len)) {
		kfree(bsg_fcpt);
		rc = -EIO;
		goto out;
	}

	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
	if (drv_fcxp == NULL) {
		kfree(bsg_fcpt);
		rc = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
					bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (drv_fcxp->port->bfad == 0)
		drv_fcxp->port->bfad = bfad;

	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
	    command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;

	} else if (command_type == FC_BSG_RPT_ELS ||
		   command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}

		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;

	} else { /* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->reqbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_req_sgles));

	/* map rsp sg */
	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->rspbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_rsp_sgles));

	/* fcxp send */
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		job->reply->reply_payload_rcv_len =
					sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		job->reply->reply_data.ctels_reply.status =
						FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);

out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(req_kbuf);
	kfree(rsp_kbuf);

	/* Need a copy to user op */
	if (copy_to_user((void *)(unsigned long)bsg_data->payload,
			(void *)bsg_fcpt, bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	job->reply->result = rc;

	if (rc == BFA_STATUS_OK)
		job->job_done(job);

	return rc;
}
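
/*
 * Top-level bsg request dispatcher: vendor commands are routed to
 * bfad_im_bsg_vendor_request(), ELS/CT passthru to
 * bfad_im_bsg_els_ct_request().
 */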
int
bfad_im_bsg_request(struct fc_bsg_job *job)
{
	uint32_t rc = BFA_STATUS_OK;

	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* Process BSG HST Vendor requests */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		job->reply->result = rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		break;
	}

	return rc;
}
int
bfad_im_bsg_timeout(struct fc_bsg_job *job)
{
	/* Don't complete the BSG job request - return -EAGAIN
	 * to reset bsg job timeout : for ELS/CT pass thru we
	 * already have timer to track the request.