/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/uaccess.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_bsg.h"
BFA_TRC_FILE(LDRV, BSG);
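/*
 * The bfad_iocmd_*() handlers below share a common shape: the bsg payload
 * arrives as an opaque 'cmd' pointer, is cast to the matching
 * struct bfa_bsg_*_s, the HAL/FCS call is made while holding
 * bfad->bfad_lock (spin_lock_irqsave), and the outcome is reported to
 * user space through iocmd->status.  The handlers themselves return 0;
 * errors travel back in the status field.
 */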
static int
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* If IOC is not in disabled state - return */
	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_OK;
		return 0;
	}

	init_completion(&bfad->enable_comp);
	bfa_iocfc_enable(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->enable_comp);

	return 0;
}
static int
bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_OK;
		return 0;
	}

	if (bfad->disable_active) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return -EBUSY;
	}

	bfad->disable_active = BFA_TRUE;
	init_completion(&bfad->disable_comp);
	bfa_iocfc_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	wait_for_completion(&bfad->disable_comp);
	bfad->disable_active = BFA_FALSE;
	iocmd->status = BFA_STATUS_OK;

	return 0;
}
static int
bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
{
	int	i;
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s	*im_port;
	struct bfa_port_attr_s	pattr;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	iocmd->bfad_num = bfad->inst_no;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);

	/* set adapter hw path */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
	for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++)
		;
	for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; )
		;
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
static int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
		sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
		sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
static int
bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;

	bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
static int
bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_ioc_fwstats_s *iocmd =
			(struct bfa_bsg_ioc_fwstats_s *)cmd;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_ioc_fwstats_s),
			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
out:
	bfa_trc(bfad, 0x6666);
	return 0;
}
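/*
 * Commands that return variable-sized data (firmware stats, rport lists,
 * flash contents, etc.) carry that data immediately after the fixed
 * bfa_bsg_*_s header, i.e. at (char *)iocmd + sizeof(struct ...).
 * bfad_chk_iocmd_sz() is called first to verify that the bsg payload is
 * large enough for header plus data; on a mismatch the handler reports
 * BFA_STATUS_VERSION_FAIL instead of touching the buffer.
 */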
static int
bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	if (v_cmd == IOCMD_IOC_RESET_STATS) {
		bfa_ioc_clear_stats(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	} else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return 0;
}
static int
bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;

	if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
		strcpy(bfad->adapter_name, iocmd->name);
	else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
		strcpy(bfad->port_name, iocmd->name);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
static int
bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;

	iocmd->status = BFA_STATUS_OK;
	bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);

	return 0;
}
static int
bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
static int
bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
static int
bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}
static int
bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}
static int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s port_attr;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;
	else
		iocmd->attr.pid = 0;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	strncpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(port_attr.port_cfg.sym_name.symname));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
static int
bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_port_stats_s),
			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
				iocmd_bufptr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}
static int
bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
{
	struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_PORT_CFG_TOPO)
		cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_SPEED)
		cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_ALPA)
		cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CLR_ALPA)
		cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
static int
bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
			(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
static int
bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_bbcr_enable_s *iocmd =
			(struct bfa_bsg_bbcr_enable_s *)pcmd;
	unsigned long	flags;
	int	rc;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_PORT_BBCR_ENABLE)
		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
	else if (cmd == IOCMD_PORT_BBCR_DISABLE)
		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
	else {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = rc;
	return 0;
}
static int
bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status =
		bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
static int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_stats_s *iocmd =
			(struct bfa_bsg_lport_stats_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_clear_stats(fcs_port);
	/* clear IO stats from all active itnims */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
			continue;
		bfa_itnim_clear_stats(itnim);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_iostats_s *iocmd =
			(struct bfa_bsg_lport_iostats_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
			fcs_port->lp_tag);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_lport_get_rports_s *iocmd =
			(struct bfa_bsg_lport_get_rports_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long	flags;
	void	*iocmd_bufptr;

	if (iocmd->nrports == 0)
		return -EINVAL;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_lport_get_rports_s),
			sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
			!= BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_lport_get_rports_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		bfa_trc(bfad, 0);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_rport_quals(fcs_port,
			(struct bfa_rport_qualifier_s *)iocmd_bufptr,
			&iocmd->nrports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	if (iocmd->pid)
		fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
						iocmd->rpwwn, iocmd->pid);
	else
		fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	struct bfa_fcs_itnim_s	*fcs_itnim;
	struct bfad_itnim_s	*drv_itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	drv_itnim = fcs_itnim->itnim_drv;

	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;
	else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->bus = 0;
	iocmd->lun = 0;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_stats_s *iocmd =
			(struct bfa_bsg_rport_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
		sizeof(struct bfa_rport_stats_s));
	if (bfa_fcs_rport_get_halrport(fcs_rport)) {
		memcpy((void *)&iocmd->stats.hal_stats,
		       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
		       sizeof(struct bfa_rport_hal_stats_s));
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	struct bfa_rport_s *rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
	rport = bfa_fcs_rport_get_halrport(fcs_rport);
	if (rport)
		memset(&rport->stats, 0, sizeof(rport->stats));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_set_speed_s *iocmd =
			(struct bfa_bsg_rport_set_speed_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	fcs_rport->rpf.assigned_speed = iocmd->speed;
	/* Set this speed in f/w only if the RPSC speed is not available */
	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
		if (fcs_rport->bfa_rport)
			bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_stats_s *iocmd =
			(struct bfa_bsg_vport_stats_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
		sizeof(struct bfa_vport_stats_s));
	memcpy((void *)&iocmd->vport_stats.port_stats,
	       (void *)&fcs_vport->lport.stats,
	       sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
	memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_fabric_get_lports_s *iocmd =
			(struct bfa_bsg_fabric_get_lports_s *)cmd;
	bfa_fcs_vf_t	*fcs_vf;
	uint32_t	nports = iocmd->nports;
	unsigned long	flags;
	void	*iocmd_bufptr;

	if (nports == 0) {
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_fabric_get_lports_s),
			sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_fabric_get_lports_s);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->nports = nports;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
static int
bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
static int
bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		if (cmd == IOCMD_RATELIM_ENABLE)
			fcport->cfg.ratelimit = BFA_TRUE;
		else if (cmd == IOCMD_RATELIM_DISABLE)
			fcport->cfg.ratelimit = BFA_FALSE;

		if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
			fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;

		iocmd->status = BFA_STATUS_OK;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
static int
bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Auto and speeds greater than the supported speed, are invalid */
	if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
		(iocmd->speed > fcport->speed_sup)) {
		iocmd->status = BFA_STATUS_UNSUPP_SPEED;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return 0;
	}

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		fcport->cfg.trl_def_speed = iocmd->speed;
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
static int
bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
static int
bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstats_s *iocmd =
			(struct bfa_bsg_fcpim_modstats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* accumulate IO stats from itnim */
	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
static int
bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
			(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	memset(&fcpim->del_itn_stats, 0,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
static int
bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
static int
bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else
		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
					iocmd->rpwwn, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
static int
bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_iostats_s *iocmd =
			(struct bfa_bsg_itnim_iostats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			if (bfa_fcs_itnim_get_halitn(itnim))
				memcpy((void *)&iocmd->iostats, (void *)
				       &(bfa_fcs_itnim_get_halitn(itnim)->stats),
				       sizeof(struct bfa_itnim_iostats_s));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
static int
bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	struct bfa_fcs_itnim_s	*itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
			bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
static int
bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_itnstats_s *iocmd =
			(struct bfa_bsg_itnim_itnstats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
					&iocmd->itnstats);
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
static int
bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_enable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
static int
bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
static int
bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_cfg,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_id, iocmd->port,
				iocmd->pcifn_class, iocmd->bw_min,
				iocmd->bw_max, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id, iocmd->bw_min,
				iocmd->bw_max, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}
static int
bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
				iocmd->cfg.mode, iocmd->cfg.max_pf,
				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_mode_s *iocmd =
			(struct bfa_bsg_port_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
				iocmd->instance, iocmd->cfg.mode,
				iocmd->cfg.max_pf, iocmd->cfg.max_vf,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	else
		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
	struct bfad_hal_comp	fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	iocmd->status = BFA_STATUS_OK;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_cee_attr_s *iocmd =
			(struct bfa_bsg_cee_attr_s *)cmd;
	void	*iocmd_bufptr;
	struct bfad_hal_comp	cee_comp;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_attr_s),
			sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
					 bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
		goto out;
	}
	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);
out:
	return 0;
}
static int
bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_cee_stats_s *iocmd =
			(struct bfa_bsg_cee_stats_s *)cmd;
	void	*iocmd_bufptr;
	struct bfad_hal_comp	cee_comp;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_stats_s),
			sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
					bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
		goto out;
	}
	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);
out:
	return 0;
}
static int
bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, 0x5555);
	return 0;
}
static int
bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_flash_attr_s *iocmd =
			(struct bfa_bsg_flash_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
				iocmd->instance, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	void	*iocmd_bufptr;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_flash_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				iocmd->type, iocmd->instance, iocmd_bufptr,
				iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	struct bfad_hal_comp fcomp;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_flash_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
				iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_get_temp_s *iocmd =
			(struct bfa_bsg_diag_get_temp_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_memtest_s *iocmd =
			(struct bfa_bsg_diag_memtest_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->memtest, iocmd->pat,
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_loopback_s *iocmd =
			(struct bfa_bsg_diag_loopback_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
				iocmd->speed, iocmd->lpcnt, iocmd->pat,
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_fwping_s *iocmd =
			(struct bfa_bsg_diag_fwping_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
				iocmd->pattern, &iocmd->result,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	bfa_trc(bfad, 0x77771);
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
				iocmd->queue, &iocmd->result,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_show_s *iocmd =
			(struct bfa_bsg_sfp_show_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}
static int
bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->ledtest);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
static int
bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_beacon_s *iocmd =
			(struct bfa_bsg_diag_beacon_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
				iocmd->beacon, iocmd->link_e2e_beacon,
				iocmd->second);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
static int
bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_lb_stat_s *iocmd =
			(struct bfa_bsg_diag_lb_stat_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);

	return 0;
}
static int
bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_dport_enable_s *iocmd =
			(struct bfa_bsg_dport_enable_s *)pcmd;
	unsigned long	flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
				iocmd->pat, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, iocmd->status);
	else {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}
	return 0;
}
static int
bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	unsigned long	flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, iocmd->status);
	else {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}
	return 0;
}
static int
bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_dport_enable_s *iocmd =
			(struct bfa_bsg_dport_enable_s *)pcmd;
	unsigned long	flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
					iocmd->pat, bfad_hcb_comp,
					&fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
	} else {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}
static int
bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_diag_dport_show_s *iocmd =
			(struct bfa_bsg_diag_dport_show_s *)pcmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
static int
bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_phy_attr_s *iocmd =
			(struct bfa_bsg_phy_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->attr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_phy_stats_s *iocmd =
			(struct bfa_bsg_phy_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->stats, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	struct bfad_hal_comp fcomp;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
				0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
out:
	return 0;
}
static int
bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vhba_attr_s *iocmd =
			(struct bfa_bsg_vhba_attr_s *)cmd;
	struct bfa_vhba_attr_s *attr = &iocmd->attr;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	attr->pwwn = bfad->bfa.ioc.attr->pwwn;
	attr->nwwn = bfad->bfa.ioc.attr->nwwn;
	attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
	attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
static int
bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	void	*iocmd_bufptr;
	struct bfad_hal_comp fcomp;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
				0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
static int
bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
	void	*iocmd_bufptr;

	if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
		bfa_trc(bfad, sizeof(struct bfa_plog_s));
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	iocmd->status = BFA_STATUS_OK;
	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
	memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
out:
	return 0;
}
#define BFA_DEBUG_FW_CORE_CHUNK_SZ	0x4000U	/* 16K chunks for FW dump */
static int
bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
	void	*iocmd_bufptr;
	unsigned long	flags;
	u32	offset;

	if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
			BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
			!IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
			!IS_ALIGNED(iocmd->offset, sizeof(u32))) {
		bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	offset = iocmd->offset;
	iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
				&offset, &iocmd->bufsz);
	iocmd->offset = offset;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
out:
	return 0;
}
static int
bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
		bfa_trc_init(bfad->trcmod);
	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
		bfa_trc_stop(bfad->trcmod);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
int
bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;

	if (iocmd->ctl == BFA_TRUE)
		bfad->plog_buf.plog_enabled = 1;
	else
		bfad->plog_buf.plog_enabled = 0;

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
int
bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_profile_s *iocmd =
			(struct bfa_bsg_fcpim_profile_s *)cmd;
	struct timeval tv;
	unsigned long flags;

	do_gettimeofday(&tv);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_ioprofile_s *iocmd =
			(struct bfa_bsg_itnim_ioprofile_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (fcs_port == NULL)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else
			iocmd->status = bfa_itnim_get_ioprofile(
					bfa_fcs_itnim_get_halitn(itnim),
					&iocmd->ioprofile);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
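/*
 * FC port statistics are fetched and cleared asynchronously: the request
 * is queued with a bfa_cb_pending_q_s element and the handler sleeps on
 * fcomp until bfad_hcb_comp() posts the completion status.
 */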
int
bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
			(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			&fcomp, &iocmd->stats);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
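/*
 * Pre-boot configuration is returned straight from the IOCFC
 * configuration response (cfgrsp->pbc_cfg); no firmware round trip is
 * needed here.
 */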
int
bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
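/*
 * Trunking configuration. The port is disabled while the trunked flag
 * is flipped and re-enabled afterwards; the request is rejected while
 * the port is in D-Port (diagnostic) mode or on loop topologies.
 */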
int
bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if (bfa_fcport_is_dport(&bfad->bfa)) {
		/* drop the lock before bailing out */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_DPORT_ERR;
	}

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		if (v_cmd == IOCMD_TRUNK_ENABLE) {
			trunk->attr.state = BFA_TRUNK_OFFLINE;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_TRUE;
		} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
			trunk->attr.state = BFA_TRUNK_DISABLED;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_FALSE;
		}

		if (!bfa_fcport_is_disabled(&bfad->bfa))
			bfa_fcport_enable(&bfad->bfa);

		iocmd->status = BFA_STATUS_OK;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
			sizeof(struct bfa_trunk_attr_s));
		iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
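/*
 * QoS vendor commands. Enable/disable only applies to FC-mode IOCs and
 * is refused on loop topologies; disabling also restores the default
 * high/med/low bandwidth split.
 */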
int
bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
		if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
			iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
		else {
			if (v_cmd == IOCMD_QOS_ENABLE)
				fcport->cfg.qos_enabled = BFA_TRUE;
			else if (v_cmd == IOCMD_QOS_DISABLE) {
				fcport->cfg.qos_enabled = BFA_FALSE;
				fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
				fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
				fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
			}
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		iocmd->attr.state = fcport->qos_attr.state;
		iocmd->attr.total_bb_cr =
			be32_to_cpu(fcport->qos_attr.total_bb_cr);
		iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
		iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
		iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
		iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_vc_attr_s *iocmd =
			(struct bfa_bsg_qos_vc_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
	unsigned long flags;
	u32 i = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
	iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
	iocmd->attr.elp_opmode_flags =
			be32_to_cpu(bfa_vc_attr->elp_opmode_flags);

	/* Individual VC info */
	while (i < iocmd->attr.total_vc_count) {
		iocmd->attr.vc_info[i].vc_credit =
				bfa_vc_attr->vc_info[i].vc_credit;
		iocmd->attr.vc_info[i].borrow_credit =
				bfa_vc_attr->vc_info[i].borrow_credit;
		iocmd->attr.vc_info[i].priority =
				bfa_vc_attr->vc_info[i].priority;
		i++;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
int
bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
			(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			&fcomp, &iocmd->stats);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			&fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_stats_s *iocmd =
			(struct bfa_bsg_vf_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
		sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
int
bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_reset_stats_s *iocmd =
			(struct bfa_bsg_vf_reset_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
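/*
 * LUN masking support. Enabling or disabling the LUN mask also switches
 * the SCSI LUN scanning mode on the base port and all vports (see
 * bfad_iocmd_lunmask_reset_lunscan_mode() below).
 */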
/* Function to reset the LUN SCAN mode */
void
bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
{
	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
	struct bfad_vport_s *vport = NULL;

	/* Set the scsi device LUN SCAN flags for base port */
	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);

	/* Set the scsi device LUN SCAN flags for the vports */
	list_for_each_entry(vport, &bfad->vport_list, list_entry)
		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
}
int
bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
		/* Set the LUN Scanning mode to be Sequential scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
int
bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
			(struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
	struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
int
bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_lunmask_s *iocmd =
			(struct bfa_bsg_fcpim_lunmask_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
		iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
					&iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
		iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
					iocmd->vf_id, &iocmd->pwwn,
					iocmd->rpwwn, iocmd->lun);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
int
bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_throttle_s *iocmd =
			(struct bfa_bsg_fcpim_throttle_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
			(void *)&iocmd->throttle);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_throttle_s *iocmd =
			(struct bfa_bsg_fcpim_throttle_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
			iocmd->throttle.cfg_value);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
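/*
 * TFRU / FRU-VPD access. Reads and writes go through the BFA FRU module
 * and complete asynchronously via bfad_hcb_comp(), so the handlers only
 * block on fcomp when the request was accepted.
 */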
int
bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_tfru_s *iocmd =
			(struct bfa_bsg_tfru_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_tfru_s *iocmd =
			(struct bfa_bsg_tfru_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}
int
bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_s *iocmd =
			(struct bfa_bsg_fruvpd_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_s *iocmd =
			(struct bfa_bsg_fruvpd_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}
int
bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_max_size_s *iocmd =
			(struct bfa_bsg_fruvpd_max_size_s *)cmd;
	unsigned long flags = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
						&iocmd->max_size);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
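/*
 * Dispatch a vendor-unique BSG command to its handler. 'cmd' is the
 * IOCMD_* code taken from the vendor request and 'iocmd' points at the
 * linearized payload; unknown codes fall through to -EINVAL.
 */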
int
bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
		unsigned int payload_len)
{
	int rc = -EINVAL;

	switch (cmd) {
	case IOCMD_IOC_ENABLE:
		rc = bfad_iocmd_ioc_enable(bfad, iocmd);
		break;
	case IOCMD_IOC_DISABLE:
		rc = bfad_iocmd_ioc_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_INFO:
		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_ATTR:
		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_STATS:
		rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_FWSTATS:
		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
		break;
	case IOCMD_IOC_RESET_STATS:
	case IOCMD_IOC_RESET_FWSTATS:
		rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
		break;
	case IOCMD_IOC_SET_ADAPTER_NAME:
	case IOCMD_IOC_SET_PORT_NAME:
		rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
		break;
	case IOCMD_IOCFC_GET_ATTR:
		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOCFC_SET_INTR:
		rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
		break;
	case IOCMD_PORT_ENABLE:
		rc = bfad_iocmd_port_enable(bfad, iocmd);
		break;
	case IOCMD_PORT_DISABLE:
		rc = bfad_iocmd_port_disable(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_ATTR:
		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_STATS:
		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_PORT_RESET_STATS:
		rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_TOPO:
	case IOCMD_PORT_CFG_SPEED:
	case IOCMD_PORT_CFG_ALPA:
	case IOCMD_PORT_CLR_ALPA:
		rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
		break;
	case IOCMD_PORT_CFG_MAXFRSZ:
		rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
		break;
	case IOCMD_PORT_BBCR_ENABLE:
	case IOCMD_PORT_BBCR_DISABLE:
		rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
		break;
	case IOCMD_PORT_BBCR_GET_ATTR:
		rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_ATTR:
		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_STATS:
		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_RESET_STATS:
		rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_IOSTATS:
		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_RPORTS:
		rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RPORT_GET_ATTR:
		rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_ADDR:
		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_STATS:
		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_RESET_STATS:
		rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_SET_SPEED:
		rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_ATTR:
		rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_STATS:
		rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
		break;
	case IOCMD_VPORT_RESET_STATS:
		rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FABRIC_GET_LPORTS:
		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RATELIM_ENABLE:
	case IOCMD_RATELIM_DISABLE:
		rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
		break;
	case IOCMD_RATELIM_DEF_SPEED:
		rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
		break;
	case IOCMD_FCPIM_FAILOVER:
		rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATS:
		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATSCLR:
		rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_DEL_ITN_STATS:
		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ATTR:
		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_IOSTATS:
		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_RESET_STATS:
		rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ITNSTATS:
		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_ENABLE:
		rc = bfad_iocmd_fcport_enable(bfad, iocmd);
		break;
	case IOCMD_FCPORT_DISABLE:
		rc = bfad_iocmd_fcport_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_PCIFN_CFG:
		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
		break;
	case IOCMD_IOC_FW_SIG_INV:
		rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd);
		break;
	case IOCMD_PCIFN_CREATE:
		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
		break;
	case IOCMD_PCIFN_DELETE:
		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
		break;
	case IOCMD_PCIFN_BW:
		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
		break;
	case IOCMD_ADAPTER_CFG_MODE:
		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_MODE:
		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_FLASH_ENABLE_OPTROM:
	case IOCMD_FLASH_DISABLE_OPTROM:
		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
		break;
	case IOCMD_FAA_QUERY:
		rc = bfad_iocmd_faa_query(bfad, iocmd);
		break;
	case IOCMD_CEE_GET_ATTR:
		rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_GET_STATS:
		rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_RESET_STATS:
		rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
		break;
	case IOCMD_SFP_MEDIA:
		rc = bfad_iocmd_sfp_media(bfad, iocmd);
		break;
	case IOCMD_SFP_SPEED:
		rc = bfad_iocmd_sfp_speed(bfad, iocmd);
		break;
	case IOCMD_FLASH_GET_ATTR:
		rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
		break;
	case IOCMD_FLASH_ERASE_PART:
		rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
		break;
	case IOCMD_FLASH_UPDATE_PART:
		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_FLASH_READ_PART:
		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_DIAG_TEMP:
		rc = bfad_iocmd_diag_temp(bfad, iocmd);
		break;
	case IOCMD_DIAG_MEMTEST:
		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
		break;
	case IOCMD_DIAG_LOOPBACK:
		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
		break;
	case IOCMD_DIAG_FWPING:
		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
		break;
	case IOCMD_DIAG_QUEUETEST:
		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
		break;
	case IOCMD_DIAG_SFP:
		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
		break;
	case IOCMD_DIAG_LED:
		rc = bfad_iocmd_diag_led(bfad, iocmd);
		break;
	case IOCMD_DIAG_BEACON_LPORT:
		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
		break;
	case IOCMD_DIAG_LB_STAT:
		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_ENABLE:
		rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_DISABLE:
		rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_SHOW:
		rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_START:
		rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_ATTR:
		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_STATS:
		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
		break;
	case IOCMD_PHY_UPDATE_FW:
		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
		break;
	case IOCMD_PHY_READ_FW:
		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
		break;
	case IOCMD_VHBA_QUERY:
		rc = bfad_iocmd_vhba_query(bfad, iocmd);
		break;
	case IOCMD_DEBUG_PORTLOG:
		rc = bfad_iocmd_porglog_get(bfad, iocmd);
		break;
	case IOCMD_DEBUG_FW_CORE:
		rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
		break;
	case IOCMD_DEBUG_FW_STATE_CLR:
	case IOCMD_DEBUG_PORTLOG_CLR:
	case IOCMD_DEBUG_START_DTRC:
	case IOCMD_DEBUG_STOP_DTRC:
		rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
		break;
	case IOCMD_DEBUG_PORTLOG_CTL:
		rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
		break;
	case IOCMD_FCPIM_PROFILE_ON:
	case IOCMD_FCPIM_PROFILE_OFF:
		rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
		break;
	case IOCMD_ITNIM_GET_IOPROFILE:
		rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
		break;
	case IOCMD_FCPORT_GET_STATS:
		rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_RESET_STATS:
		rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_BOOT_CFG:
		rc = bfad_iocmd_boot_cfg(bfad, iocmd);
		break;
	case IOCMD_BOOT_QUERY:
		rc = bfad_iocmd_boot_query(bfad, iocmd);
		break;
	case IOCMD_PREBOOT_QUERY:
		rc = bfad_iocmd_preboot_query(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_CFG:
		rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_QUERY:
		rc = bfad_iocmd_ethboot_query(bfad, iocmd);
		break;
	case IOCMD_TRUNK_ENABLE:
	case IOCMD_TRUNK_DISABLE:
		rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
		break;
	case IOCMD_TRUNK_GET_ATTR:
		rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_ENABLE:
	case IOCMD_QOS_DISABLE:
		rc = bfad_iocmd_qos(bfad, iocmd, cmd);
		break;
	case IOCMD_QOS_GET_ATTR:
		rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_VC_ATTR:
		rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_STATS:
		rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_RESET_STATS:
		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_SET_BW:
		rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
		break;
	case IOCMD_VF_GET_STATS:
		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
		break;
	case IOCMD_VF_RESET_STATS:
		rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ENABLE:
	case IOCMD_FCPIM_LUNMASK_DISABLE:
	case IOCMD_FCPIM_LUNMASK_CLEAR:
		rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_LUNMASK_QUERY:
		rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ADD:
	case IOCMD_FCPIM_LUNMASK_DELETE:
		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_THROTTLE_QUERY:
		rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_THROTTLE_SET:
		rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
		break;
	case IOCMD_TFRU_READ:
		rc = bfad_iocmd_tfru_read(bfad, iocmd);
		break;
	case IOCMD_TFRU_WRITE:
		rc = bfad_iocmd_tfru_write(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_READ:
		rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_UPDATE:
		rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_GET_MAX_SIZE:
		rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}
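/*
 * BSG vendor request entry point: the scatter/gather request payload is
 * copied into a temporary linear buffer, handed to bfad_iocmd_handler(),
 * and the (in-place) result is copied back into the reply payload.
 */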
int
bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
{
	uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct request_queue *request_q = job->req->q;
	void *payload_kbuf;
	int rc = -EINVAL;

	/*
	 * Set the BSG device request_queue size to 256 to support
	 * payloads larger than 512*1024K bytes.
	 */
	blk_queue_max_segments(request_q, 256);

	/* Allocate a temp buffer to hold the passed in user space command */
	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!payload_kbuf) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, payload_kbuf,
			  job->request_payload.payload_len);

	/* Invoke IOCMD handler - to handle all the vendor command requests */
	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
				job->request_payload.payload_len);
	if (rc != BFA_STATUS_OK)
		goto error;

	/* Copy the response data to the job->reply_payload sg_list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    payload_kbuf,
			    job->reply_payload.payload_len);

	/* free the command buffer */
	kfree(payload_kbuf);

	/* Fill the BSG job reply data */
	job->reply_len = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	job->reply->result = rc;

	job->job_done(job);
	return rc;
error:
	/* free the command buffer */
	kfree(payload_kbuf);
out:
	job->reply->result = rc;
	job->reply_len = sizeof(uint32_t);
	job->reply->reply_payload_rcv_len = 0;
	return rc;
}
/* FC passthru call backs */
u64
bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->req_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->req_sge + sgeid;
	return sge->sg_len;
}

u64
bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->rsp_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->rsp_sge + sgeid;
	return sge->sg_len;
}

void
bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
		struct fchs_s *rsp_fchs)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;

	drv_fcxp->req_status = req_status;
	drv_fcxp->rsp_len = rsp_len;

	/* bfa_fcxp will be automatically freed by BFA */
	drv_fcxp->bfa_fcxp = NULL;
	complete(&drv_fcxp->comp);
}
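/*
 * Map a linear passthru buffer for the FCXP engine: a single
 * DMA-coherent buffer is allocated, the caller's data copied in, and a
 * one-entry SG table built right behind the bfad_buf_info bookkeeping.
 */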
struct bfad_buf_info *
bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
		 uint32_t payload_len, uint32_t *num_sgles)
{
	struct bfad_buf_info *buf_base, *buf_info;
	struct bfa_sge_s *sg_table;
	int sge_num = 1;

	buf_base = kzalloc((sizeof(struct bfad_buf_info) +
			sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
	if (!buf_base)
		return NULL;

	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
			(sizeof(struct bfad_buf_info) * sge_num));

	/* Allocate dma coherent memory */
	buf_info = buf_base;
	buf_info->size = payload_len;
	buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
					&buf_info->phys, GFP_KERNEL);
	if (!buf_info->virt)
		goto out_free_mem;

	/* copy the linear bsg buffer to buf_info */
	memset(buf_info->virt, 0, buf_info->size);
	memcpy(buf_info->virt, payload_kbuf, buf_info->size);

	/* Setup SG table */
	sg_table->sg_len = buf_info->size;
	sg_table->sg_addr = (void *)(size_t) buf_info->phys;

	*num_sgles = sge_num;

	return buf_base;

out_free_mem:
	kfree(buf_base);
	return NULL;
}
void
bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
		   uint32_t num_sgles)
{
	int i;
	struct bfad_buf_info *buf_info = buf_base;

	if (buf_base) {
		for (i = 0; i < num_sgles; buf_info++, i++) {
			if (buf_info->virt != NULL)
				dma_free_coherent(&bfad->pcidev->dev,
					buf_info->size, buf_info->virt,
					buf_info->phys);
		}
		kfree(buf_base);
	}
}
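/*
 * Allocate an FCXP for the passthru request and fire it off. The SG
 * address/length callbacks above feed the request/response buffers to
 * the hardware; bfad_send_fcpt_cb() completes drv_fcxp->comp.
 */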
int
bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
		   bfa_bsg_fcpt_t *bsg_fcpt)
{
	struct bfa_fcxp_s *hal_fcxp;
	struct bfad_s *bfad = drv_fcxp->port->bfad;
	unsigned long flags;
	uint8_t lp_tag;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Allocate bfa_fcxp structure */
	hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
				drv_fcxp->num_req_sgles,
				drv_fcxp->num_rsp_sgles,
				bfad_fcxp_get_req_sgaddr_cb,
				bfad_fcxp_get_req_sglen_cb,
				bfad_fcxp_get_rsp_sgaddr_cb,
				bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
	if (!hal_fcxp) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_ENOMEM;
	}

	drv_fcxp->bfa_fcxp = hal_fcxp;

	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);

	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
		      bsg_fcpt->cts, bsg_fcpt->cos,
		      job->request_payload.payload_len,
		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
		      job->reply_payload.payload_len, bsg_fcpt->tsecs);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;
}
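/*
 * ELS/CT passthru: copy the user command (bfa_bsg_fcpt_t) in, resolve
 * the local port (and remote port for RPT requests), map the request
 * and response buffers, send the FCXP and wait for its completion
 * before copying status and response data back to user space.
 */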
int
bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
{
	struct bfa_bsg_data *bsg_data;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	bfa_bsg_fcpt_t *bsg_fcpt;
	struct bfad_fcxp *drv_fcxp;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	uint32_t command_type = job->request->msgcode;
	unsigned long flags;
	struct bfad_buf_info *rsp_buf_info;
	void *req_kbuf = NULL, *rsp_kbuf = NULL;
	int rc = -EINVAL;

	job->reply_len = sizeof(uint32_t);	/* At least uint32_t reply_len */
	job->reply->reply_payload_rcv_len = 0;

	/* Get the payload passed in from userspace */
	bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
				sizeof(struct fc_bsg_request));
	if (bsg_data == NULL)
		goto out;

	/*
	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
	 * buffer of size bsg_data->payload_len
	 */
	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
	if (!bsg_fcpt) {
		rc = -ENOMEM;
		goto out;
	}

	if (copy_from_user((uint8_t *)bsg_fcpt,
			(void *)(unsigned long)bsg_data->payload,
			bsg_data->payload_len)) {
		kfree(bsg_fcpt);
		rc = -EIO;
		goto out;
	}

	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
	if (drv_fcxp == NULL) {
		kfree(bsg_fcpt);
		rc = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
					bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (drv_fcxp->port->bfad == 0)
		drv_fcxp->port->bfad = bfad;

	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
	    command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;

	} else if (command_type == FC_BSG_RPT_ELS ||
		   command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}

		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;

	} else { /* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->reqbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_req_sgles));

	/* map rsp sg */
	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->rspbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_rsp_sgles));

	/* fcxp send */
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		job->reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		job->reply->reply_data.ctels_reply.status =
				FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);

out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(req_kbuf);
	kfree(rsp_kbuf);

	/* Need a copy to user op */
	if (copy_to_user((void *)(unsigned long)bsg_data->payload,
			 (void *)bsg_fcpt, bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	job->reply->result = rc;

	if (rc == BFA_STATUS_OK)
		job->job_done(job);

	return rc;
}
int
bfad_im_bsg_request(struct fc_bsg_job *job)
{
	uint32_t rc = BFA_STATUS_OK;

	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* Process BSG HST Vendor requests */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		job->reply->result = rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		break;
	}

	return rc;
}
int
bfad_im_bsg_timeout(struct fc_bsg_job *job)
{
	/* Don't complete the BSG job request - return -EAGAIN
	 * to reset bsg job timeout : for ELS/CT pass thru we
	 * already have timer to track the request.
	 */
	return -EAGAIN;
}