/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/uaccess.h>

BFA_TRC_FILE(LDRV, BSG);
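
/*
 * BSG vendor command handlers. Each handler follows the same pattern:
 * take bfad->bfad_lock, issue the BFA request, and, for operations that
 * complete asynchronously, wait on a completion before reporting status.
 */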
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* If IOC is not in disabled state - return */
	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_OK;

	init_completion(&bfad->enable_comp);
	bfa_iocfc_enable(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->enable_comp);

bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_OK;

	if (bfad->disable_active) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	bfad->disable_active = BFA_TRUE;
	init_completion(&bfad->disable_comp);
	bfa_iocfc_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	wait_for_completion(&bfad->disable_comp);
	bfad->disable_active = BFA_FALSE;
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s *im_port;
	struct bfa_port_attr_s pattr;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	iocmd->bfad_num = bfad->inst_no;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);

	/* set adapter hw path */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
	for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++)
	for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; )
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;

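/*
 * Return the IOC attributes, then overlay the driver name/version and the
 * firmware, option ROM, and saved PCI attribute information.
 */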
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
		sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
		sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;

	bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
	struct bfa_bsg_ioc_fwstats_s *iocmd =
			(struct bfa_bsg_ioc_fwstats_s *)cmd;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_ioc_fwstats_s),
			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);

	bfa_trc(bfad, 0x6666);

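/*
 * Clear either the driver-side IOC statistics or the firmware statistics,
 * depending on which IOCMD_IOC_RESET_* command was issued.
 */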
bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;

	if (v_cmd == IOCMD_IOC_RESET_STATS) {
		bfa_ioc_clear_stats(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	} else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
	struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;

	if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
		strcpy(bfad->adapter_name, iocmd->name);
	else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
		strcpy(bfad->port_name, iocmd->name);

	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;

	iocmd->status = BFA_STATUS_OK;
	bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);

bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

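/*
 * Port enable/disable are asynchronous: bfad_hcb_comp is passed as the
 * completion callback, the handler waits on fcomp.comp, and the final
 * status is taken from fcomp.status.
 */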
bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s port_attr;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	strncpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(port_attr.port_cfg.sym_name.symname));

	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
	struct bfad_hal_comp fcomp;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_port_stats_s),
			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
				iocmd_bufptr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
	struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_PORT_CFG_TOPO)
		cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_SPEED)
		cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_ALPA)
		cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CLR_ALPA)
		cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
			(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
	struct bfa_bsg_bbcr_enable_s *iocmd =
			(struct bfa_bsg_bbcr_enable_s *)pcmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_PORT_BBCR_ENABLE)
		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
	else if (cmd == IOCMD_PORT_BBCR_DISABLE)
		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);

		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
	struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

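/*
 * Logical port (lport) queries: look up the port by vf_id/pwwn under
 * bfad_lock and fail with BFA_STATUS_UNKNOWN_LWWN if it does not exist.
 */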
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;

	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_stats_s *iocmd =
			(struct bfa_bsg_lport_stats_s *)cmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;

	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;

	bfa_fcs_lport_clear_stats(fcs_port);
	/* clear IO stats from all active itnims */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
		bfa_itnim_clear_stats(itnim);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_iostats_s *iocmd =
			(struct bfa_bsg_lport_iostats_s *)cmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;

	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
	struct bfa_bsg_lport_get_rports_s *iocmd =
			(struct bfa_bsg_lport_get_rports_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;

	if (iocmd->nrports == 0)

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_lport_get_rports_s),
			sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
		iocmd->status = BFA_STATUS_VERSION_FAIL;

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_lport_get_rports_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;

	bfa_fcs_lport_get_rport_quals(fcs_port,
		(struct bfa_rport_qualifier_s *)iocmd_bufptr,
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;

	fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
						iocmd->rpwwn, iocmd->pid);
		fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;

	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *fcs_itnim;
	struct bfad_itnim_s *drv_itnim;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;

	drv_itnim = fcs_itnim->itnim_drv;

	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;

		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_rport_stats_s *iocmd =
			(struct bfa_bsg_rport_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;

	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
		sizeof(struct bfa_rport_stats_s));
	if (bfa_fcs_rport_get_halrport(fcs_rport)) {
		memcpy((void *)&iocmd->stats.hal_stats,
		       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
		       sizeof(struct bfa_rport_hal_stats_s));

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	struct bfa_rport_s *rport;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;

	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
	rport = bfa_fcs_rport_get_halrport(fcs_rport);
	memset(&rport->stats, 0, sizeof(rport->stats));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_rport_set_speed_s *iocmd =
			(struct bfa_bsg_rport_set_speed_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;

	fcs_rport->rpf.assigned_speed = iocmd->speed;
	/* Set this speed in f/w only if the RPSC speed is not available */
	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
		if (fcs_rport->bfa_rport)
			bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;

	bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_stats_s *iocmd =
			(struct bfa_bsg_vport_stats_s *)cmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;

	memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
		sizeof(struct bfa_vport_stats_s));
	memcpy((void *)&iocmd->vport_stats.port_stats,
	       (void *)&fcs_vport->lport.stats,
		sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;

	memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
	memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
	struct bfa_bsg_fabric_get_lports_s *iocmd =
			(struct bfa_bsg_fabric_get_lports_s *)cmd;
	bfa_fcs_vf_t *fcs_vf;
	uint32_t nports = iocmd->nports;

		iocmd->status = BFA_STATUS_EINVAL;

	if (bfad_chk_iocmd_sz(payload_len,
		sizeof(struct bfa_bsg_fabric_get_lports_s),
		sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_fabric_get_lports_s);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;

	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->nports = nports;
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
	struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;

		if (cmd == IOCMD_RATELIM_ENABLE)
			fcport->cfg.ratelimit = BFA_TRUE;
		else if (cmd == IOCMD_RATELIM_DISABLE)
			fcport->cfg.ratelimit = BFA_FALSE;

		if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
			fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;

		iocmd->status = BFA_STATUS_OK;

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
	struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Auto and speeds greater than the supported speed, are invalid */
	if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
	    (iocmd->speed > fcport->speed_sup)) {
		iocmd->status = BFA_STATUS_UNSUPP_SPEED;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;

		fcport->cfg.trl_def_speed = iocmd->speed;
		iocmd->status = BFA_STATUS_OK;

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

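/* Set the FCP initiator path timeout value (path TOV) for the HBA. */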
bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_fcpim_modstats_s *iocmd =
			(struct bfa_bsg_fcpim_modstats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* accumulate IO stats from itnim */
	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
		(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	memset(&fcpim->del_itn_stats, 0,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
		(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;

bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
					iocmd->rpwwn, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_itnim_iostats_s *iocmd =
			(struct bfa_bsg_itnim_iostats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
			iocmd->status = BFA_STATUS_OK;
			if (bfa_fcs_itnim_get_halitn(itnim))
				memcpy((void *)&iocmd->iostats, (void *)
				&(bfa_fcs_itnim_get_halitn(itnim)->stats),
				sizeof(struct bfa_itnim_iostats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
			bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_itnim_itnstats_s *iocmd =
			(struct bfa_bsg_itnim_itnstats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_enable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_id, iocmd->port,
				iocmd->pcifn_class, iocmd->bw_min,
				iocmd->bw_max, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id, iocmd->bw_min,
				iocmd->bw_max, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);

bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
				iocmd->cfg.mode, iocmd->cfg.max_pf,
				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_port_cfg_mode_s *iocmd =
			(struct bfa_bsg_port_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
				iocmd->instance, iocmd->cfg.mode,
				iocmd->cfg.max_pf, iocmd->cfg.max_vf,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	iocmd->status = BFA_STATUS_OK;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

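/*
 * CEE queries return variable-length data: bfad_chk_iocmd_sz() verifies that
 * the BSG payload is large enough, and the result is written to the buffer
 * that follows the fixed bfa_bsg_* header (iocmd_bufptr).
 */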
bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
	struct bfa_bsg_cee_attr_s *iocmd =
				(struct bfa_bsg_cee_attr_s *)cmd;
	struct bfad_hal_comp cee_comp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_attr_s),
			sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
					 bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);

	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);

bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
	struct bfa_bsg_cee_stats_s *iocmd =
			(struct bfa_bsg_cee_stats_s *)cmd;
	struct bfad_hal_comp cee_comp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_stats_s),
			sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
					bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);

	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);

bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, 0x5555);

bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_flash_attr_s *iocmd =
			(struct bfa_bsg_flash_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
				iocmd->instance, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_flash_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				iocmd->type, iocmd->instance, iocmd_bufptr,
				iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_flash_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
				iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_diag_get_temp_s *iocmd =
			(struct bfa_bsg_diag_get_temp_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_diag_memtest_s *iocmd =
			(struct bfa_bsg_diag_memtest_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->memtest, iocmd->pat,
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_diag_loopback_s *iocmd =
			(struct bfa_bsg_diag_loopback_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
				iocmd->speed, iocmd->lpcnt, iocmd->pat,
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_diag_fwping_s *iocmd =
			(struct bfa_bsg_diag_fwping_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
				iocmd->pattern, &iocmd->result,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)

	bfa_trc(bfad, 0x77771);
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
				iocmd->queue, &iocmd->result,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_sfp_show_s *iocmd =
			(struct bfa_bsg_sfp_show_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);

bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_diag_beacon_s *iocmd =
			(struct bfa_bsg_diag_beacon_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
				iocmd->beacon, iocmd->link_e2e_beacon,
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_diag_lb_stat_s *iocmd =
			(struct bfa_bsg_diag_lb_stat_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);

bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
	struct bfa_bsg_dport_enable_s *iocmd =
			(struct bfa_bsg_dport_enable_s *)pcmd;
	unsigned long flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
					iocmd->pat, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, iocmd->status);

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	unsigned long flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, iocmd->status);

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
	struct bfa_bsg_dport_enable_s *iocmd =
			(struct bfa_bsg_dport_enable_s *)pcmd;
	unsigned long flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
					iocmd->pat, bfad_hcb_comp,
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
	struct bfa_bsg_diag_dport_show_s *iocmd =
			(struct bfa_bsg_diag_dport_show_s *)pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_phy_attr_s *iocmd =
			(struct bfa_bsg_phy_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->attr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_phy_stats_s *iocmd =
			(struct bfa_bsg_phy_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->stats, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
				0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	if (iocmd->status != BFA_STATUS_OK)

bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_vhba_attr_s *iocmd =
			(struct bfa_bsg_vhba_attr_s *)cmd;
	struct bfa_vhba_attr_s *attr = &iocmd->attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	attr->pwwn = bfad->bfa.ioc.attr->pwwn;
	attr->nwwn = bfad->bfa.ioc.attr->nwwn;
	attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
	attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
				0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;

bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;

	if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
		bfa_trc(bfad, sizeof(struct bfa_plog_s));
		iocmd->status = BFA_STATUS_EINVAL;

	iocmd->status = BFA_STATUS_OK;
	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
	memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));

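/*
 * Firmware core dumps are read in fixed-size chunks; the caller passes the
 * current offset and buffer size, and the handler returns the updated offset.
 */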
#define BFA_DEBUG_FW_CORE_CHUNK_SZ	0x4000U /* 16K chunks for FW dump */
bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
			BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;

	if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
	    !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
	    !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
		bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
		iocmd->status = BFA_STATUS_EINVAL;

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	offset = iocmd->offset;
	iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
				&offset, &iocmd->bufsz);
	iocmd->offset = offset;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
		bfa_trc_init(bfad->trcmod);
	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
		bfa_trc_stop(bfad->trcmod);

	iocmd->status = BFA_STATUS_OK;

int
bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;

	if (iocmd->ctl == BFA_TRUE)
		bfad->plog_buf.plog_enabled = 1;
	else
		bfad->plog_buf.plog_enabled = 0;

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
int
bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_profile_s *iocmd =
				(struct bfa_bsg_fcpim_profile_s *)cmd;
	struct timeval tv;
	unsigned long flags;

	do_gettimeofday(&tv);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
static int
bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_ioprofile_s *iocmd =
				(struct bfa_bsg_itnim_ioprofile_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else
			iocmd->status = bfa_itnim_get_ioprofile(
					bfa_fcs_itnim_get_halitn(itnim),
					&iocmd->ioprofile);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
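/*
 * FC port statistics are fetched asynchronously: the request is queued to
 * the firmware with a pending-queue callback and the caller sleeps on the
 * completion until bfad_hcb_comp() posts the result.
 */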
int
bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
				(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
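/*
 * Trunking cannot be reconfigured while the port is in dport (diagnostic)
 * mode or in loop topology; enabling or disabling it toggles the port so
 * the new trunk configuration takes effect.
 */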
int
bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if (bfa_fcport_is_dport(&bfad->bfa)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_DPORT_ERR;
	}

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		if (v_cmd == IOCMD_TRUNK_ENABLE) {
			trunk->attr.state = BFA_TRUNK_OFFLINE;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_TRUE;
		} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
			trunk->attr.state = BFA_TRUNK_DISABLED;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_FALSE;
		}

		if (!bfa_fcport_is_disabled(&bfad->bfa))
			bfa_fcport_enable(&bfad->bfa);

		iocmd->status = BFA_STATUS_OK;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
			sizeof(struct bfa_trunk_attr_s));
		iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
		if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
			(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
			iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
		else {
			if (v_cmd == IOCMD_QOS_ENABLE)
				fcport->cfg.qos_enabled = BFA_TRUE;
			else if (v_cmd == IOCMD_QOS_DISABLE) {
				fcport->cfg.qos_enabled = BFA_FALSE;
				fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
				fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
				fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
			}
			iocmd->status = BFA_STATUS_OK;
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		iocmd->attr.state = fcport->qos_attr.state;
		iocmd->attr.total_bb_cr =
			be32_to_cpu(fcport->qos_attr.total_bb_cr);
		iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
		iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
		iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
		iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_vc_attr_s *iocmd =
			(struct bfa_bsg_qos_vc_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
	unsigned long flags;
	u32 i = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
	iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
	iocmd->attr.elp_opmode_flags =
			be32_to_cpu(bfa_vc_attr->elp_opmode_flags);

	/* Individual VC info */
	while (i < iocmd->attr.total_vc_count) {
		iocmd->attr.vc_info[i].vc_credit =
			bfa_vc_attr->vc_info[i].vc_credit;
		iocmd->attr.vc_info[i].borrow_credit =
			bfa_vc_attr->vc_info[i].borrow_credit;
		iocmd->attr.vc_info[i].priority =
			bfa_vc_attr->vc_info[i].priority;
		i++;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
int
bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
				(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_stats_s *iocmd =
			(struct bfa_bsg_vf_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
		sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
int
bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_reset_stats_s *iocmd =
			(struct bfa_bsg_vf_reset_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
/* Function to reset the LUN SCAN mode */
static void
bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
{
	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
	struct bfad_vport_s *vport = NULL;

	/* Set the scsi device LUN SCAN flags for base port */
	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);

	/* Set the scsi device LUN SCAN flags for the vports */
	list_for_each_entry(vport, &bfad->vport_list, list_entry)
		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
}
int
bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
		/* Set the LUN Scanning mode to be Sequential scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
			(struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
	struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_lunmask_s *iocmd =
			(struct bfa_bsg_fcpim_lunmask_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
		iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
					&iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
		iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
					iocmd->vf_id, &iocmd->pwwn,
					iocmd->rpwwn, iocmd->lun);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_throttle_s *iocmd =
			(struct bfa_bsg_fcpim_throttle_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
			(void *)&iocmd->throttle);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_throttle_s *iocmd =
			(struct bfa_bsg_fcpim_throttle_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
			iocmd->throttle.cfg_value);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
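/*
 * TFRU / FRU VPD accessors: each request is issued to the FRU module under
 * the driver lock and, on success, waits for the firmware completion before
 * returning the status to user space.
 */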
int
bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_tfru_s *iocmd =
			(struct bfa_bsg_tfru_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}
int
bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_tfru_s *iocmd =
			(struct bfa_bsg_tfru_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}
int
bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_s *iocmd =
			(struct bfa_bsg_fruvpd_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}
int
bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_s *iocmd =
			(struct bfa_bsg_fruvpd_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}
int
bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_max_size_s *iocmd =
			(struct bfa_bsg_fruvpd_max_size_s *)cmd;
	unsigned long flags = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
						&iocmd->max_size);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
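/*
 * Central dispatcher for all BSG vendor commands: decode the IOCMD_* opcode
 * and hand the linearized payload to the matching bfad_iocmd_*() helper.
 */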
static int
bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
		unsigned int payload_len)
{
	int rc = -EINVAL;

	switch (cmd) {
	case IOCMD_IOC_ENABLE:
		rc = bfad_iocmd_ioc_enable(bfad, iocmd);
		break;
	case IOCMD_IOC_DISABLE:
		rc = bfad_iocmd_ioc_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_INFO:
		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_ATTR:
		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_STATS:
		rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_FWSTATS:
		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
		break;
	case IOCMD_IOC_RESET_STATS:
	case IOCMD_IOC_RESET_FWSTATS:
		rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
		break;
	case IOCMD_IOC_SET_ADAPTER_NAME:
	case IOCMD_IOC_SET_PORT_NAME:
		rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
		break;
	case IOCMD_IOCFC_GET_ATTR:
		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOCFC_SET_INTR:
		rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
		break;
	case IOCMD_PORT_ENABLE:
		rc = bfad_iocmd_port_enable(bfad, iocmd);
		break;
	case IOCMD_PORT_DISABLE:
		rc = bfad_iocmd_port_disable(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_ATTR:
		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_STATS:
		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_PORT_RESET_STATS:
		rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_TOPO:
	case IOCMD_PORT_CFG_SPEED:
	case IOCMD_PORT_CFG_ALPA:
	case IOCMD_PORT_CLR_ALPA:
		rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
		break;
	case IOCMD_PORT_CFG_MAXFRSZ:
		rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
		break;
	case IOCMD_PORT_BBCR_ENABLE:
	case IOCMD_PORT_BBCR_DISABLE:
		rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
		break;
	case IOCMD_PORT_BBCR_GET_ATTR:
		rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_ATTR:
		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_STATS:
		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_RESET_STATS:
		rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_IOSTATS:
		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_RPORTS:
		rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RPORT_GET_ATTR:
		rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_ADDR:
		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_STATS:
		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_RESET_STATS:
		rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_SET_SPEED:
		rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_ATTR:
		rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_STATS:
		rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
		break;
	case IOCMD_VPORT_RESET_STATS:
		rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FABRIC_GET_LPORTS:
		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RATELIM_ENABLE:
	case IOCMD_RATELIM_DISABLE:
		rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
		break;
	case IOCMD_RATELIM_DEF_SPEED:
		rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
		break;
	case IOCMD_FCPIM_FAILOVER:
		rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATS:
		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATSCLR:
		rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_DEL_ITN_STATS:
		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ATTR:
		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_IOSTATS:
		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_RESET_STATS:
		rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ITNSTATS:
		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_ENABLE:
		rc = bfad_iocmd_fcport_enable(bfad, iocmd);
		break;
	case IOCMD_FCPORT_DISABLE:
		rc = bfad_iocmd_fcport_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_PCIFN_CFG:
		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
		break;
	case IOCMD_PCIFN_CREATE:
		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
		break;
	case IOCMD_PCIFN_DELETE:
		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
		break;
	case IOCMD_PCIFN_BW:
		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
		break;
	case IOCMD_ADAPTER_CFG_MODE:
		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_MODE:
		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_FLASH_ENABLE_OPTROM:
	case IOCMD_FLASH_DISABLE_OPTROM:
		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
		break;
	case IOCMD_FAA_QUERY:
		rc = bfad_iocmd_faa_query(bfad, iocmd);
		break;
	case IOCMD_CEE_GET_ATTR:
		rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_GET_STATS:
		rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_RESET_STATS:
		rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
		break;
	case IOCMD_SFP_MEDIA:
		rc = bfad_iocmd_sfp_media(bfad, iocmd);
		break;
	case IOCMD_SFP_SPEED:
		rc = bfad_iocmd_sfp_speed(bfad, iocmd);
		break;
	case IOCMD_FLASH_GET_ATTR:
		rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
		break;
	case IOCMD_FLASH_ERASE_PART:
		rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
		break;
	case IOCMD_FLASH_UPDATE_PART:
		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_FLASH_READ_PART:
		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_DIAG_TEMP:
		rc = bfad_iocmd_diag_temp(bfad, iocmd);
		break;
	case IOCMD_DIAG_MEMTEST:
		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
		break;
	case IOCMD_DIAG_LOOPBACK:
		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
		break;
	case IOCMD_DIAG_FWPING:
		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
		break;
	case IOCMD_DIAG_QUEUETEST:
		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
		break;
	case IOCMD_DIAG_SFP:
		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
		break;
	case IOCMD_DIAG_LED:
		rc = bfad_iocmd_diag_led(bfad, iocmd);
		break;
	case IOCMD_DIAG_BEACON_LPORT:
		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
		break;
	case IOCMD_DIAG_LB_STAT:
		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_ENABLE:
		rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_DISABLE:
		rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_SHOW:
		rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_START:
		rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_ATTR:
		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_STATS:
		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
		break;
	case IOCMD_PHY_UPDATE_FW:
		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
		break;
	case IOCMD_PHY_READ_FW:
		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
		break;
	case IOCMD_VHBA_QUERY:
		rc = bfad_iocmd_vhba_query(bfad, iocmd);
		break;
	case IOCMD_DEBUG_PORTLOG:
		rc = bfad_iocmd_porglog_get(bfad, iocmd);
		break;
	case IOCMD_DEBUG_FW_CORE:
		rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
		break;
	case IOCMD_DEBUG_FW_STATE_CLR:
	case IOCMD_DEBUG_PORTLOG_CLR:
	case IOCMD_DEBUG_START_DTRC:
	case IOCMD_DEBUG_STOP_DTRC:
		rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
		break;
	case IOCMD_DEBUG_PORTLOG_CTL:
		rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
		break;
	case IOCMD_FCPIM_PROFILE_ON:
	case IOCMD_FCPIM_PROFILE_OFF:
		rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
		break;
	case IOCMD_ITNIM_GET_IOPROFILE:
		rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
		break;
	case IOCMD_FCPORT_GET_STATS:
		rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_RESET_STATS:
		rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_BOOT_CFG:
		rc = bfad_iocmd_boot_cfg(bfad, iocmd);
		break;
	case IOCMD_BOOT_QUERY:
		rc = bfad_iocmd_boot_query(bfad, iocmd);
		break;
	case IOCMD_PREBOOT_QUERY:
		rc = bfad_iocmd_preboot_query(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_CFG:
		rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_QUERY:
		rc = bfad_iocmd_ethboot_query(bfad, iocmd);
		break;
	case IOCMD_TRUNK_ENABLE:
	case IOCMD_TRUNK_DISABLE:
		rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
		break;
	case IOCMD_TRUNK_GET_ATTR:
		rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_ENABLE:
	case IOCMD_QOS_DISABLE:
		rc = bfad_iocmd_qos(bfad, iocmd, cmd);
		break;
	case IOCMD_QOS_GET_ATTR:
		rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_VC_ATTR:
		rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_STATS:
		rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_RESET_STATS:
		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_SET_BW:
		rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
		break;
	case IOCMD_VF_GET_STATS:
		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
		break;
	case IOCMD_VF_RESET_STATS:
		rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ENABLE:
	case IOCMD_FCPIM_LUNMASK_DISABLE:
	case IOCMD_FCPIM_LUNMASK_CLEAR:
		rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_LUNMASK_QUERY:
		rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ADD:
	case IOCMD_FCPIM_LUNMASK_DELETE:
		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_THROTTLE_QUERY:
		rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_THROTTLE_SET:
		rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
		break;
	case IOCMD_TFRU_READ:
		rc = bfad_iocmd_tfru_read(bfad, iocmd);
		break;
	case IOCMD_TFRU_WRITE:
		rc = bfad_iocmd_tfru_write(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_READ:
		rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_UPDATE:
		rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_GET_MAX_SIZE:
		rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}
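/*
 * Vendor-specific BSG request path: copy the scatter-gather payload into a
 * linear kernel buffer, run it through bfad_iocmd_handler(), and copy the
 * response back into the reply payload.
 */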
int
bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
{
	uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct request_queue *request_q = job->req->q;
	void *payload_kbuf;
	int rc = -EINVAL;

	/*
	 * Set the BSG device request_queue size to 256 to support
	 * payloads larger than 512 KB.
	 */
	blk_queue_max_segments(request_q, 256);

	/* Allocate a temp buffer to hold the passed in user space command */
	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!payload_kbuf) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, payload_kbuf,
			  job->request_payload.payload_len);

	/* Invoke IOCMD handler - to handle all the vendor command requests */
	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
				job->request_payload.payload_len);
	if (rc != BFA_STATUS_OK)
		goto error;

	/* Copy the response data to the job->reply_payload sg_list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    payload_kbuf,
			    job->reply_payload.payload_len);

	/* free the command buffer */
	kfree(payload_kbuf);

	/* Fill the BSG job reply data */
	job->reply_len = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	job->reply->result = rc;

	job->job_done(job);
	return rc;
error:
	/* free the command buffer */
	kfree(payload_kbuf);
out:
	job->reply->result = rc;
	job->reply_len = sizeof(uint32_t);
	job->reply->reply_payload_rcv_len = 0;
	return rc;
}
/* FC passthru call backs */
u64
bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->req_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}
u32
bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->req_sge + sgeid;
	return sge->sg_len;
}
u64
bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->rsp_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}
u32
bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->rsp_sge + sgeid;
	return sge->sg_len;
}
void
bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
		struct fchs_s *rsp_fchs)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;

	drv_fcxp->req_status = req_status;
	drv_fcxp->rsp_len = rsp_len;

	/* bfa_fcxp will be automatically freed by BFA */
	drv_fcxp->bfa_fcxp = NULL;
	complete(&drv_fcxp->comp);
}
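/*
 * Map the linear passthru buffer for DMA: a single DMA-coherent buffer is
 * allocated and described by one SG entry that the FCXP callbacks above
 * hand back to the hardware layer.
 */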
struct bfad_buf_info *
bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
		 uint32_t payload_len, uint32_t *num_sgles)
{
	struct bfad_buf_info *buf_base, *buf_info;
	struct bfa_sge_s *sg_table;
	int sge_num = 1;

	buf_base = kzalloc((sizeof(struct bfad_buf_info) +
			   sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
	if (!buf_base)
		return NULL;

	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
			(sizeof(struct bfad_buf_info) * sge_num));

	/* Allocate dma coherent memory */
	buf_info = buf_base;
	buf_info->size = payload_len;
	buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
					    &buf_info->phys, GFP_KERNEL);
	if (!buf_info->virt)
		goto out_free_mem;

	/* copy the linear bsg buffer to buf_info */
	memset(buf_info->virt, 0, buf_info->size);
	memcpy(buf_info->virt, payload_kbuf, buf_info->size);

	/* Setup SG table */
	sg_table->sg_len = buf_info->size;
	sg_table->sg_addr = (void *)(size_t) buf_info->phys;

	*num_sgles = sge_num;

	return buf_base;

out_free_mem:
	kfree(buf_base);
	return NULL;
}
void
bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
		uint32_t num_sgles)
{
	int i;
	struct bfad_buf_info *buf_info = buf_base;

	if (buf_base) {
		for (i = 0; i < num_sgles; buf_info++, i++) {
			if (buf_info->virt != NULL)
				dma_free_coherent(&bfad->pcidev->dev,
					buf_info->size, buf_info->virt,
					buf_info->phys);
		}
		kfree(buf_base);
	}
}
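/*
 * Allocate an FCXP from BFA and send the passthru frame; completion is
 * reported asynchronously through bfad_send_fcpt_cb().
 */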
int
bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
		bfa_bsg_fcpt_t *bsg_fcpt)
{
	struct bfa_fcxp_s *hal_fcxp;
	struct bfad_s *bfad = drv_fcxp->port->bfad;
	unsigned long flags;
	uint8_t lp_tag;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Allocate bfa_fcxp structure */
	hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
				  drv_fcxp->num_req_sgles,
				  drv_fcxp->num_rsp_sgles,
				  bfad_fcxp_get_req_sgaddr_cb,
				  bfad_fcxp_get_req_sglen_cb,
				  bfad_fcxp_get_rsp_sgaddr_cb,
				  bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
	if (!hal_fcxp) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_ENOMEM;
	}

	drv_fcxp->bfa_fcxp = hal_fcxp;

	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);

	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
		      bsg_fcpt->cts, bsg_fcpt->cos,
		      job->request_payload.payload_len,
		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
		      job->reply_payload.payload_len, bsg_fcpt->tsecs);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;
}
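/*
 * ELS/CT passthru request path: copy the user command, resolve the local
 * (and, for RPT commands, remote) port, map the request/response buffers
 * for DMA, issue the FCXP, then copy the response back to user space.
 */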
int
bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
{
	struct bfa_bsg_data *bsg_data;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	bfa_bsg_fcpt_t *bsg_fcpt;
	struct bfad_fcxp *drv_fcxp;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	uint32_t command_type = job->request->msgcode;
	unsigned long flags;
	struct bfad_buf_info *rsp_buf_info;
	void *req_kbuf = NULL, *rsp_kbuf = NULL;
	int rc = -EINVAL;

	job->reply_len = sizeof(uint32_t);	/* Atleast uint32_t reply_len */
	job->reply->reply_payload_rcv_len = 0;

	/* Get the payload passed in from userspace */
	bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
					sizeof(struct fc_bsg_request));
	if (bsg_data == NULL)
		goto out;

	/*
	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
	 * buffer of size bsg_data->payload_len
	 */
	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
	if (!bsg_fcpt) {
		rc = -ENOMEM;
		goto out;
	}

	if (copy_from_user((uint8_t *)bsg_fcpt,
				(void *)(unsigned long)bsg_data->payload,
				bsg_data->payload_len)) {
		kfree(bsg_fcpt);
		rc = -EIO;
		goto out;
	}

	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
	if (drv_fcxp == NULL) {
		kfree(bsg_fcpt);
		rc = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
					bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (drv_fcxp->port->bfad == 0)
		drv_fcxp->port->bfad = bfad;

	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
			command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;

	} else if (command_type == FC_BSG_RPT_ELS ||
			command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}

		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;

	} else { /* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->reqbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_req_sgles));

	/* map rsp sg */
	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->rspbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_rsp_sgles));

	/* fcxp send */
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		job->reply->reply_payload_rcv_len =
					sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		job->reply->reply_data.ctels_reply.status =
						FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);

out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(req_kbuf);
	kfree(rsp_kbuf);

	/* Need a copy to user op */
	if (copy_to_user((void *)(unsigned long)bsg_data->payload,
			(void *)bsg_fcpt, bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	job->reply->result = rc;

	if (rc == BFA_STATUS_OK)
		job->job_done(job);

	return rc;
}
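/* Top-level BSG entry point: dispatch on the BSG message code. */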
int
bfad_im_bsg_request(struct fc_bsg_job *job)
{
	uint32_t rc = BFA_STATUS_OK;

	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* Process BSG HST Vendor requests */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		job->reply->result = rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		break;
	}

	return rc;
}
int
bfad_im_bsg_timeout(struct fc_bsg_job *job)
{
	/* Don't complete the BSG job request - return -EAGAIN
	 * to reset bsg job timeout : for ELS/CT pass thru we
	 * already have timer to track the request.