// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */

#include <linux/uaccess.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_bsg.h"

BFA_TRC_FILE(LDRV, BSG);

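/*
 * BFA_TRC_FILE() tags this file for the BFA trace facility; the bfa_trc()
 * calls sprinkled through the handlers below record their trace points
 * against the LDRV/BSG file id.
 */
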
int
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* If IOC is not in disabled state - return */
	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_OK;
		return 0;
	}

	init_completion(&bfad->enable_comp);
	bfa_iocfc_enable(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->enable_comp);

	return 0;
}

int
bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_OK;
		return 0;
	}

	if (bfad->disable_active) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return -EBUSY;
	}

	bfad->disable_active = BFA_TRUE;
	init_completion(&bfad->disable_comp);
	bfa_iocfc_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	wait_for_completion(&bfad->disable_comp);
	bfad->disable_active = BFA_FALSE;
	iocmd->status = BFA_STATUS_OK;

	return 0;
}

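/*
 * Note on the enable/disable handlers above: bfad_lock is dropped before
 * blocking on bfad->enable_comp / bfad->disable_comp, which are completed
 * elsewhere in the driver once the IOC state change finishes, and
 * disable_active guards against a second concurrent disable request.
 */
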
int
bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
{
	int	i;
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s	*im_port;
	struct bfa_port_attr_s	pattr;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	iocmd->bfad_num = bfad->inst_no;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);

	/* set adapter hw path */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
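	/*
	 * Walk past the first ':' and terminate the string at the second,
	 * so adapter_hwpath keeps only the "domain:bus" prefix of the PCI
	 * name (e.g. "0000:0a" from "0000:0a:00.0").
	 */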
	for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++)
		;
	for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; )
		;
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
		sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
		sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;

	bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

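/*
 * Handlers that return variable-length data receive the total BSG payload
 * length; bfad_chk_iocmd_sz() (defined elsewhere in this driver) verifies
 * that the payload can hold both the fixed bfa_bsg_* request header and the
 * data that follows it, and iocmd_bufptr then points just past that header.
 */
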
int
bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_ioc_fwstats_s *iocmd =
			(struct bfa_bsg_ioc_fwstats_s *)cmd;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_ioc_fwstats_s),
			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}

	bfa_trc(bfad, 0x6666);
out:
	return 0;
}

int
bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long	flags;

	if (v_cmd == IOCMD_IOC_RESET_STATS) {
		bfa_ioc_clear_stats(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	} else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return 0;
}

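/*
 * IOCMD_IOC_SET_ADAPTER_NAME and IOCMD_IOC_SET_PORT_NAME simply cache the
 * user-supplied name in the driver instance; no hardware access is needed.
 */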
int
bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;

	if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
		strcpy(bfad->adapter_name, iocmd->name);
	else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
		strcpy(bfad->port_name, iocmd->name);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;

	iocmd->status = BFA_STATUS_OK;
	bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);

	return 0;
}

int
bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

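/*
 * Pattern used by the asynchronous handlers below: a bfad_hal_comp is set up
 * on the stack and bfad_hcb_comp is passed to the HAL call as its completion
 * callback.  Once the call has been accepted, bfad_lock is released and the
 * handler sleeps on fcomp.comp; the callback is expected to record the final
 * status in fcomp.status (roughly "fcomp->status = status;
 * complete(&fcomp->comp);") before waking the waiter.
 */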
int
bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

int
bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

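/*
 * Physical-port attributes from bfa_fcport_get_attr() are merged with
 * logical-port attributes (PID, port type, loopback, authfail, symbolic
 * name) taken from the fabric base port before being returned.
 */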
int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s port_attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;
	else
		iocmd->attr.pid = 0;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	strlcpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(iocmd->attr.port_symname.symname));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	void	*iocmd_bufptr;
	unsigned long	flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_port_stats_s),
			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
				iocmd_bufptr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

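/*
 * The IOCMD_PORT_CFG_* sub-commands share this handler; v_cmd selects which
 * fcport configuration call is made and the HAL status is returned to user
 * space in cmd->status.
 */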
int
bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
{
	struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_PORT_CFG_TOPO)
		cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_SPEED)
		cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_ALPA)
		cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CLR_ALPA)
		cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
			(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_bbcr_enable_s *iocmd =
			(struct bfa_bsg_bbcr_enable_s *)pcmd;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_PORT_BBCR_ENABLE)
		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
	else if (cmd == IOCMD_PORT_BBCR_DISABLE)
		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
	else {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = rc;
	return 0;
}

int
bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status =
		bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

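/*
 * The lport handlers below resolve the logical port from (vf_id, pwwn)
 * under bfad_lock; a failed lookup is reported back to user space as
 * BFA_STATUS_UNKNOWN_LWWN rather than as an errno.
 */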
int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_stats_s *iocmd =
			(struct bfa_bsg_lport_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

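/*
 * Resetting logical-port statistics also clears the I/O statistics of every
 * active itnim whose login tag matches this port.
 */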
int
bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_clear_stats(fcs_port);
	/* clear IO stats from all active itnims */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
			continue;
		bfa_itnim_clear_stats(itnim);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_iostats_s *iocmd =
			(struct bfa_bsg_lport_iostats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
			fcs_port->lp_tag);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

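/*
 * The remote-port list is returned as an array of bfa_rport_qualifier_s
 * entries placed immediately after the fixed request header; on input,
 * iocmd->nrports gives the number of entries the caller's buffer can hold.
 */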
int
bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_lport_get_rports_s *iocmd =
			(struct bfa_bsg_lport_get_rports_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long	flags;
	void	*iocmd_bufptr;

	if (iocmd->nrports == 0)
		return -EINVAL;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_lport_get_rports_s),
			sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
			!= BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_lport_get_rports_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_rport_quals(fcs_port,
			(struct bfa_rport_qualifier_s *)iocmd_bufptr,
			&iocmd->nrports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

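/*
 * Remote-port handlers: the rport can be looked up either purely by its
 * RPWWN or, when a PID is supplied, by the (RPWWN, PID) qualifier.
 */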
int
bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	if (iocmd->pid)
		fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
						iocmd->rpwwn, iocmd->pid);
	else
		fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	struct bfa_fcs_itnim_s	*fcs_itnim;
	struct bfad_itnim_s	*drv_itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	drv_itnim = fcs_itnim->itnim_drv;

	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;
	else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_stats_s *iocmd =
			(struct bfa_bsg_rport_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
		sizeof(struct bfa_rport_stats_s));
	if (bfa_fcs_rport_get_halrport(fcs_rport)) {
		memcpy((void *)&iocmd->stats.hal_stats,
		       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
		       sizeof(struct bfa_rport_hal_stats_s));
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

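/*
 * Clearing remote-port statistics resets both the FCS-level counters and,
 * when a HAL rport is attached, its counters as well.
 */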
int
bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	struct bfa_fcs_rport_s	*fcs_rport;
	struct bfa_rport_s	*rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
	rport = bfa_fcs_rport_get_halrport(fcs_rport);
	if (rport)
		memset(&rport->stats, 0, sizeof(rport->stats));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_set_speed_s *iocmd =
			(struct bfa_bsg_rport_set_speed_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	fcs_rport->rpf.assigned_speed = iocmd->speed;
	/* Set this speed in f/w only if the RPSC speed is not available */
	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
		if (fcs_rport->bfa_rport)
			bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

793 bfad_iocmd_vport_get_attr(struct bfad_s
*bfad
, void *cmd
)
795 struct bfa_fcs_vport_s
*fcs_vport
;
796 struct bfa_bsg_vport_attr_s
*iocmd
= (struct bfa_bsg_vport_attr_s
*)cmd
;
799 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
800 fcs_vport
= bfa_fcs_vport_lookup(&bfad
->bfa_fcs
,
801 iocmd
->vf_id
, iocmd
->vpwwn
);
802 if (fcs_vport
== NULL
) {
803 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
804 iocmd
->status
= BFA_STATUS_UNKNOWN_VWWN
;
808 bfa_fcs_vport_get_attr(fcs_vport
, &iocmd
->vport_attr
);
809 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
810 iocmd
->status
= BFA_STATUS_OK
;
816 bfad_iocmd_vport_get_stats(struct bfad_s
*bfad
, void *cmd
)
818 struct bfa_fcs_vport_s
*fcs_vport
;
819 struct bfa_bsg_vport_stats_s
*iocmd
=
820 (struct bfa_bsg_vport_stats_s
*)cmd
;
823 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
824 fcs_vport
= bfa_fcs_vport_lookup(&bfad
->bfa_fcs
,
825 iocmd
->vf_id
, iocmd
->vpwwn
);
826 if (fcs_vport
== NULL
) {
827 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
828 iocmd
->status
= BFA_STATUS_UNKNOWN_VWWN
;
832 memcpy((void *)&iocmd
->vport_stats
, (void *)&fcs_vport
->vport_stats
,
833 sizeof(struct bfa_vport_stats_s
));
834 memcpy((void *)&iocmd
->vport_stats
.port_stats
,
835 (void *)&fcs_vport
->lport
.stats
,
836 sizeof(struct bfa_lport_stats_s
));
837 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
838 iocmd
->status
= BFA_STATUS_OK
;
844 bfad_iocmd_vport_clr_stats(struct bfad_s
*bfad
, void *cmd
)
846 struct bfa_fcs_vport_s
*fcs_vport
;
847 struct bfa_bsg_reset_stats_s
*iocmd
=
848 (struct bfa_bsg_reset_stats_s
*)cmd
;
851 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
852 fcs_vport
= bfa_fcs_vport_lookup(&bfad
->bfa_fcs
,
853 iocmd
->vf_id
, iocmd
->vpwwn
);
854 if (fcs_vport
== NULL
) {
855 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
856 iocmd
->status
= BFA_STATUS_UNKNOWN_VWWN
;
860 memset(&fcs_vport
->vport_stats
, 0, sizeof(struct bfa_vport_stats_s
));
861 memset(&fcs_vport
->lport
.stats
, 0, sizeof(struct bfa_lport_stats_s
));
862 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
863 iocmd
->status
= BFA_STATUS_OK
;
869 bfad_iocmd_fabric_get_lports(struct bfad_s
*bfad
, void *cmd
,
870 unsigned int payload_len
)
872 struct bfa_bsg_fabric_get_lports_s
*iocmd
=
873 (struct bfa_bsg_fabric_get_lports_s
*)cmd
;
874 bfa_fcs_vf_t
*fcs_vf
;
875 uint32_t nports
= iocmd
->nports
;
880 iocmd
->status
= BFA_STATUS_EINVAL
;
884 if (bfad_chk_iocmd_sz(payload_len
,
885 sizeof(struct bfa_bsg_fabric_get_lports_s
),
886 sizeof(wwn_t
) * iocmd
->nports
) != BFA_STATUS_OK
) {
887 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
891 iocmd_bufptr
= (char *)iocmd
+
892 sizeof(struct bfa_bsg_fabric_get_lports_s
);
894 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
895 fcs_vf
= bfa_fcs_vf_lookup(&bfad
->bfa_fcs
, iocmd
->vf_id
);
896 if (fcs_vf
== NULL
) {
897 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
898 iocmd
->status
= BFA_STATUS_UNKNOWN_VFID
;
901 bfa_fcs_vf_get_ports(fcs_vf
, (wwn_t
*)iocmd_bufptr
, &nports
);
902 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
904 iocmd
->nports
= nports
;
905 iocmd
->status
= BFA_STATUS_OK
;
911 bfad_iocmd_qos_set_bw(struct bfad_s
*bfad
, void *pcmd
)
913 struct bfa_bsg_qos_bw_s
*iocmd
= (struct bfa_bsg_qos_bw_s
*)pcmd
;
916 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
917 iocmd
->status
= bfa_fcport_set_qos_bw(&bfad
->bfa
, &iocmd
->qos_bw
);
918 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
924 bfad_iocmd_ratelim(struct bfad_s
*bfad
, unsigned int cmd
, void *pcmd
)
926 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)pcmd
;
927 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(&bfad
->bfa
);
930 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
932 if ((fcport
->cfg
.topology
== BFA_PORT_TOPOLOGY_LOOP
) &&
933 (fcport
->topology
== BFA_PORT_TOPOLOGY_LOOP
))
934 iocmd
->status
= BFA_STATUS_TOPOLOGY_LOOP
;
936 if (cmd
== IOCMD_RATELIM_ENABLE
)
937 fcport
->cfg
.ratelimit
= BFA_TRUE
;
938 else if (cmd
== IOCMD_RATELIM_DISABLE
)
939 fcport
->cfg
.ratelimit
= BFA_FALSE
;
941 if (fcport
->cfg
.trl_def_speed
== BFA_PORT_SPEED_UNKNOWN
)
942 fcport
->cfg
.trl_def_speed
= BFA_PORT_SPEED_1GBPS
;
944 iocmd
->status
= BFA_STATUS_OK
;
947 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
953 bfad_iocmd_ratelim_speed(struct bfad_s
*bfad
, unsigned int cmd
, void *pcmd
)
955 struct bfa_bsg_trl_speed_s
*iocmd
= (struct bfa_bsg_trl_speed_s
*)pcmd
;
956 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(&bfad
->bfa
);
959 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
961 /* Auto and speeds greater than the supported speed, are invalid */
962 if ((iocmd
->speed
== BFA_PORT_SPEED_AUTO
) ||
963 (iocmd
->speed
> fcport
->speed_sup
)) {
964 iocmd
->status
= BFA_STATUS_UNSUPP_SPEED
;
965 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
969 if ((fcport
->cfg
.topology
== BFA_PORT_TOPOLOGY_LOOP
) &&
970 (fcport
->topology
== BFA_PORT_TOPOLOGY_LOOP
))
971 iocmd
->status
= BFA_STATUS_TOPOLOGY_LOOP
;
973 fcport
->cfg
.trl_def_speed
= iocmd
->speed
;
974 iocmd
->status
= BFA_STATUS_OK
;
976 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
982 bfad_iocmd_cfg_fcpim(struct bfad_s
*bfad
, void *cmd
)
984 struct bfa_bsg_fcpim_s
*iocmd
= (struct bfa_bsg_fcpim_s
*)cmd
;
987 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
988 bfa_fcpim_path_tov_set(&bfad
->bfa
, iocmd
->param
);
989 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
990 iocmd
->status
= BFA_STATUS_OK
;
995 bfad_iocmd_fcpim_get_modstats(struct bfad_s
*bfad
, void *cmd
)
997 struct bfa_bsg_fcpim_modstats_s
*iocmd
=
998 (struct bfa_bsg_fcpim_modstats_s
*)cmd
;
999 struct bfa_fcpim_s
*fcpim
= BFA_FCPIM(&bfad
->bfa
);
1000 struct list_head
*qe
, *qen
;
1001 struct bfa_itnim_s
*itnim
;
1002 unsigned long flags
;
1004 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1005 /* accumulate IO stats from itnim */
1006 memset((void *)&iocmd
->modstats
, 0, sizeof(struct bfa_itnim_iostats_s
));
1007 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
1008 itnim
= (struct bfa_itnim_s
*) qe
;
1009 bfa_fcpim_add_stats(&iocmd
->modstats
, &(itnim
->stats
));
1011 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1012 iocmd
->status
= BFA_STATUS_OK
;
1017 bfad_iocmd_fcpim_clr_modstats(struct bfad_s
*bfad
, void *cmd
)
1019 struct bfa_bsg_fcpim_modstatsclr_s
*iocmd
=
1020 (struct bfa_bsg_fcpim_modstatsclr_s
*)cmd
;
1021 struct bfa_fcpim_s
*fcpim
= BFA_FCPIM(&bfad
->bfa
);
1022 struct list_head
*qe
, *qen
;
1023 struct bfa_itnim_s
*itnim
;
1024 unsigned long flags
;
1026 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1027 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
1028 itnim
= (struct bfa_itnim_s
*) qe
;
1029 bfa_itnim_clear_stats(itnim
);
1031 memset(&fcpim
->del_itn_stats
, 0,
1032 sizeof(struct bfa_fcpim_del_itn_stats_s
));
1033 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1034 iocmd
->status
= BFA_STATUS_OK
;
1039 bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s
*bfad
, void *cmd
)
1041 struct bfa_bsg_fcpim_del_itn_stats_s
*iocmd
=
1042 (struct bfa_bsg_fcpim_del_itn_stats_s
*)cmd
;
1043 struct bfa_fcpim_s
*fcpim
= BFA_FCPIM(&bfad
->bfa
);
1044 unsigned long flags
;
1046 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1047 memcpy((void *)&iocmd
->modstats
, (void *)&fcpim
->del_itn_stats
,
1048 sizeof(struct bfa_fcpim_del_itn_stats_s
));
1049 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1051 iocmd
->status
= BFA_STATUS_OK
;
1056 bfad_iocmd_itnim_get_attr(struct bfad_s
*bfad
, void *cmd
)
1058 struct bfa_bsg_itnim_attr_s
*iocmd
= (struct bfa_bsg_itnim_attr_s
*)cmd
;
1059 struct bfa_fcs_lport_s
*fcs_port
;
1060 unsigned long flags
;
1062 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1063 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
1064 iocmd
->vf_id
, iocmd
->lpwwn
);
1066 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
1068 iocmd
->status
= bfa_fcs_itnim_attr_get(fcs_port
,
1069 iocmd
->rpwwn
, &iocmd
->attr
);
1070 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1075 bfad_iocmd_itnim_get_iostats(struct bfad_s
*bfad
, void *cmd
)
1077 struct bfa_bsg_itnim_iostats_s
*iocmd
=
1078 (struct bfa_bsg_itnim_iostats_s
*)cmd
;
1079 struct bfa_fcs_lport_s
*fcs_port
;
1080 struct bfa_fcs_itnim_s
*itnim
;
1081 unsigned long flags
;
1083 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1084 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
1085 iocmd
->vf_id
, iocmd
->lpwwn
);
1087 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
1090 itnim
= bfa_fcs_itnim_lookup(fcs_port
, iocmd
->rpwwn
);
1092 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
1094 iocmd
->status
= BFA_STATUS_OK
;
1095 if (bfa_fcs_itnim_get_halitn(itnim
))
1096 memcpy((void *)&iocmd
->iostats
, (void *)
1097 &(bfa_fcs_itnim_get_halitn(itnim
)->stats
),
1098 sizeof(struct bfa_itnim_iostats_s
));
1101 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1106 bfad_iocmd_itnim_reset_stats(struct bfad_s
*bfad
, void *cmd
)
1108 struct bfa_bsg_rport_reset_stats_s
*iocmd
=
1109 (struct bfa_bsg_rport_reset_stats_s
*)cmd
;
1110 struct bfa_fcs_lport_s
*fcs_port
;
1111 struct bfa_fcs_itnim_s
*itnim
;
1112 unsigned long flags
;
1114 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1115 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
1116 iocmd
->vf_id
, iocmd
->pwwn
);
1118 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
1120 itnim
= bfa_fcs_itnim_lookup(fcs_port
, iocmd
->rpwwn
);
1122 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
1124 iocmd
->status
= BFA_STATUS_OK
;
1125 bfa_fcs_itnim_stats_clear(fcs_port
, iocmd
->rpwwn
);
1126 bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim
));
1129 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1135 bfad_iocmd_itnim_get_itnstats(struct bfad_s
*bfad
, void *cmd
)
1137 struct bfa_bsg_itnim_itnstats_s
*iocmd
=
1138 (struct bfa_bsg_itnim_itnstats_s
*)cmd
;
1139 struct bfa_fcs_lport_s
*fcs_port
;
1140 struct bfa_fcs_itnim_s
*itnim
;
1141 unsigned long flags
;
1143 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1144 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
1145 iocmd
->vf_id
, iocmd
->lpwwn
);
1147 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
1150 itnim
= bfa_fcs_itnim_lookup(fcs_port
, iocmd
->rpwwn
);
1152 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
1154 iocmd
->status
= BFA_STATUS_OK
;
1155 bfa_fcs_itnim_stats_get(fcs_port
, iocmd
->rpwwn
,
1159 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1164 bfad_iocmd_fcport_enable(struct bfad_s
*bfad
, void *cmd
)
1166 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
1167 unsigned long flags
;
1169 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1170 iocmd
->status
= bfa_fcport_enable(&bfad
->bfa
);
1171 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1177 bfad_iocmd_fcport_disable(struct bfad_s
*bfad
, void *cmd
)
1179 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
1180 unsigned long flags
;
1182 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1183 iocmd
->status
= bfa_fcport_disable(&bfad
->bfa
);
1184 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1190 bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s
*bfad
, void *cmd
)
1192 struct bfa_bsg_pcifn_cfg_s
*iocmd
= (struct bfa_bsg_pcifn_cfg_s
*)cmd
;
1193 struct bfad_hal_comp fcomp
;
1194 unsigned long flags
;
1196 init_completion(&fcomp
.comp
);
1197 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1198 iocmd
->status
= bfa_ablk_query(&bfad
->bfa
.modules
.ablk
,
1200 bfad_hcb_comp
, &fcomp
);
1201 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1202 if (iocmd
->status
!= BFA_STATUS_OK
)
1205 wait_for_completion(&fcomp
.comp
);
1206 iocmd
->status
= fcomp
.status
;
1212 bfad_iocmd_pcifn_create(struct bfad_s
*bfad
, void *cmd
)
1214 struct bfa_bsg_pcifn_s
*iocmd
= (struct bfa_bsg_pcifn_s
*)cmd
;
1215 struct bfad_hal_comp fcomp
;
1216 unsigned long flags
;
1218 init_completion(&fcomp
.comp
);
1219 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1220 iocmd
->status
= bfa_ablk_pf_create(&bfad
->bfa
.modules
.ablk
,
1221 &iocmd
->pcifn_id
, iocmd
->port
,
1222 iocmd
->pcifn_class
, iocmd
->bw_min
,
1223 iocmd
->bw_max
, bfad_hcb_comp
, &fcomp
);
1224 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1225 if (iocmd
->status
!= BFA_STATUS_OK
)
1228 wait_for_completion(&fcomp
.comp
);
1229 iocmd
->status
= fcomp
.status
;
1235 bfad_iocmd_pcifn_delete(struct bfad_s
*bfad
, void *cmd
)
1237 struct bfa_bsg_pcifn_s
*iocmd
= (struct bfa_bsg_pcifn_s
*)cmd
;
1238 struct bfad_hal_comp fcomp
;
1239 unsigned long flags
;
1241 init_completion(&fcomp
.comp
);
1242 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1243 iocmd
->status
= bfa_ablk_pf_delete(&bfad
->bfa
.modules
.ablk
,
1245 bfad_hcb_comp
, &fcomp
);
1246 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1247 if (iocmd
->status
!= BFA_STATUS_OK
)
1250 wait_for_completion(&fcomp
.comp
);
1251 iocmd
->status
= fcomp
.status
;
1257 bfad_iocmd_pcifn_bw(struct bfad_s
*bfad
, void *cmd
)
1259 struct bfa_bsg_pcifn_s
*iocmd
= (struct bfa_bsg_pcifn_s
*)cmd
;
1260 struct bfad_hal_comp fcomp
;
1261 unsigned long flags
;
1263 init_completion(&fcomp
.comp
);
1264 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1265 iocmd
->status
= bfa_ablk_pf_update(&bfad
->bfa
.modules
.ablk
,
1266 iocmd
->pcifn_id
, iocmd
->bw_min
,
1267 iocmd
->bw_max
, bfad_hcb_comp
, &fcomp
);
1268 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1269 bfa_trc(bfad
, iocmd
->status
);
1270 if (iocmd
->status
!= BFA_STATUS_OK
)
1273 wait_for_completion(&fcomp
.comp
);
1274 iocmd
->status
= fcomp
.status
;
1275 bfa_trc(bfad
, iocmd
->status
);
1281 bfad_iocmd_adapter_cfg_mode(struct bfad_s
*bfad
, void *cmd
)
1283 struct bfa_bsg_adapter_cfg_mode_s
*iocmd
=
1284 (struct bfa_bsg_adapter_cfg_mode_s
*)cmd
;
1285 struct bfad_hal_comp fcomp
;
1286 unsigned long flags
= 0;
1288 init_completion(&fcomp
.comp
);
1289 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1290 iocmd
->status
= bfa_ablk_adapter_config(&bfad
->bfa
.modules
.ablk
,
1291 iocmd
->cfg
.mode
, iocmd
->cfg
.max_pf
,
1292 iocmd
->cfg
.max_vf
, bfad_hcb_comp
, &fcomp
);
1293 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1294 if (iocmd
->status
!= BFA_STATUS_OK
)
1297 wait_for_completion(&fcomp
.comp
);
1298 iocmd
->status
= fcomp
.status
;
1304 bfad_iocmd_port_cfg_mode(struct bfad_s
*bfad
, void *cmd
)
1306 struct bfa_bsg_port_cfg_mode_s
*iocmd
=
1307 (struct bfa_bsg_port_cfg_mode_s
*)cmd
;
1308 struct bfad_hal_comp fcomp
;
1309 unsigned long flags
= 0;
1311 init_completion(&fcomp
.comp
);
1312 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1313 iocmd
->status
= bfa_ablk_port_config(&bfad
->bfa
.modules
.ablk
,
1314 iocmd
->instance
, iocmd
->cfg
.mode
,
1315 iocmd
->cfg
.max_pf
, iocmd
->cfg
.max_vf
,
1316 bfad_hcb_comp
, &fcomp
);
1317 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1318 if (iocmd
->status
!= BFA_STATUS_OK
)
1321 wait_for_completion(&fcomp
.comp
);
1322 iocmd
->status
= fcomp
.status
;
1328 bfad_iocmd_ablk_optrom(struct bfad_s
*bfad
, unsigned int cmd
, void *pcmd
)
1330 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)pcmd
;
1331 struct bfad_hal_comp fcomp
;
1332 unsigned long flags
;
1334 init_completion(&fcomp
.comp
);
1335 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1336 if (cmd
== IOCMD_FLASH_ENABLE_OPTROM
)
1337 iocmd
->status
= bfa_ablk_optrom_en(&bfad
->bfa
.modules
.ablk
,
1338 bfad_hcb_comp
, &fcomp
);
1340 iocmd
->status
= bfa_ablk_optrom_dis(&bfad
->bfa
.modules
.ablk
,
1341 bfad_hcb_comp
, &fcomp
);
1342 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1344 if (iocmd
->status
!= BFA_STATUS_OK
)
1347 wait_for_completion(&fcomp
.comp
);
1348 iocmd
->status
= fcomp
.status
;
1354 bfad_iocmd_faa_query(struct bfad_s
*bfad
, void *cmd
)
1356 struct bfa_bsg_faa_attr_s
*iocmd
= (struct bfa_bsg_faa_attr_s
*)cmd
;
1357 struct bfad_hal_comp fcomp
;
1358 unsigned long flags
;
1360 init_completion(&fcomp
.comp
);
1361 iocmd
->status
= BFA_STATUS_OK
;
1362 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1363 iocmd
->status
= bfa_faa_query(&bfad
->bfa
, &iocmd
->faa_attr
,
1364 bfad_hcb_comp
, &fcomp
);
1365 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1367 if (iocmd
->status
!= BFA_STATUS_OK
)
1370 wait_for_completion(&fcomp
.comp
);
1371 iocmd
->status
= fcomp
.status
;
1377 bfad_iocmd_cee_attr(struct bfad_s
*bfad
, void *cmd
, unsigned int payload_len
)
1379 struct bfa_bsg_cee_attr_s
*iocmd
=
1380 (struct bfa_bsg_cee_attr_s
*)cmd
;
1382 struct bfad_hal_comp cee_comp
;
1383 unsigned long flags
;
1385 if (bfad_chk_iocmd_sz(payload_len
,
1386 sizeof(struct bfa_bsg_cee_attr_s
),
1387 sizeof(struct bfa_cee_attr_s
)) != BFA_STATUS_OK
) {
1388 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1392 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_cee_attr_s
);
1394 cee_comp
.status
= 0;
1395 init_completion(&cee_comp
.comp
);
1396 mutex_lock(&bfad_mutex
);
1397 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1398 iocmd
->status
= bfa_cee_get_attr(&bfad
->bfa
.modules
.cee
, iocmd_bufptr
,
1399 bfad_hcb_comp
, &cee_comp
);
1400 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1401 if (iocmd
->status
!= BFA_STATUS_OK
) {
1402 mutex_unlock(&bfad_mutex
);
1403 bfa_trc(bfad
, 0x5555);
1406 wait_for_completion(&cee_comp
.comp
);
1407 mutex_unlock(&bfad_mutex
);
1413 bfad_iocmd_cee_get_stats(struct bfad_s
*bfad
, void *cmd
,
1414 unsigned int payload_len
)
1416 struct bfa_bsg_cee_stats_s
*iocmd
=
1417 (struct bfa_bsg_cee_stats_s
*)cmd
;
1419 struct bfad_hal_comp cee_comp
;
1420 unsigned long flags
;
1422 if (bfad_chk_iocmd_sz(payload_len
,
1423 sizeof(struct bfa_bsg_cee_stats_s
),
1424 sizeof(struct bfa_cee_stats_s
)) != BFA_STATUS_OK
) {
1425 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1429 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_cee_stats_s
);
1431 cee_comp
.status
= 0;
1432 init_completion(&cee_comp
.comp
);
1433 mutex_lock(&bfad_mutex
);
1434 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1435 iocmd
->status
= bfa_cee_get_stats(&bfad
->bfa
.modules
.cee
, iocmd_bufptr
,
1436 bfad_hcb_comp
, &cee_comp
);
1437 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1438 if (iocmd
->status
!= BFA_STATUS_OK
) {
1439 mutex_unlock(&bfad_mutex
);
1440 bfa_trc(bfad
, 0x5555);
1443 wait_for_completion(&cee_comp
.comp
);
1444 mutex_unlock(&bfad_mutex
);
1450 bfad_iocmd_cee_reset_stats(struct bfad_s
*bfad
, void *cmd
)
1452 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
1453 unsigned long flags
;
1455 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1456 iocmd
->status
= bfa_cee_reset_stats(&bfad
->bfa
.modules
.cee
, NULL
, NULL
);
1457 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1458 if (iocmd
->status
!= BFA_STATUS_OK
)
1459 bfa_trc(bfad
, 0x5555);
1464 bfad_iocmd_sfp_media(struct bfad_s
*bfad
, void *cmd
)
1466 struct bfa_bsg_sfp_media_s
*iocmd
= (struct bfa_bsg_sfp_media_s
*)cmd
;
1467 struct bfad_hal_comp fcomp
;
1468 unsigned long flags
;
1470 init_completion(&fcomp
.comp
);
1471 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1472 iocmd
->status
= bfa_sfp_media(BFA_SFP_MOD(&bfad
->bfa
), &iocmd
->media
,
1473 bfad_hcb_comp
, &fcomp
);
1474 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1475 bfa_trc(bfad
, iocmd
->status
);
1476 if (iocmd
->status
!= BFA_STATUS_SFP_NOT_READY
)
1479 wait_for_completion(&fcomp
.comp
);
1480 iocmd
->status
= fcomp
.status
;
1486 bfad_iocmd_sfp_speed(struct bfad_s
*bfad
, void *cmd
)
1488 struct bfa_bsg_sfp_speed_s
*iocmd
= (struct bfa_bsg_sfp_speed_s
*)cmd
;
1489 struct bfad_hal_comp fcomp
;
1490 unsigned long flags
;
1492 init_completion(&fcomp
.comp
);
1493 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1494 iocmd
->status
= bfa_sfp_speed(BFA_SFP_MOD(&bfad
->bfa
), iocmd
->speed
,
1495 bfad_hcb_comp
, &fcomp
);
1496 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1497 bfa_trc(bfad
, iocmd
->status
);
1498 if (iocmd
->status
!= BFA_STATUS_SFP_NOT_READY
)
1500 wait_for_completion(&fcomp
.comp
);
1501 iocmd
->status
= fcomp
.status
;
1507 bfad_iocmd_flash_get_attr(struct bfad_s
*bfad
, void *cmd
)
1509 struct bfa_bsg_flash_attr_s
*iocmd
=
1510 (struct bfa_bsg_flash_attr_s
*)cmd
;
1511 struct bfad_hal_comp fcomp
;
1512 unsigned long flags
;
1514 init_completion(&fcomp
.comp
);
1515 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1516 iocmd
->status
= bfa_flash_get_attr(BFA_FLASH(&bfad
->bfa
), &iocmd
->attr
,
1517 bfad_hcb_comp
, &fcomp
);
1518 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1519 if (iocmd
->status
!= BFA_STATUS_OK
)
1521 wait_for_completion(&fcomp
.comp
);
1522 iocmd
->status
= fcomp
.status
;
1528 bfad_iocmd_flash_erase_part(struct bfad_s
*bfad
, void *cmd
)
1530 struct bfa_bsg_flash_s
*iocmd
= (struct bfa_bsg_flash_s
*)cmd
;
1531 struct bfad_hal_comp fcomp
;
1532 unsigned long flags
;
1534 init_completion(&fcomp
.comp
);
1535 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1536 iocmd
->status
= bfa_flash_erase_part(BFA_FLASH(&bfad
->bfa
), iocmd
->type
,
1537 iocmd
->instance
, bfad_hcb_comp
, &fcomp
);
1538 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1539 if (iocmd
->status
!= BFA_STATUS_OK
)
1541 wait_for_completion(&fcomp
.comp
);
1542 iocmd
->status
= fcomp
.status
;
1548 bfad_iocmd_flash_update_part(struct bfad_s
*bfad
, void *cmd
,
1549 unsigned int payload_len
)
1551 struct bfa_bsg_flash_s
*iocmd
= (struct bfa_bsg_flash_s
*)cmd
;
1553 struct bfad_hal_comp fcomp
;
1554 unsigned long flags
;
1556 if (bfad_chk_iocmd_sz(payload_len
,
1557 sizeof(struct bfa_bsg_flash_s
),
1558 iocmd
->bufsz
) != BFA_STATUS_OK
) {
1559 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1563 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_flash_s
);
1565 init_completion(&fcomp
.comp
);
1566 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1567 iocmd
->status
= bfa_flash_update_part(BFA_FLASH(&bfad
->bfa
),
1568 iocmd
->type
, iocmd
->instance
, iocmd_bufptr
,
1569 iocmd
->bufsz
, 0, bfad_hcb_comp
, &fcomp
);
1570 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1571 if (iocmd
->status
!= BFA_STATUS_OK
)
1573 wait_for_completion(&fcomp
.comp
);
1574 iocmd
->status
= fcomp
.status
;
1580 bfad_iocmd_flash_read_part(struct bfad_s
*bfad
, void *cmd
,
1581 unsigned int payload_len
)
1583 struct bfa_bsg_flash_s
*iocmd
= (struct bfa_bsg_flash_s
*)cmd
;
1584 struct bfad_hal_comp fcomp
;
1586 unsigned long flags
;
1588 if (bfad_chk_iocmd_sz(payload_len
,
1589 sizeof(struct bfa_bsg_flash_s
),
1590 iocmd
->bufsz
) != BFA_STATUS_OK
) {
1591 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1595 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_flash_s
);
1597 init_completion(&fcomp
.comp
);
1598 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1599 iocmd
->status
= bfa_flash_read_part(BFA_FLASH(&bfad
->bfa
), iocmd
->type
,
1600 iocmd
->instance
, iocmd_bufptr
, iocmd
->bufsz
, 0,
1601 bfad_hcb_comp
, &fcomp
);
1602 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1603 if (iocmd
->status
!= BFA_STATUS_OK
)
1605 wait_for_completion(&fcomp
.comp
);
1606 iocmd
->status
= fcomp
.status
;
1612 bfad_iocmd_diag_temp(struct bfad_s
*bfad
, void *cmd
)
1614 struct bfa_bsg_diag_get_temp_s
*iocmd
=
1615 (struct bfa_bsg_diag_get_temp_s
*)cmd
;
1616 struct bfad_hal_comp fcomp
;
1617 unsigned long flags
;
1619 init_completion(&fcomp
.comp
);
1620 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1621 iocmd
->status
= bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad
->bfa
),
1622 &iocmd
->result
, bfad_hcb_comp
, &fcomp
);
1623 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1624 bfa_trc(bfad
, iocmd
->status
);
1625 if (iocmd
->status
!= BFA_STATUS_OK
)
1627 wait_for_completion(&fcomp
.comp
);
1628 iocmd
->status
= fcomp
.status
;
1634 bfad_iocmd_diag_memtest(struct bfad_s
*bfad
, void *cmd
)
1636 struct bfa_bsg_diag_memtest_s
*iocmd
=
1637 (struct bfa_bsg_diag_memtest_s
*)cmd
;
1638 struct bfad_hal_comp fcomp
;
1639 unsigned long flags
;
1641 init_completion(&fcomp
.comp
);
1642 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1643 iocmd
->status
= bfa_diag_memtest(BFA_DIAG_MOD(&bfad
->bfa
),
1644 &iocmd
->memtest
, iocmd
->pat
,
1645 &iocmd
->result
, bfad_hcb_comp
, &fcomp
);
1646 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1647 bfa_trc(bfad
, iocmd
->status
);
1648 if (iocmd
->status
!= BFA_STATUS_OK
)
1650 wait_for_completion(&fcomp
.comp
);
1651 iocmd
->status
= fcomp
.status
;
1657 bfad_iocmd_diag_loopback(struct bfad_s
*bfad
, void *cmd
)
1659 struct bfa_bsg_diag_loopback_s
*iocmd
=
1660 (struct bfa_bsg_diag_loopback_s
*)cmd
;
1661 struct bfad_hal_comp fcomp
;
1662 unsigned long flags
;
1664 init_completion(&fcomp
.comp
);
1665 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1666 iocmd
->status
= bfa_fcdiag_loopback(&bfad
->bfa
, iocmd
->opmode
,
1667 iocmd
->speed
, iocmd
->lpcnt
, iocmd
->pat
,
1668 &iocmd
->result
, bfad_hcb_comp
, &fcomp
);
1669 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1670 bfa_trc(bfad
, iocmd
->status
);
1671 if (iocmd
->status
!= BFA_STATUS_OK
)
1673 wait_for_completion(&fcomp
.comp
);
1674 iocmd
->status
= fcomp
.status
;
1680 bfad_iocmd_diag_fwping(struct bfad_s
*bfad
, void *cmd
)
1682 struct bfa_bsg_diag_fwping_s
*iocmd
=
1683 (struct bfa_bsg_diag_fwping_s
*)cmd
;
1684 struct bfad_hal_comp fcomp
;
1685 unsigned long flags
;
1687 init_completion(&fcomp
.comp
);
1688 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1689 iocmd
->status
= bfa_diag_fwping(BFA_DIAG_MOD(&bfad
->bfa
), iocmd
->cnt
,
1690 iocmd
->pattern
, &iocmd
->result
,
1691 bfad_hcb_comp
, &fcomp
);
1692 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1693 bfa_trc(bfad
, iocmd
->status
);
1694 if (iocmd
->status
!= BFA_STATUS_OK
)
1696 bfa_trc(bfad
, 0x77771);
1697 wait_for_completion(&fcomp
.comp
);
1698 iocmd
->status
= fcomp
.status
;
1704 bfad_iocmd_diag_queuetest(struct bfad_s
*bfad
, void *cmd
)
1706 struct bfa_bsg_diag_qtest_s
*iocmd
= (struct bfa_bsg_diag_qtest_s
*)cmd
;
1707 struct bfad_hal_comp fcomp
;
1708 unsigned long flags
;
1710 init_completion(&fcomp
.comp
);
1711 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1712 iocmd
->status
= bfa_fcdiag_queuetest(&bfad
->bfa
, iocmd
->force
,
1713 iocmd
->queue
, &iocmd
->result
,
1714 bfad_hcb_comp
, &fcomp
);
1715 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1716 if (iocmd
->status
!= BFA_STATUS_OK
)
1718 wait_for_completion(&fcomp
.comp
);
1719 iocmd
->status
= fcomp
.status
;
1725 bfad_iocmd_diag_sfp(struct bfad_s
*bfad
, void *cmd
)
1727 struct bfa_bsg_sfp_show_s
*iocmd
=
1728 (struct bfa_bsg_sfp_show_s
*)cmd
;
1729 struct bfad_hal_comp fcomp
;
1730 unsigned long flags
;
1732 init_completion(&fcomp
.comp
);
1733 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1734 iocmd
->status
= bfa_sfp_show(BFA_SFP_MOD(&bfad
->bfa
), &iocmd
->sfp
,
1735 bfad_hcb_comp
, &fcomp
);
1736 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1737 bfa_trc(bfad
, iocmd
->status
);
1738 if (iocmd
->status
!= BFA_STATUS_OK
)
1740 wait_for_completion(&fcomp
.comp
);
1741 iocmd
->status
= fcomp
.status
;
1742 bfa_trc(bfad
, iocmd
->status
);
1748 bfad_iocmd_diag_led(struct bfad_s
*bfad
, void *cmd
)
1750 struct bfa_bsg_diag_led_s
*iocmd
= (struct bfa_bsg_diag_led_s
*)cmd
;
1751 unsigned long flags
;
1753 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1754 iocmd
->status
= bfa_diag_ledtest(BFA_DIAG_MOD(&bfad
->bfa
),
1756 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1761 bfad_iocmd_diag_beacon_lport(struct bfad_s
*bfad
, void *cmd
)
1763 struct bfa_bsg_diag_beacon_s
*iocmd
=
1764 (struct bfa_bsg_diag_beacon_s
*)cmd
;
1765 unsigned long flags
;
1767 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1768 iocmd
->status
= bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad
->bfa
),
1769 iocmd
->beacon
, iocmd
->link_e2e_beacon
,
1771 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1776 bfad_iocmd_diag_lb_stat(struct bfad_s
*bfad
, void *cmd
)
1778 struct bfa_bsg_diag_lb_stat_s
*iocmd
=
1779 (struct bfa_bsg_diag_lb_stat_s
*)cmd
;
1780 unsigned long flags
;
1782 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1783 iocmd
->status
= bfa_fcdiag_lb_is_running(&bfad
->bfa
);
1784 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1785 bfa_trc(bfad
, iocmd
->status
);
1791 bfad_iocmd_diag_dport_enable(struct bfad_s
*bfad
, void *pcmd
)
1793 struct bfa_bsg_dport_enable_s
*iocmd
=
1794 (struct bfa_bsg_dport_enable_s
*)pcmd
;
1795 unsigned long flags
;
1796 struct bfad_hal_comp fcomp
;
1798 init_completion(&fcomp
.comp
);
1799 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1800 iocmd
->status
= bfa_dport_enable(&bfad
->bfa
, iocmd
->lpcnt
,
1801 iocmd
->pat
, bfad_hcb_comp
, &fcomp
);
1802 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1803 if (iocmd
->status
!= BFA_STATUS_OK
)
1804 bfa_trc(bfad
, iocmd
->status
);
1806 wait_for_completion(&fcomp
.comp
);
1807 iocmd
->status
= fcomp
.status
;
1813 bfad_iocmd_diag_dport_disable(struct bfad_s
*bfad
, void *pcmd
)
1815 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)pcmd
;
1816 unsigned long flags
;
1817 struct bfad_hal_comp fcomp
;
1819 init_completion(&fcomp
.comp
);
1820 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1821 iocmd
->status
= bfa_dport_disable(&bfad
->bfa
, bfad_hcb_comp
, &fcomp
);
1822 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1823 if (iocmd
->status
!= BFA_STATUS_OK
)
1824 bfa_trc(bfad
, iocmd
->status
);
1826 wait_for_completion(&fcomp
.comp
);
1827 iocmd
->status
= fcomp
.status
;
1833 bfad_iocmd_diag_dport_start(struct bfad_s
*bfad
, void *pcmd
)
1835 struct bfa_bsg_dport_enable_s
*iocmd
=
1836 (struct bfa_bsg_dport_enable_s
*)pcmd
;
1837 unsigned long flags
;
1838 struct bfad_hal_comp fcomp
;
1840 init_completion(&fcomp
.comp
);
1841 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1842 iocmd
->status
= bfa_dport_start(&bfad
->bfa
, iocmd
->lpcnt
,
1843 iocmd
->pat
, bfad_hcb_comp
,
1845 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1847 if (iocmd
->status
!= BFA_STATUS_OK
) {
1848 bfa_trc(bfad
, iocmd
->status
);
1850 wait_for_completion(&fcomp
.comp
);
1851 iocmd
->status
= fcomp
.status
;
1858 bfad_iocmd_diag_dport_show(struct bfad_s
*bfad
, void *pcmd
)
1860 struct bfa_bsg_diag_dport_show_s
*iocmd
=
1861 (struct bfa_bsg_diag_dport_show_s
*)pcmd
;
1862 unsigned long flags
;
1864 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1865 iocmd
->status
= bfa_dport_show(&bfad
->bfa
, &iocmd
->result
);
1866 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1873 bfad_iocmd_phy_get_attr(struct bfad_s
*bfad
, void *cmd
)
1875 struct bfa_bsg_phy_attr_s
*iocmd
=
1876 (struct bfa_bsg_phy_attr_s
*)cmd
;
1877 struct bfad_hal_comp fcomp
;
1878 unsigned long flags
;
1880 init_completion(&fcomp
.comp
);
1881 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1882 iocmd
->status
= bfa_phy_get_attr(BFA_PHY(&bfad
->bfa
), iocmd
->instance
,
1883 &iocmd
->attr
, bfad_hcb_comp
, &fcomp
);
1884 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1885 if (iocmd
->status
!= BFA_STATUS_OK
)
1887 wait_for_completion(&fcomp
.comp
);
1888 iocmd
->status
= fcomp
.status
;
1894 bfad_iocmd_phy_get_stats(struct bfad_s
*bfad
, void *cmd
)
1896 struct bfa_bsg_phy_stats_s
*iocmd
=
1897 (struct bfa_bsg_phy_stats_s
*)cmd
;
1898 struct bfad_hal_comp fcomp
;
1899 unsigned long flags
;
1901 init_completion(&fcomp
.comp
);
1902 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1903 iocmd
->status
= bfa_phy_get_stats(BFA_PHY(&bfad
->bfa
), iocmd
->instance
,
1904 &iocmd
->stats
, bfad_hcb_comp
, &fcomp
);
1905 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1906 if (iocmd
->status
!= BFA_STATUS_OK
)
1908 wait_for_completion(&fcomp
.comp
);
1909 iocmd
->status
= fcomp
.status
;
1915 bfad_iocmd_phy_read(struct bfad_s
*bfad
, void *cmd
, unsigned int payload_len
)
1917 struct bfa_bsg_phy_s
*iocmd
= (struct bfa_bsg_phy_s
*)cmd
;
1918 struct bfad_hal_comp fcomp
;
1920 unsigned long flags
;
1922 if (bfad_chk_iocmd_sz(payload_len
,
1923 sizeof(struct bfa_bsg_phy_s
),
1924 iocmd
->bufsz
) != BFA_STATUS_OK
) {
1925 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1929 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_phy_s
);
1930 init_completion(&fcomp
.comp
);
1931 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1932 iocmd
->status
= bfa_phy_read(BFA_PHY(&bfad
->bfa
),
1933 iocmd
->instance
, iocmd_bufptr
, iocmd
->bufsz
,
1934 0, bfad_hcb_comp
, &fcomp
);
1935 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1936 if (iocmd
->status
!= BFA_STATUS_OK
)
1938 wait_for_completion(&fcomp
.comp
);
1939 iocmd
->status
= fcomp
.status
;
1940 if (iocmd
->status
!= BFA_STATUS_OK
)
1947 bfad_iocmd_vhba_query(struct bfad_s
*bfad
, void *cmd
)
1949 struct bfa_bsg_vhba_attr_s
*iocmd
=
1950 (struct bfa_bsg_vhba_attr_s
*)cmd
;
1951 struct bfa_vhba_attr_s
*attr
= &iocmd
->attr
;
1952 unsigned long flags
;
1954 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1955 attr
->pwwn
= bfad
->bfa
.ioc
.attr
->pwwn
;
1956 attr
->nwwn
= bfad
->bfa
.ioc
.attr
->nwwn
;
1957 attr
->plog_enabled
= (bfa_boolean_t
)bfad
->bfa
.plog
->plog_enabled
;
1958 attr
->io_profile
= bfa_fcpim_get_io_profile(&bfad
->bfa
);
1959 attr
->path_tov
= bfa_fcpim_path_tov_get(&bfad
->bfa
);
1960 iocmd
->status
= BFA_STATUS_OK
;
1961 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1966 bfad_iocmd_phy_update(struct bfad_s
*bfad
, void *cmd
, unsigned int payload_len
)
1968 struct bfa_bsg_phy_s
*iocmd
= (struct bfa_bsg_phy_s
*)cmd
;
1970 struct bfad_hal_comp fcomp
;
1971 unsigned long flags
;
1973 if (bfad_chk_iocmd_sz(payload_len
,
1974 sizeof(struct bfa_bsg_phy_s
),
1975 iocmd
->bufsz
) != BFA_STATUS_OK
) {
1976 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1980 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_phy_s
);
1981 init_completion(&fcomp
.comp
);
1982 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1983 iocmd
->status
= bfa_phy_update(BFA_PHY(&bfad
->bfa
),
1984 iocmd
->instance
, iocmd_bufptr
, iocmd
->bufsz
,
1985 0, bfad_hcb_comp
, &fcomp
);
1986 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1987 if (iocmd
->status
!= BFA_STATUS_OK
)
1989 wait_for_completion(&fcomp
.comp
);
1990 iocmd
->status
= fcomp
.status
;
1996 bfad_iocmd_porglog_get(struct bfad_s
*bfad
, void *cmd
)
1998 struct bfa_bsg_debug_s
*iocmd
= (struct bfa_bsg_debug_s
*)cmd
;
2001 if (iocmd
->bufsz
< sizeof(struct bfa_plog_s
)) {
2002 bfa_trc(bfad
, sizeof(struct bfa_plog_s
));
2003 iocmd
->status
= BFA_STATUS_EINVAL
;
2007 iocmd
->status
= BFA_STATUS_OK
;
2008 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_debug_s
);
2009 memcpy(iocmd_bufptr
, (u8
*) &bfad
->plog_buf
, sizeof(struct bfa_plog_s
));
2014 #define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
2016 bfad_iocmd_debug_fw_core(struct bfad_s
*bfad
, void *cmd
,
2017 unsigned int payload_len
)
2019 struct bfa_bsg_debug_s
*iocmd
= (struct bfa_bsg_debug_s
*)cmd
;
2021 unsigned long flags
;
2024 if (bfad_chk_iocmd_sz(payload_len
, sizeof(struct bfa_bsg_debug_s
),
2025 BFA_DEBUG_FW_CORE_CHUNK_SZ
) != BFA_STATUS_OK
) {
2026 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
2030 if (iocmd
->bufsz
< BFA_DEBUG_FW_CORE_CHUNK_SZ
||
2031 !IS_ALIGNED(iocmd
->bufsz
, sizeof(u16
)) ||
2032 !IS_ALIGNED(iocmd
->offset
, sizeof(u32
))) {
2033 bfa_trc(bfad
, BFA_DEBUG_FW_CORE_CHUNK_SZ
);
2034 iocmd
->status
= BFA_STATUS_EINVAL
;
2038 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_debug_s
);
2039 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
2040 offset
= iocmd
->offset
;
2041 iocmd
->status
= bfa_ioc_debug_fwcore(&bfad
->bfa
.ioc
, iocmd_bufptr
,
2042 &offset
, &iocmd
->bufsz
);
2043 iocmd
->offset
= offset
;
2044 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
int
bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
		bfa_trc_init(bfad->trcmod);
	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
		bfa_trc_stop(bfad->trcmod);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;

	if (iocmd->ctl == BFA_TRUE)
		bfad->plog_buf.plog_enabled = 1;
	else
		bfad->plog_buf.plog_enabled = 0;

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_profile_s *iocmd =
				(struct bfa_bsg_fcpim_profile_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa,
						ktime_get_real_seconds());
	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_ioprofile_s *iocmd =
				(struct bfa_bsg_itnim_ioprofile_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else
			iocmd->status = bfa_itnim_get_ioprofile(
					bfa_fcs_itnim_get_halitn(itnim),
					&iocmd->ioprofile);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

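/*
 * FC port statistics are fetched through BFA's pending-queue callback
 * mechanism: bfad_hcb_comp() wakes the completion once the firmware has
 * filled iocmd->stats.
 */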
int
bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
				(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

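/*
 * Enable or disable trunking on the base port. The request is rejected
 * while the port is in dport mode or loop topology; applying the change
 * toggles the FC port so the new trunking configuration takes effect.
 */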
int
bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if (bfa_fcport_is_dport(&bfad->bfa)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_DPORT_ERR;
	}

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		if (v_cmd == IOCMD_TRUNK_ENABLE) {
			trunk->attr.state = BFA_TRUNK_OFFLINE;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_TRUE;
		} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
			trunk->attr.state = BFA_TRUNK_DISABLED;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_FALSE;
		}

		if (!bfa_fcport_is_disabled(&bfad->bfa))
			bfa_fcport_enable(&bfad->bfa);

		iocmd->status = BFA_STATUS_OK;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
			sizeof(struct bfa_trunk_attr_s));
		iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
		if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
			(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
			iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
		else {
			if (v_cmd == IOCMD_QOS_ENABLE)
				fcport->cfg.qos_enabled = BFA_TRUE;
			else if (v_cmd == IOCMD_QOS_DISABLE) {
				fcport->cfg.qos_enabled = BFA_FALSE;
				fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
				fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
				fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
			}
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		iocmd->attr.state = fcport->qos_attr.state;
		iocmd->attr.total_bb_cr =
			be32_to_cpu(fcport->qos_attr.total_bb_cr);
		iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
		iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
		iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
		iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_vc_attr_s *iocmd =
		(struct bfa_bsg_qos_vc_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
	unsigned long flags;
	u32 i = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
	iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
	iocmd->attr.elp_opmode_flags =
				be32_to_cpu(bfa_vc_attr->elp_opmode_flags);

	/* Individual VC info */
	while (i < iocmd->attr.total_vc_count) {
		iocmd->attr.vc_info[i].vc_credit =
			bfa_vc_attr->vc_info[i].vc_credit;
		iocmd->attr.vc_info[i].borrow_credit =
			bfa_vc_attr->vc_info[i].borrow_credit;
		iocmd->attr.vc_info[i].priority =
			bfa_vc_attr->vc_info[i].priority;
		i++;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
				(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_stats_s *iocmd =
			(struct bfa_bsg_vf_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
		sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_reset_stats_s *iocmd =
			(struct bfa_bsg_vf_reset_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

/* Function to reset the LUN SCAN mode */
static void
bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
{
	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
	struct bfad_vport_s *vport = NULL;

	/* Set the scsi device LUN SCAN flags for base port */
	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);

	/* Set the scsi device LUN SCAN flags for the vports */
	list_for_each_entry(vport, &bfad->vport_list, list_entry)
		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
}

int
bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
		/* Set the LUN Scanning mode to be Sequential scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
			(struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
	struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_lunmask_s *iocmd =
				(struct bfa_bsg_fcpim_lunmask_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
		iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
					&iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
		iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
					iocmd->vf_id, &iocmd->pwwn,
					iocmd->rpwwn, iocmd->lun);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_throttle_s *iocmd =
			(struct bfa_bsg_fcpim_throttle_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
				(void *)&iocmd->throttle);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_throttle_s *iocmd =
			(struct bfa_bsg_fcpim_throttle_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
				iocmd->throttle.cfg_value);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

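/*
 * FRU access helpers: read/write the temporary FRU area and the FRU VPD
 * through the BFA FRU module, waiting on bfad_hcb_comp() whenever the
 * request was accepted by the firmware.
 */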
int
bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_tfru_s *iocmd =
			(struct bfa_bsg_tfru_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_tfru_s *iocmd =
			(struct bfa_bsg_tfru_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_s *iocmd =
			(struct bfa_bsg_fruvpd_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_s *iocmd =
			(struct bfa_bsg_fruvpd_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_max_size_s *iocmd =
			(struct bfa_bsg_fruvpd_max_size_s *)cmd;
	unsigned long flags = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
						&iocmd->max_size);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

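/*
 * Central dispatcher for the BSG vendor-specific commands: maps each
 * IOCMD_* code carried in the vendor request to the matching
 * bfad_iocmd_*() helper above. Commands that operate on the request
 * payload (firmware dumps, flash and PHY reads/updates, and so on) also
 * receive payload_len so the helper can validate the caller's buffer.
 */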
int
bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
		unsigned int payload_len)
{
	int rc = -EINVAL;

	switch (cmd) {
	case IOCMD_IOC_ENABLE:
		rc = bfad_iocmd_ioc_enable(bfad, iocmd);
		break;
	case IOCMD_IOC_DISABLE:
		rc = bfad_iocmd_ioc_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_INFO:
		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_ATTR:
		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_STATS:
		rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_FWSTATS:
		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
		break;
	case IOCMD_IOC_RESET_STATS:
	case IOCMD_IOC_RESET_FWSTATS:
		rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
		break;
	case IOCMD_IOC_SET_ADAPTER_NAME:
	case IOCMD_IOC_SET_PORT_NAME:
		rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
		break;
	case IOCMD_IOCFC_GET_ATTR:
		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOCFC_SET_INTR:
		rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
		break;
	case IOCMD_PORT_ENABLE:
		rc = bfad_iocmd_port_enable(bfad, iocmd);
		break;
	case IOCMD_PORT_DISABLE:
		rc = bfad_iocmd_port_disable(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_ATTR:
		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_STATS:
		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_PORT_RESET_STATS:
		rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_TOPO:
	case IOCMD_PORT_CFG_SPEED:
	case IOCMD_PORT_CFG_ALPA:
	case IOCMD_PORT_CLR_ALPA:
		rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
		break;
	case IOCMD_PORT_CFG_MAXFRSZ:
		rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
		break;
	case IOCMD_PORT_BBCR_ENABLE:
	case IOCMD_PORT_BBCR_DISABLE:
		rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
		break;
	case IOCMD_PORT_BBCR_GET_ATTR:
		rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_ATTR:
		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_STATS:
		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_RESET_STATS:
		rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_IOSTATS:
		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_RPORTS:
		rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RPORT_GET_ATTR:
		rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_ADDR:
		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_STATS:
		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_RESET_STATS:
		rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_SET_SPEED:
		rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_ATTR:
		rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_STATS:
		rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
		break;
	case IOCMD_VPORT_RESET_STATS:
		rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FABRIC_GET_LPORTS:
		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RATELIM_ENABLE:
	case IOCMD_RATELIM_DISABLE:
		rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
		break;
	case IOCMD_RATELIM_DEF_SPEED:
		rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
		break;
	case IOCMD_FCPIM_FAILOVER:
		rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATS:
		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATSCLR:
		rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_DEL_ITN_STATS:
		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ATTR:
		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_IOSTATS:
		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_RESET_STATS:
		rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ITNSTATS:
		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_ENABLE:
		rc = bfad_iocmd_fcport_enable(bfad, iocmd);
		break;
	case IOCMD_FCPORT_DISABLE:
		rc = bfad_iocmd_fcport_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_PCIFN_CFG:
		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
		break;
	case IOCMD_IOC_FW_SIG_INV:
		rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd);
		break;
	case IOCMD_PCIFN_CREATE:
		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
		break;
	case IOCMD_PCIFN_DELETE:
		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
		break;
	case IOCMD_PCIFN_BW:
		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
		break;
	case IOCMD_ADAPTER_CFG_MODE:
		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_MODE:
		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_FLASH_ENABLE_OPTROM:
	case IOCMD_FLASH_DISABLE_OPTROM:
		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
		break;
	case IOCMD_FAA_QUERY:
		rc = bfad_iocmd_faa_query(bfad, iocmd);
		break;
	case IOCMD_CEE_GET_ATTR:
		rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_GET_STATS:
		rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_RESET_STATS:
		rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
		break;
	case IOCMD_SFP_MEDIA:
		rc = bfad_iocmd_sfp_media(bfad, iocmd);
		break;
	case IOCMD_SFP_SPEED:
		rc = bfad_iocmd_sfp_speed(bfad, iocmd);
		break;
	case IOCMD_FLASH_GET_ATTR:
		rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
		break;
	case IOCMD_FLASH_ERASE_PART:
		rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
		break;
	case IOCMD_FLASH_UPDATE_PART:
		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_FLASH_READ_PART:
		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_DIAG_TEMP:
		rc = bfad_iocmd_diag_temp(bfad, iocmd);
		break;
	case IOCMD_DIAG_MEMTEST:
		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
		break;
	case IOCMD_DIAG_LOOPBACK:
		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
		break;
	case IOCMD_DIAG_FWPING:
		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
		break;
	case IOCMD_DIAG_QUEUETEST:
		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
		break;
	case IOCMD_DIAG_SFP:
		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
		break;
	case IOCMD_DIAG_LED:
		rc = bfad_iocmd_diag_led(bfad, iocmd);
		break;
	case IOCMD_DIAG_BEACON_LPORT:
		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
		break;
	case IOCMD_DIAG_LB_STAT:
		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_ENABLE:
		rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_DISABLE:
		rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_SHOW:
		rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_START:
		rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_ATTR:
		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_STATS:
		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
		break;
	case IOCMD_PHY_UPDATE_FW:
		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
		break;
	case IOCMD_PHY_READ_FW:
		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
		break;
	case IOCMD_VHBA_QUERY:
		rc = bfad_iocmd_vhba_query(bfad, iocmd);
		break;
	case IOCMD_DEBUG_PORTLOG:
		rc = bfad_iocmd_porglog_get(bfad, iocmd);
		break;
	case IOCMD_DEBUG_FW_CORE:
		rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
		break;
	case IOCMD_DEBUG_FW_STATE_CLR:
	case IOCMD_DEBUG_PORTLOG_CLR:
	case IOCMD_DEBUG_START_DTRC:
	case IOCMD_DEBUG_STOP_DTRC:
		rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
		break;
	case IOCMD_DEBUG_PORTLOG_CTL:
		rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
		break;
	case IOCMD_FCPIM_PROFILE_ON:
	case IOCMD_FCPIM_PROFILE_OFF:
		rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
		break;
	case IOCMD_ITNIM_GET_IOPROFILE:
		rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
		break;
	case IOCMD_FCPORT_GET_STATS:
		rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_RESET_STATS:
		rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_BOOT_CFG:
		rc = bfad_iocmd_boot_cfg(bfad, iocmd);
		break;
	case IOCMD_BOOT_QUERY:
		rc = bfad_iocmd_boot_query(bfad, iocmd);
		break;
	case IOCMD_PREBOOT_QUERY:
		rc = bfad_iocmd_preboot_query(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_CFG:
		rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_QUERY:
		rc = bfad_iocmd_ethboot_query(bfad, iocmd);
		break;
	case IOCMD_TRUNK_ENABLE:
	case IOCMD_TRUNK_DISABLE:
		rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
		break;
	case IOCMD_TRUNK_GET_ATTR:
		rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_ENABLE:
	case IOCMD_QOS_DISABLE:
		rc = bfad_iocmd_qos(bfad, iocmd, cmd);
		break;
	case IOCMD_QOS_GET_ATTR:
		rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_VC_ATTR:
		rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_STATS:
		rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_RESET_STATS:
		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_SET_BW:
		rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
		break;
	case IOCMD_VF_GET_STATS:
		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
		break;
	case IOCMD_VF_RESET_STATS:
		rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ENABLE:
	case IOCMD_FCPIM_LUNMASK_DISABLE:
	case IOCMD_FCPIM_LUNMASK_CLEAR:
		rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_LUNMASK_QUERY:
		rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ADD:
	case IOCMD_FCPIM_LUNMASK_DELETE:
		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_THROTTLE_QUERY:
		rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_THROTTLE_SET:
		rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
		break;
	case IOCMD_TFRU_READ:
		rc = bfad_iocmd_tfru_read(bfad, iocmd);
		break;
	case IOCMD_TFRU_WRITE:
		rc = bfad_iocmd_tfru_write(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_READ:
		rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_UPDATE:
		rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_GET_MAX_SIZE:
		rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}

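/*
 * BSG vendor request path: the scatter/gather payload handed in by the
 * fc_bsg layer is linearized into a kernel buffer, passed to
 * bfad_iocmd_handler(), and the (possibly updated) buffer is copied back
 * into the reply payload before the job is completed.
 */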
static int
bfad_im_bsg_vendor_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
	struct bfad_s *bfad = im_port->bfad;
	void *payload_kbuf;
	int rc = -EINVAL;

	/* Allocate a temp buffer to hold the passed in user space command */
	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!payload_kbuf) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, payload_kbuf,
			  job->request_payload.payload_len);

	/* Invoke IOCMD handler - to handle all the vendor command requests */
	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
				job->request_payload.payload_len);
	if (rc != BFA_STATUS_OK)
		goto error;

	/* Copy the response data to the job->reply_payload sg_list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    payload_kbuf,
			    job->reply_payload.payload_len);

	/* free the command buffer */
	kfree(payload_kbuf);

	/* Fill the BSG job reply data */
	job->reply_len = job->reply_payload.payload_len;
	bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	bsg_reply->result = rc;

	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return rc;
error:
	/* free the command buffer */
	kfree(payload_kbuf);
out:
	bsg_reply->result = rc;
	job->reply_len = sizeof(uint32_t);
	bsg_reply->reply_payload_rcv_len = 0;
	return rc;
}

/* FC passthru call backs */
static u64
bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->req_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

static u32
bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->req_sge + sgeid;
	return sge->sg_len;
}

static u64
bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->rsp_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

static u32
bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->rsp_sge + sgeid;
	return sge->sg_len;
}

static void
bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
		struct fchs_s *rsp_fchs)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;

	drv_fcxp->req_status = req_status;
	drv_fcxp->rsp_len = rsp_len;

	/* bfa_fcxp will be automatically freed by BFA */
	drv_fcxp->bfa_fcxp = NULL;
	complete(&drv_fcxp->comp);
}

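/*
 * Helper for the FC passthru path: allocates a bfad_buf_info plus a
 * bfa_sge_s table, backs it with a DMA-coherent buffer, and copies the
 * linearized bsg payload into it so the firmware can DMA the frame.
 * Memory is released again through bfad_fcxp_free_mem().
 */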
static struct bfad_buf_info *
bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
		 uint32_t payload_len, uint32_t *num_sgles)
{
	struct bfad_buf_info *buf_base, *buf_info;
	struct bfa_sge_s *sg_table;
	int sge_num = 1;

	buf_base = kcalloc(sizeof(struct bfad_buf_info) +
			   sizeof(struct bfa_sge_s),
			   sge_num, GFP_KERNEL);
	if (!buf_base)
		return NULL;

	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
			(sizeof(struct bfad_buf_info) * sge_num));

	/* Allocate dma coherent memory */
	buf_info = buf_base;
	buf_info->size = payload_len;
	buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev,
					    buf_info->size, &buf_info->phys,
					    GFP_KERNEL);
	if (!buf_info->virt)
		goto out_free_mem;

	/* copy the linear bsg buffer to buf_info */
	memcpy(buf_info->virt, payload_kbuf, buf_info->size);

	/* Setup SG table */
	sg_table->sg_len = buf_info->size;
	sg_table->sg_addr = (void *)(size_t) buf_info->phys;

	*num_sgles = sge_num;

	return buf_base;

out_free_mem:
	kfree(buf_base);
	return NULL;
}

static void
bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
		   uint32_t num_sgles)
{
	int i;
	struct bfad_buf_info *buf_info = buf_base;

	if (buf_base) {
		for (i = 0; i < num_sgles; buf_info++, i++) {
			if (buf_info->virt != NULL)
				dma_free_coherent(&bfad->pcidev->dev,
					buf_info->size, buf_info->virt,
					buf_info->phys);
		}
		kfree(buf_base);
	}
}

int
bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
		   bfa_bsg_fcpt_t *bsg_fcpt)
{
	struct bfa_fcxp_s *hal_fcxp;
	struct bfad_s *bfad = drv_fcxp->port->bfad;
	unsigned long flags;
	uint8_t lp_tag;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Allocate bfa_fcxp structure */
	hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
				  drv_fcxp->num_req_sgles,
				  drv_fcxp->num_rsp_sgles,
				  bfad_fcxp_get_req_sgaddr_cb,
				  bfad_fcxp_get_req_sglen_cb,
				  bfad_fcxp_get_rsp_sgaddr_cb,
				  bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
	if (!hal_fcxp) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_ENOMEM;
	}

	drv_fcxp->bfa_fcxp = hal_fcxp;

	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);

	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
		      bsg_fcpt->cts, bsg_fcpt->cos,
		      job->request_payload.payload_len,
		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
		      job->reply_payload.payload_len, bsg_fcpt->tsecs);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;
}

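/*
 * ELS/CT passthru: copies the caller's bfa_bsg_fcpt_t and payload from
 * user space, resolves the local port (and the remote port when the
 * msgcode needs a nexus), maps the request/response buffers for DMA,
 * sends the frame through bfad_fcxp_bsg_send() and waits for its
 * completion before copying the response and status back to user space.
 */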
static int
bfad_im_bsg_els_ct_request(struct bsg_job *job)
{
	struct bfa_bsg_data *bsg_data;
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
	struct bfad_s *bfad = im_port->bfad;
	bfa_bsg_fcpt_t *bsg_fcpt;
	struct bfad_fcxp *drv_fcxp;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t command_type = bsg_request->msgcode;
	unsigned long flags;
	struct bfad_buf_info *rsp_buf_info;
	void *req_kbuf = NULL, *rsp_kbuf = NULL;
	int rc = -EINVAL;

	job->reply_len = sizeof(uint32_t);	/* Atleast uint32_t reply_len */
	bsg_reply->reply_payload_rcv_len = 0;

	/* Get the payload passed in from userspace */
	bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) +
					    sizeof(struct fc_bsg_request));
	if (bsg_data == NULL)
		goto out;
	/*
	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
	 * buffer of size bsg_data->payload_len
	 */
	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
	if (!bsg_fcpt) {
		rc = -ENOMEM;
		goto out;
	}

	if (copy_from_user((uint8_t *)bsg_fcpt,
			(void *)(unsigned long)bsg_data->payload,
			bsg_data->payload_len)) {
		kfree(bsg_fcpt);
		rc = -EIO;
		goto out;
	}

	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
	if (drv_fcxp == NULL) {
		kfree(bsg_fcpt);
		rc = -ENOMEM;
		goto out;
	}
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
					bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (drv_fcxp->port->bfad == 0)
		drv_fcxp->port->bfad = bfad;
	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
	    command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;

	} else if (command_type == FC_BSG_RPT_ELS ||
		   command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}

		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;

	} else { /* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->reqbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_req_sgles));

	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->rspbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_rsp_sgles));
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		bsg_reply->reply_data.ctels_reply.status =
			FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);
out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(req_kbuf);
	kfree(rsp_kbuf);

	/* Need a copy to user op */
	if (copy_to_user((void *)(unsigned long)bsg_data->payload,
			 (void *)bsg_fcpt, bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	bsg_reply->result = rc;

	if (rc == BFA_STATUS_OK)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);

	return rc;
}

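/*
 * BSG entry points used by the FC transport: vendor commands go to
 * bfad_im_bsg_vendor_request(), ELS/CT passthru requests go to
 * bfad_im_bsg_els_ct_request(); anything else is rejected with -EINVAL.
 */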
int
bfad_im_bsg_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t rc = BFA_STATUS_OK;

	switch (bsg_request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* Process BSG HST Vendor requests */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		bsg_reply->result = rc = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	}

	return rc;
}

int
bfad_im_bsg_timeout(struct bsg_job *job)
{
	/* Don't complete the BSG job request - return -EAGAIN
	 * to reset bsg job timeout : for ELS/CT pass thru we
	 * already have timer to track the request.