1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
4 * Copyright (c) 2014- QLogic Corporation.
8 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
11 #include <linux/uaccess.h>
16 BFA_TRC_FILE(LDRV
, BSG
);
19 bfad_iocmd_ioc_enable(struct bfad_s
*bfad
, void *cmd
)
21 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
24 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
25 /* If IOC is not in disabled state - return */
26 if (!bfa_ioc_is_disabled(&bfad
->bfa
.ioc
)) {
27 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
28 iocmd
->status
= BFA_STATUS_OK
;
32 init_completion(&bfad
->enable_comp
);
33 bfa_iocfc_enable(&bfad
->bfa
);
34 iocmd
->status
= BFA_STATUS_OK
;
35 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
36 wait_for_completion(&bfad
->enable_comp
);
42 bfad_iocmd_ioc_disable(struct bfad_s
*bfad
, void *cmd
)
44 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
47 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
48 if (bfa_ioc_is_disabled(&bfad
->bfa
.ioc
)) {
49 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
50 iocmd
->status
= BFA_STATUS_OK
;
54 if (bfad
->disable_active
) {
55 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
59 bfad
->disable_active
= BFA_TRUE
;
60 init_completion(&bfad
->disable_comp
);
61 bfa_iocfc_disable(&bfad
->bfa
);
62 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
64 wait_for_completion(&bfad
->disable_comp
);
65 bfad
->disable_active
= BFA_FALSE
;
66 iocmd
->status
= BFA_STATUS_OK
;
72 bfad_iocmd_ioc_get_info(struct bfad_s
*bfad
, void *cmd
)
75 struct bfa_bsg_ioc_info_s
*iocmd
= (struct bfa_bsg_ioc_info_s
*)cmd
;
76 struct bfad_im_port_s
*im_port
;
77 struct bfa_port_attr_s pattr
;
80 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
81 bfa_fcport_get_attr(&bfad
->bfa
, &pattr
);
82 iocmd
->nwwn
= pattr
.nwwn
;
83 iocmd
->pwwn
= pattr
.pwwn
;
84 iocmd
->ioc_type
= bfa_get_type(&bfad
->bfa
);
85 iocmd
->mac
= bfa_get_mac(&bfad
->bfa
);
86 iocmd
->factory_mac
= bfa_get_mfg_mac(&bfad
->bfa
);
87 bfa_get_adapter_serial_num(&bfad
->bfa
, iocmd
->serialnum
);
88 iocmd
->factorynwwn
= pattr
.factorynwwn
;
89 iocmd
->factorypwwn
= pattr
.factorypwwn
;
90 iocmd
->bfad_num
= bfad
->inst_no
;
91 im_port
= bfad
->pport
.im_port
;
92 iocmd
->host
= im_port
->shost
->host_no
;
93 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
95 strcpy(iocmd
->name
, bfad
->adapter_name
);
96 strcpy(iocmd
->port_name
, bfad
->port_name
);
97 strcpy(iocmd
->hwpath
, bfad
->pci_name
);
99 /* set adapter hw path */
100 strcpy(iocmd
->adapter_hwpath
, bfad
->pci_name
);
101 for (i
= 0; iocmd
->adapter_hwpath
[i
] != ':' && i
< BFA_STRING_32
; i
++)
103 for (; iocmd
->adapter_hwpath
[++i
] != ':' && i
< BFA_STRING_32
; )
105 iocmd
->adapter_hwpath
[i
] = '\0';
106 iocmd
->status
= BFA_STATUS_OK
;
111 bfad_iocmd_ioc_get_attr(struct bfad_s
*bfad
, void *cmd
)
113 struct bfa_bsg_ioc_attr_s
*iocmd
= (struct bfa_bsg_ioc_attr_s
*)cmd
;
116 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
117 bfa_ioc_get_attr(&bfad
->bfa
.ioc
, &iocmd
->ioc_attr
);
118 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
120 /* fill in driver attr info */
121 strcpy(iocmd
->ioc_attr
.driver_attr
.driver
, BFAD_DRIVER_NAME
);
122 strscpy(iocmd
->ioc_attr
.driver_attr
.driver_ver
,
123 BFAD_DRIVER_VERSION
, BFA_VERSION_LEN
);
124 strcpy(iocmd
->ioc_attr
.driver_attr
.fw_ver
,
125 iocmd
->ioc_attr
.adapter_attr
.fw_ver
);
126 strcpy(iocmd
->ioc_attr
.driver_attr
.bios_ver
,
127 iocmd
->ioc_attr
.adapter_attr
.optrom_ver
);
129 /* copy chip rev info first otherwise it will be overwritten */
130 memcpy(bfad
->pci_attr
.chip_rev
, iocmd
->ioc_attr
.pci_attr
.chip_rev
,
131 sizeof(bfad
->pci_attr
.chip_rev
));
132 memcpy(&iocmd
->ioc_attr
.pci_attr
, &bfad
->pci_attr
,
133 sizeof(struct bfa_ioc_pci_attr_s
));
135 iocmd
->status
= BFA_STATUS_OK
;
140 bfad_iocmd_ioc_get_stats(struct bfad_s
*bfad
, void *cmd
)
142 struct bfa_bsg_ioc_stats_s
*iocmd
= (struct bfa_bsg_ioc_stats_s
*)cmd
;
144 bfa_ioc_get_stats(&bfad
->bfa
, &iocmd
->ioc_stats
);
145 iocmd
->status
= BFA_STATUS_OK
;
150 bfad_iocmd_ioc_get_fwstats(struct bfad_s
*bfad
, void *cmd
,
151 unsigned int payload_len
)
153 struct bfa_bsg_ioc_fwstats_s
*iocmd
=
154 (struct bfa_bsg_ioc_fwstats_s
*)cmd
;
158 if (bfad_chk_iocmd_sz(payload_len
,
159 sizeof(struct bfa_bsg_ioc_fwstats_s
),
160 sizeof(struct bfa_fw_stats_s
)) != BFA_STATUS_OK
) {
161 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
165 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_ioc_fwstats_s
);
166 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
167 iocmd
->status
= bfa_ioc_fw_stats_get(&bfad
->bfa
.ioc
, iocmd_bufptr
);
168 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
170 if (iocmd
->status
!= BFA_STATUS_OK
) {
171 bfa_trc(bfad
, iocmd
->status
);
175 bfa_trc(bfad
, 0x6666);
180 bfad_iocmd_ioc_reset_stats(struct bfad_s
*bfad
, void *cmd
, unsigned int v_cmd
)
182 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
185 if (v_cmd
== IOCMD_IOC_RESET_STATS
) {
186 bfa_ioc_clear_stats(&bfad
->bfa
);
187 iocmd
->status
= BFA_STATUS_OK
;
188 } else if (v_cmd
== IOCMD_IOC_RESET_FWSTATS
) {
189 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
190 iocmd
->status
= bfa_ioc_fw_stats_clear(&bfad
->bfa
.ioc
);
191 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
198 bfad_iocmd_ioc_set_name(struct bfad_s
*bfad
, void *cmd
, unsigned int v_cmd
)
200 struct bfa_bsg_ioc_name_s
*iocmd
= (struct bfa_bsg_ioc_name_s
*) cmd
;
202 if (v_cmd
== IOCMD_IOC_SET_ADAPTER_NAME
)
203 strcpy(bfad
->adapter_name
, iocmd
->name
);
204 else if (v_cmd
== IOCMD_IOC_SET_PORT_NAME
)
205 strcpy(bfad
->port_name
, iocmd
->name
);
207 iocmd
->status
= BFA_STATUS_OK
;
212 bfad_iocmd_iocfc_get_attr(struct bfad_s
*bfad
, void *cmd
)
214 struct bfa_bsg_iocfc_attr_s
*iocmd
= (struct bfa_bsg_iocfc_attr_s
*)cmd
;
216 iocmd
->status
= BFA_STATUS_OK
;
217 bfa_iocfc_get_attr(&bfad
->bfa
, &iocmd
->iocfc_attr
);
223 bfad_iocmd_ioc_fw_sig_inv(struct bfad_s
*bfad
, void *cmd
)
225 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
228 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
229 iocmd
->status
= bfa_ioc_fwsig_invalidate(&bfad
->bfa
.ioc
);
230 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
235 bfad_iocmd_iocfc_set_intr(struct bfad_s
*bfad
, void *cmd
)
237 struct bfa_bsg_iocfc_intr_s
*iocmd
= (struct bfa_bsg_iocfc_intr_s
*)cmd
;
240 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
241 iocmd
->status
= bfa_iocfc_israttr_set(&bfad
->bfa
, &iocmd
->attr
);
242 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
248 bfad_iocmd_port_enable(struct bfad_s
*bfad
, void *cmd
)
250 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
251 struct bfad_hal_comp fcomp
;
254 init_completion(&fcomp
.comp
);
255 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
256 iocmd
->status
= bfa_port_enable(&bfad
->bfa
.modules
.port
,
257 bfad_hcb_comp
, &fcomp
);
258 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
259 if (iocmd
->status
!= BFA_STATUS_OK
) {
260 bfa_trc(bfad
, iocmd
->status
);
263 wait_for_completion(&fcomp
.comp
);
264 iocmd
->status
= fcomp
.status
;
269 bfad_iocmd_port_disable(struct bfad_s
*bfad
, void *cmd
)
271 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
272 struct bfad_hal_comp fcomp
;
275 init_completion(&fcomp
.comp
);
276 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
277 iocmd
->status
= bfa_port_disable(&bfad
->bfa
.modules
.port
,
278 bfad_hcb_comp
, &fcomp
);
279 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
281 if (iocmd
->status
!= BFA_STATUS_OK
) {
282 bfa_trc(bfad
, iocmd
->status
);
285 wait_for_completion(&fcomp
.comp
);
286 iocmd
->status
= fcomp
.status
;
291 bfad_iocmd_port_get_attr(struct bfad_s
*bfad
, void *cmd
)
293 struct bfa_bsg_port_attr_s
*iocmd
= (struct bfa_bsg_port_attr_s
*)cmd
;
294 struct bfa_lport_attr_s port_attr
;
297 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
298 bfa_fcport_get_attr(&bfad
->bfa
, &iocmd
->attr
);
299 bfa_fcs_lport_get_attr(&bfad
->bfa_fcs
.fabric
.bport
, &port_attr
);
300 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
302 if (iocmd
->attr
.topology
!= BFA_PORT_TOPOLOGY_NONE
)
303 iocmd
->attr
.pid
= port_attr
.pid
;
307 iocmd
->attr
.port_type
= port_attr
.port_type
;
308 iocmd
->attr
.loopback
= port_attr
.loopback
;
309 iocmd
->attr
.authfail
= port_attr
.authfail
;
310 strscpy(iocmd
->attr
.port_symname
.symname
,
311 port_attr
.port_cfg
.sym_name
.symname
,
312 sizeof(iocmd
->attr
.port_symname
.symname
));
314 iocmd
->status
= BFA_STATUS_OK
;
319 bfad_iocmd_port_get_stats(struct bfad_s
*bfad
, void *cmd
,
320 unsigned int payload_len
)
322 struct bfa_bsg_port_stats_s
*iocmd
= (struct bfa_bsg_port_stats_s
*)cmd
;
323 struct bfad_hal_comp fcomp
;
327 if (bfad_chk_iocmd_sz(payload_len
,
328 sizeof(struct bfa_bsg_port_stats_s
),
329 sizeof(union bfa_port_stats_u
)) != BFA_STATUS_OK
) {
330 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
334 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_port_stats_s
);
336 init_completion(&fcomp
.comp
);
337 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
338 iocmd
->status
= bfa_port_get_stats(&bfad
->bfa
.modules
.port
,
339 iocmd_bufptr
, bfad_hcb_comp
, &fcomp
);
340 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
341 if (iocmd
->status
!= BFA_STATUS_OK
) {
342 bfa_trc(bfad
, iocmd
->status
);
346 wait_for_completion(&fcomp
.comp
);
347 iocmd
->status
= fcomp
.status
;
353 bfad_iocmd_port_reset_stats(struct bfad_s
*bfad
, void *cmd
)
355 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
356 struct bfad_hal_comp fcomp
;
359 init_completion(&fcomp
.comp
);
360 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
361 iocmd
->status
= bfa_port_clear_stats(&bfad
->bfa
.modules
.port
,
362 bfad_hcb_comp
, &fcomp
);
363 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
364 if (iocmd
->status
!= BFA_STATUS_OK
) {
365 bfa_trc(bfad
, iocmd
->status
);
368 wait_for_completion(&fcomp
.comp
);
369 iocmd
->status
= fcomp
.status
;
374 bfad_iocmd_set_port_cfg(struct bfad_s
*bfad
, void *iocmd
, unsigned int v_cmd
)
376 struct bfa_bsg_port_cfg_s
*cmd
= (struct bfa_bsg_port_cfg_s
*)iocmd
;
379 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
380 if (v_cmd
== IOCMD_PORT_CFG_TOPO
)
381 cmd
->status
= bfa_fcport_cfg_topology(&bfad
->bfa
, cmd
->param
);
382 else if (v_cmd
== IOCMD_PORT_CFG_SPEED
)
383 cmd
->status
= bfa_fcport_cfg_speed(&bfad
->bfa
, cmd
->param
);
384 else if (v_cmd
== IOCMD_PORT_CFG_ALPA
)
385 cmd
->status
= bfa_fcport_cfg_hardalpa(&bfad
->bfa
, cmd
->param
);
386 else if (v_cmd
== IOCMD_PORT_CLR_ALPA
)
387 cmd
->status
= bfa_fcport_clr_hardalpa(&bfad
->bfa
);
388 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
394 bfad_iocmd_port_cfg_maxfrsize(struct bfad_s
*bfad
, void *cmd
)
396 struct bfa_bsg_port_cfg_maxfrsize_s
*iocmd
=
397 (struct bfa_bsg_port_cfg_maxfrsize_s
*)cmd
;
400 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
401 iocmd
->status
= bfa_fcport_cfg_maxfrsize(&bfad
->bfa
, iocmd
->maxfrsize
);
402 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
408 bfad_iocmd_port_cfg_bbcr(struct bfad_s
*bfad
, unsigned int cmd
, void *pcmd
)
410 struct bfa_bsg_bbcr_enable_s
*iocmd
=
411 (struct bfa_bsg_bbcr_enable_s
*)pcmd
;
415 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
416 if (cmd
== IOCMD_PORT_BBCR_ENABLE
)
417 rc
= bfa_fcport_cfg_bbcr(&bfad
->bfa
, BFA_TRUE
, iocmd
->bb_scn
);
418 else if (cmd
== IOCMD_PORT_BBCR_DISABLE
)
419 rc
= bfa_fcport_cfg_bbcr(&bfad
->bfa
, BFA_FALSE
, 0);
421 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
424 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
431 bfad_iocmd_port_get_bbcr_attr(struct bfad_s
*bfad
, void *pcmd
)
433 struct bfa_bsg_bbcr_attr_s
*iocmd
= (struct bfa_bsg_bbcr_attr_s
*) pcmd
;
436 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
438 bfa_fcport_get_bbcr_attr(&bfad
->bfa
, &iocmd
->attr
);
439 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
446 bfad_iocmd_lport_get_attr(struct bfad_s
*bfad
, void *cmd
)
448 struct bfa_fcs_lport_s
*fcs_port
;
449 struct bfa_bsg_lport_attr_s
*iocmd
= (struct bfa_bsg_lport_attr_s
*)cmd
;
452 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
453 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
454 iocmd
->vf_id
, iocmd
->pwwn
);
455 if (fcs_port
== NULL
) {
456 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
457 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
461 bfa_fcs_lport_get_attr(fcs_port
, &iocmd
->port_attr
);
462 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
463 iocmd
->status
= BFA_STATUS_OK
;
469 bfad_iocmd_lport_get_stats(struct bfad_s
*bfad
, void *cmd
)
471 struct bfa_fcs_lport_s
*fcs_port
;
472 struct bfa_bsg_lport_stats_s
*iocmd
=
473 (struct bfa_bsg_lport_stats_s
*)cmd
;
476 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
477 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
478 iocmd
->vf_id
, iocmd
->pwwn
);
479 if (fcs_port
== NULL
) {
480 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
481 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
485 bfa_fcs_lport_get_stats(fcs_port
, &iocmd
->port_stats
);
486 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
487 iocmd
->status
= BFA_STATUS_OK
;
493 bfad_iocmd_lport_reset_stats(struct bfad_s
*bfad
, void *cmd
)
495 struct bfa_fcs_lport_s
*fcs_port
;
496 struct bfa_bsg_reset_stats_s
*iocmd
=
497 (struct bfa_bsg_reset_stats_s
*)cmd
;
498 struct bfa_fcpim_s
*fcpim
= BFA_FCPIM(&bfad
->bfa
);
499 struct list_head
*qe
, *qen
;
500 struct bfa_itnim_s
*itnim
;
503 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
504 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
505 iocmd
->vf_id
, iocmd
->vpwwn
);
506 if (fcs_port
== NULL
) {
507 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
508 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
512 bfa_fcs_lport_clear_stats(fcs_port
);
513 /* clear IO stats from all active itnims */
514 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
515 itnim
= (struct bfa_itnim_s
*) qe
;
516 if (itnim
->rport
->rport_info
.lp_tag
!= fcs_port
->lp_tag
)
518 bfa_itnim_clear_stats(itnim
);
520 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
521 iocmd
->status
= BFA_STATUS_OK
;
527 bfad_iocmd_lport_get_iostats(struct bfad_s
*bfad
, void *cmd
)
529 struct bfa_fcs_lport_s
*fcs_port
;
530 struct bfa_bsg_lport_iostats_s
*iocmd
=
531 (struct bfa_bsg_lport_iostats_s
*)cmd
;
534 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
535 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
536 iocmd
->vf_id
, iocmd
->pwwn
);
537 if (fcs_port
== NULL
) {
538 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
539 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
543 bfa_fcpim_port_iostats(&bfad
->bfa
, &iocmd
->iostats
,
545 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
546 iocmd
->status
= BFA_STATUS_OK
;
552 bfad_iocmd_lport_get_rports(struct bfad_s
*bfad
, void *cmd
,
553 unsigned int payload_len
)
555 struct bfa_bsg_lport_get_rports_s
*iocmd
=
556 (struct bfa_bsg_lport_get_rports_s
*)cmd
;
557 struct bfa_fcs_lport_s
*fcs_port
;
561 if (iocmd
->nrports
== 0)
564 if (bfad_chk_iocmd_sz(payload_len
,
565 sizeof(struct bfa_bsg_lport_get_rports_s
),
566 sizeof(struct bfa_rport_qualifier_s
) * iocmd
->nrports
)
568 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
572 iocmd_bufptr
= (char *)iocmd
+
573 sizeof(struct bfa_bsg_lport_get_rports_s
);
574 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
575 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
576 iocmd
->vf_id
, iocmd
->pwwn
);
577 if (fcs_port
== NULL
) {
578 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
580 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
584 bfa_fcs_lport_get_rport_quals(fcs_port
,
585 (struct bfa_rport_qualifier_s
*)iocmd_bufptr
,
587 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
588 iocmd
->status
= BFA_STATUS_OK
;
594 bfad_iocmd_rport_get_attr(struct bfad_s
*bfad
, void *cmd
)
596 struct bfa_bsg_rport_attr_s
*iocmd
= (struct bfa_bsg_rport_attr_s
*)cmd
;
597 struct bfa_fcs_lport_s
*fcs_port
;
598 struct bfa_fcs_rport_s
*fcs_rport
;
601 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
602 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
603 iocmd
->vf_id
, iocmd
->pwwn
);
604 if (fcs_port
== NULL
) {
606 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
607 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
612 fcs_rport
= bfa_fcs_lport_get_rport_by_qualifier(fcs_port
,
613 iocmd
->rpwwn
, iocmd
->pid
);
615 fcs_rport
= bfa_fcs_rport_lookup(fcs_port
, iocmd
->rpwwn
);
616 if (fcs_rport
== NULL
) {
618 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
619 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
623 bfa_fcs_rport_get_attr(fcs_rport
, &iocmd
->attr
);
624 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
625 iocmd
->status
= BFA_STATUS_OK
;
631 bfad_iocmd_rport_get_addr(struct bfad_s
*bfad
, void *cmd
)
633 struct bfa_bsg_rport_scsi_addr_s
*iocmd
=
634 (struct bfa_bsg_rport_scsi_addr_s
*)cmd
;
635 struct bfa_fcs_lport_s
*fcs_port
;
636 struct bfa_fcs_itnim_s
*fcs_itnim
;
637 struct bfad_itnim_s
*drv_itnim
;
640 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
641 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
642 iocmd
->vf_id
, iocmd
->pwwn
);
643 if (fcs_port
== NULL
) {
645 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
646 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
650 fcs_itnim
= bfa_fcs_itnim_lookup(fcs_port
, iocmd
->rpwwn
);
651 if (fcs_itnim
== NULL
) {
653 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
654 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
658 drv_itnim
= fcs_itnim
->itnim_drv
;
660 if (drv_itnim
&& drv_itnim
->im_port
)
661 iocmd
->host
= drv_itnim
->im_port
->shost
->host_no
;
664 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
665 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
669 iocmd
->target
= drv_itnim
->scsi_tgt_id
;
670 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
674 iocmd
->status
= BFA_STATUS_OK
;
680 bfad_iocmd_rport_get_stats(struct bfad_s
*bfad
, void *cmd
)
682 struct bfa_bsg_rport_stats_s
*iocmd
=
683 (struct bfa_bsg_rport_stats_s
*)cmd
;
684 struct bfa_fcs_lport_s
*fcs_port
;
685 struct bfa_fcs_rport_s
*fcs_rport
;
688 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
689 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
690 iocmd
->vf_id
, iocmd
->pwwn
);
691 if (fcs_port
== NULL
) {
693 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
694 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
698 fcs_rport
= bfa_fcs_rport_lookup(fcs_port
, iocmd
->rpwwn
);
699 if (fcs_rport
== NULL
) {
701 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
702 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
706 memcpy((void *)&iocmd
->stats
, (void *)&fcs_rport
->stats
,
707 sizeof(struct bfa_rport_stats_s
));
708 if (bfa_fcs_rport_get_halrport(fcs_rport
)) {
709 memcpy((void *)&iocmd
->stats
.hal_stats
,
710 (void *)&(bfa_fcs_rport_get_halrport(fcs_rport
)->stats
),
711 sizeof(struct bfa_rport_hal_stats_s
));
714 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
715 iocmd
->status
= BFA_STATUS_OK
;
721 bfad_iocmd_rport_clr_stats(struct bfad_s
*bfad
, void *cmd
)
723 struct bfa_bsg_rport_reset_stats_s
*iocmd
=
724 (struct bfa_bsg_rport_reset_stats_s
*)cmd
;
725 struct bfa_fcs_lport_s
*fcs_port
;
726 struct bfa_fcs_rport_s
*fcs_rport
;
727 struct bfa_rport_s
*rport
;
730 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
731 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
732 iocmd
->vf_id
, iocmd
->pwwn
);
733 if (fcs_port
== NULL
) {
734 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
735 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
739 fcs_rport
= bfa_fcs_rport_lookup(fcs_port
, iocmd
->rpwwn
);
740 if (fcs_rport
== NULL
) {
741 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
742 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
746 memset((char *)&fcs_rport
->stats
, 0, sizeof(struct bfa_rport_stats_s
));
747 rport
= bfa_fcs_rport_get_halrport(fcs_rport
);
749 memset(&rport
->stats
, 0, sizeof(rport
->stats
));
750 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
751 iocmd
->status
= BFA_STATUS_OK
;
757 bfad_iocmd_rport_set_speed(struct bfad_s
*bfad
, void *cmd
)
759 struct bfa_bsg_rport_set_speed_s
*iocmd
=
760 (struct bfa_bsg_rport_set_speed_s
*)cmd
;
761 struct bfa_fcs_lport_s
*fcs_port
;
762 struct bfa_fcs_rport_s
*fcs_rport
;
765 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
766 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
767 iocmd
->vf_id
, iocmd
->pwwn
);
768 if (fcs_port
== NULL
) {
769 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
770 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
774 fcs_rport
= bfa_fcs_rport_lookup(fcs_port
, iocmd
->rpwwn
);
775 if (fcs_rport
== NULL
) {
776 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
777 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
781 fcs_rport
->rpf
.assigned_speed
= iocmd
->speed
;
782 /* Set this speed in f/w only if the RPSC speed is not available */
783 if (fcs_rport
->rpf
.rpsc_speed
== BFA_PORT_SPEED_UNKNOWN
)
784 if (fcs_rport
->bfa_rport
)
785 bfa_rport_speed(fcs_rport
->bfa_rport
, iocmd
->speed
);
786 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
787 iocmd
->status
= BFA_STATUS_OK
;
793 bfad_iocmd_vport_get_attr(struct bfad_s
*bfad
, void *cmd
)
795 struct bfa_fcs_vport_s
*fcs_vport
;
796 struct bfa_bsg_vport_attr_s
*iocmd
= (struct bfa_bsg_vport_attr_s
*)cmd
;
799 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
800 fcs_vport
= bfa_fcs_vport_lookup(&bfad
->bfa_fcs
,
801 iocmd
->vf_id
, iocmd
->vpwwn
);
802 if (fcs_vport
== NULL
) {
803 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
804 iocmd
->status
= BFA_STATUS_UNKNOWN_VWWN
;
808 bfa_fcs_vport_get_attr(fcs_vport
, &iocmd
->vport_attr
);
809 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
810 iocmd
->status
= BFA_STATUS_OK
;
816 bfad_iocmd_vport_get_stats(struct bfad_s
*bfad
, void *cmd
)
818 struct bfa_fcs_vport_s
*fcs_vport
;
819 struct bfa_bsg_vport_stats_s
*iocmd
=
820 (struct bfa_bsg_vport_stats_s
*)cmd
;
823 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
824 fcs_vport
= bfa_fcs_vport_lookup(&bfad
->bfa_fcs
,
825 iocmd
->vf_id
, iocmd
->vpwwn
);
826 if (fcs_vport
== NULL
) {
827 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
828 iocmd
->status
= BFA_STATUS_UNKNOWN_VWWN
;
832 memcpy((void *)&iocmd
->vport_stats
, (void *)&fcs_vport
->vport_stats
,
833 sizeof(struct bfa_vport_stats_s
));
834 memcpy((void *)&iocmd
->vport_stats
.port_stats
,
835 (void *)&fcs_vport
->lport
.stats
,
836 sizeof(struct bfa_lport_stats_s
));
837 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
838 iocmd
->status
= BFA_STATUS_OK
;
844 bfad_iocmd_vport_clr_stats(struct bfad_s
*bfad
, void *cmd
)
846 struct bfa_fcs_vport_s
*fcs_vport
;
847 struct bfa_bsg_reset_stats_s
*iocmd
=
848 (struct bfa_bsg_reset_stats_s
*)cmd
;
851 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
852 fcs_vport
= bfa_fcs_vport_lookup(&bfad
->bfa_fcs
,
853 iocmd
->vf_id
, iocmd
->vpwwn
);
854 if (fcs_vport
== NULL
) {
855 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
856 iocmd
->status
= BFA_STATUS_UNKNOWN_VWWN
;
860 memset(&fcs_vport
->vport_stats
, 0, sizeof(struct bfa_vport_stats_s
));
861 memset(&fcs_vport
->lport
.stats
, 0, sizeof(struct bfa_lport_stats_s
));
862 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
863 iocmd
->status
= BFA_STATUS_OK
;
869 bfad_iocmd_fabric_get_lports(struct bfad_s
*bfad
, void *cmd
,
870 unsigned int payload_len
)
872 struct bfa_bsg_fabric_get_lports_s
*iocmd
=
873 (struct bfa_bsg_fabric_get_lports_s
*)cmd
;
874 bfa_fcs_vf_t
*fcs_vf
;
875 uint32_t nports
= iocmd
->nports
;
880 iocmd
->status
= BFA_STATUS_EINVAL
;
884 if (bfad_chk_iocmd_sz(payload_len
,
885 sizeof(struct bfa_bsg_fabric_get_lports_s
),
886 sizeof(wwn_t
) * iocmd
->nports
) != BFA_STATUS_OK
) {
887 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
891 iocmd_bufptr
= (char *)iocmd
+
892 sizeof(struct bfa_bsg_fabric_get_lports_s
);
894 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
895 fcs_vf
= bfa_fcs_vf_lookup(&bfad
->bfa_fcs
, iocmd
->vf_id
);
896 if (fcs_vf
== NULL
) {
897 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
898 iocmd
->status
= BFA_STATUS_UNKNOWN_VFID
;
901 bfa_fcs_vf_get_ports(fcs_vf
, (wwn_t
*)iocmd_bufptr
, &nports
);
902 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
904 iocmd
->nports
= nports
;
905 iocmd
->status
= BFA_STATUS_OK
;
911 bfad_iocmd_qos_set_bw(struct bfad_s
*bfad
, void *pcmd
)
913 struct bfa_bsg_qos_bw_s
*iocmd
= (struct bfa_bsg_qos_bw_s
*)pcmd
;
916 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
917 iocmd
->status
= bfa_fcport_set_qos_bw(&bfad
->bfa
, &iocmd
->qos_bw
);
918 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
924 bfad_iocmd_ratelim(struct bfad_s
*bfad
, unsigned int cmd
, void *pcmd
)
926 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)pcmd
;
927 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(&bfad
->bfa
);
930 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
932 if ((fcport
->cfg
.topology
== BFA_PORT_TOPOLOGY_LOOP
) &&
933 (fcport
->topology
== BFA_PORT_TOPOLOGY_LOOP
))
934 iocmd
->status
= BFA_STATUS_TOPOLOGY_LOOP
;
936 if (cmd
== IOCMD_RATELIM_ENABLE
)
937 fcport
->cfg
.ratelimit
= BFA_TRUE
;
938 else if (cmd
== IOCMD_RATELIM_DISABLE
)
939 fcport
->cfg
.ratelimit
= BFA_FALSE
;
941 if (fcport
->cfg
.trl_def_speed
== BFA_PORT_SPEED_UNKNOWN
)
942 fcport
->cfg
.trl_def_speed
= BFA_PORT_SPEED_1GBPS
;
944 iocmd
->status
= BFA_STATUS_OK
;
947 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
953 bfad_iocmd_ratelim_speed(struct bfad_s
*bfad
, unsigned int cmd
, void *pcmd
)
955 struct bfa_bsg_trl_speed_s
*iocmd
= (struct bfa_bsg_trl_speed_s
*)pcmd
;
956 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(&bfad
->bfa
);
959 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
961 /* Auto and speeds greater than the supported speed, are invalid */
962 if ((iocmd
->speed
== BFA_PORT_SPEED_AUTO
) ||
963 (iocmd
->speed
> fcport
->speed_sup
)) {
964 iocmd
->status
= BFA_STATUS_UNSUPP_SPEED
;
965 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
969 if ((fcport
->cfg
.topology
== BFA_PORT_TOPOLOGY_LOOP
) &&
970 (fcport
->topology
== BFA_PORT_TOPOLOGY_LOOP
))
971 iocmd
->status
= BFA_STATUS_TOPOLOGY_LOOP
;
973 fcport
->cfg
.trl_def_speed
= iocmd
->speed
;
974 iocmd
->status
= BFA_STATUS_OK
;
976 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
982 bfad_iocmd_cfg_fcpim(struct bfad_s
*bfad
, void *cmd
)
984 struct bfa_bsg_fcpim_s
*iocmd
= (struct bfa_bsg_fcpim_s
*)cmd
;
987 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
988 bfa_fcpim_path_tov_set(&bfad
->bfa
, iocmd
->param
);
989 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
990 iocmd
->status
= BFA_STATUS_OK
;
995 bfad_iocmd_fcpim_get_modstats(struct bfad_s
*bfad
, void *cmd
)
997 struct bfa_bsg_fcpim_modstats_s
*iocmd
=
998 (struct bfa_bsg_fcpim_modstats_s
*)cmd
;
999 struct bfa_fcpim_s
*fcpim
= BFA_FCPIM(&bfad
->bfa
);
1000 struct list_head
*qe
, *qen
;
1001 struct bfa_itnim_s
*itnim
;
1002 unsigned long flags
;
1004 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1005 /* accumulate IO stats from itnim */
1006 memset((void *)&iocmd
->modstats
, 0, sizeof(struct bfa_itnim_iostats_s
));
1007 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
1008 itnim
= (struct bfa_itnim_s
*) qe
;
1009 bfa_fcpim_add_stats(&iocmd
->modstats
, &(itnim
->stats
));
1011 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1012 iocmd
->status
= BFA_STATUS_OK
;
1017 bfad_iocmd_fcpim_clr_modstats(struct bfad_s
*bfad
, void *cmd
)
1019 struct bfa_bsg_fcpim_modstatsclr_s
*iocmd
=
1020 (struct bfa_bsg_fcpim_modstatsclr_s
*)cmd
;
1021 struct bfa_fcpim_s
*fcpim
= BFA_FCPIM(&bfad
->bfa
);
1022 struct list_head
*qe
, *qen
;
1023 struct bfa_itnim_s
*itnim
;
1024 unsigned long flags
;
1026 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1027 list_for_each_safe(qe
, qen
, &fcpim
->itnim_q
) {
1028 itnim
= (struct bfa_itnim_s
*) qe
;
1029 bfa_itnim_clear_stats(itnim
);
1031 memset(&fcpim
->del_itn_stats
, 0,
1032 sizeof(struct bfa_fcpim_del_itn_stats_s
));
1033 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1034 iocmd
->status
= BFA_STATUS_OK
;
1039 bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s
*bfad
, void *cmd
)
1041 struct bfa_bsg_fcpim_del_itn_stats_s
*iocmd
=
1042 (struct bfa_bsg_fcpim_del_itn_stats_s
*)cmd
;
1043 struct bfa_fcpim_s
*fcpim
= BFA_FCPIM(&bfad
->bfa
);
1044 unsigned long flags
;
1046 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1047 memcpy((void *)&iocmd
->modstats
, (void *)&fcpim
->del_itn_stats
,
1048 sizeof(struct bfa_fcpim_del_itn_stats_s
));
1049 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1051 iocmd
->status
= BFA_STATUS_OK
;
1056 bfad_iocmd_itnim_get_attr(struct bfad_s
*bfad
, void *cmd
)
1058 struct bfa_bsg_itnim_attr_s
*iocmd
= (struct bfa_bsg_itnim_attr_s
*)cmd
;
1059 struct bfa_fcs_lport_s
*fcs_port
;
1060 unsigned long flags
;
1062 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1063 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
1064 iocmd
->vf_id
, iocmd
->lpwwn
);
1066 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
1068 iocmd
->status
= bfa_fcs_itnim_attr_get(fcs_port
,
1069 iocmd
->rpwwn
, &iocmd
->attr
);
1070 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1075 bfad_iocmd_itnim_get_iostats(struct bfad_s
*bfad
, void *cmd
)
1077 struct bfa_bsg_itnim_iostats_s
*iocmd
=
1078 (struct bfa_bsg_itnim_iostats_s
*)cmd
;
1079 struct bfa_fcs_lport_s
*fcs_port
;
1080 struct bfa_fcs_itnim_s
*itnim
;
1081 unsigned long flags
;
1083 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1084 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
1085 iocmd
->vf_id
, iocmd
->lpwwn
);
1087 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
1090 itnim
= bfa_fcs_itnim_lookup(fcs_port
, iocmd
->rpwwn
);
1092 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
1094 iocmd
->status
= BFA_STATUS_OK
;
1095 if (bfa_fcs_itnim_get_halitn(itnim
))
1096 memcpy((void *)&iocmd
->iostats
, (void *)
1097 &(bfa_fcs_itnim_get_halitn(itnim
)->stats
),
1098 sizeof(struct bfa_itnim_iostats_s
));
1101 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1106 bfad_iocmd_itnim_reset_stats(struct bfad_s
*bfad
, void *cmd
)
1108 struct bfa_bsg_rport_reset_stats_s
*iocmd
=
1109 (struct bfa_bsg_rport_reset_stats_s
*)cmd
;
1110 struct bfa_fcs_lport_s
*fcs_port
;
1111 struct bfa_fcs_itnim_s
*itnim
;
1112 unsigned long flags
;
1114 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1115 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
1116 iocmd
->vf_id
, iocmd
->pwwn
);
1118 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
1120 itnim
= bfa_fcs_itnim_lookup(fcs_port
, iocmd
->rpwwn
);
1122 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
1124 iocmd
->status
= BFA_STATUS_OK
;
1125 bfa_fcs_itnim_stats_clear(fcs_port
, iocmd
->rpwwn
);
1126 bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim
));
1129 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1135 bfad_iocmd_itnim_get_itnstats(struct bfad_s
*bfad
, void *cmd
)
1137 struct bfa_bsg_itnim_itnstats_s
*iocmd
=
1138 (struct bfa_bsg_itnim_itnstats_s
*)cmd
;
1139 struct bfa_fcs_lport_s
*fcs_port
;
1140 struct bfa_fcs_itnim_s
*itnim
;
1141 unsigned long flags
;
1143 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1144 fcs_port
= bfa_fcs_lookup_port(&bfad
->bfa_fcs
,
1145 iocmd
->vf_id
, iocmd
->lpwwn
);
1147 iocmd
->status
= BFA_STATUS_UNKNOWN_LWWN
;
1150 itnim
= bfa_fcs_itnim_lookup(fcs_port
, iocmd
->rpwwn
);
1152 iocmd
->status
= BFA_STATUS_UNKNOWN_RWWN
;
1154 iocmd
->status
= BFA_STATUS_OK
;
1155 bfa_fcs_itnim_stats_get(fcs_port
, iocmd
->rpwwn
,
1159 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1164 bfad_iocmd_fcport_enable(struct bfad_s
*bfad
, void *cmd
)
1166 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
1167 unsigned long flags
;
1169 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1170 iocmd
->status
= bfa_fcport_enable(&bfad
->bfa
);
1171 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1177 bfad_iocmd_fcport_disable(struct bfad_s
*bfad
, void *cmd
)
1179 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
1180 unsigned long flags
;
1182 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1183 iocmd
->status
= bfa_fcport_disable(&bfad
->bfa
);
1184 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1190 bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s
*bfad
, void *cmd
)
1192 struct bfa_bsg_pcifn_cfg_s
*iocmd
= (struct bfa_bsg_pcifn_cfg_s
*)cmd
;
1193 struct bfad_hal_comp fcomp
;
1194 unsigned long flags
;
1196 init_completion(&fcomp
.comp
);
1197 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1198 iocmd
->status
= bfa_ablk_query(&bfad
->bfa
.modules
.ablk
,
1200 bfad_hcb_comp
, &fcomp
);
1201 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1202 if (iocmd
->status
!= BFA_STATUS_OK
)
1205 wait_for_completion(&fcomp
.comp
);
1206 iocmd
->status
= fcomp
.status
;
1212 bfad_iocmd_pcifn_create(struct bfad_s
*bfad
, void *cmd
)
1214 struct bfa_bsg_pcifn_s
*iocmd
= (struct bfa_bsg_pcifn_s
*)cmd
;
1215 struct bfad_hal_comp fcomp
;
1216 unsigned long flags
;
1218 init_completion(&fcomp
.comp
);
1219 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1220 iocmd
->status
= bfa_ablk_pf_create(&bfad
->bfa
.modules
.ablk
,
1221 &iocmd
->pcifn_id
, iocmd
->port
,
1222 iocmd
->pcifn_class
, iocmd
->bw_min
,
1223 iocmd
->bw_max
, bfad_hcb_comp
, &fcomp
);
1224 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1225 if (iocmd
->status
!= BFA_STATUS_OK
)
1228 wait_for_completion(&fcomp
.comp
);
1229 iocmd
->status
= fcomp
.status
;
1235 bfad_iocmd_pcifn_delete(struct bfad_s
*bfad
, void *cmd
)
1237 struct bfa_bsg_pcifn_s
*iocmd
= (struct bfa_bsg_pcifn_s
*)cmd
;
1238 struct bfad_hal_comp fcomp
;
1239 unsigned long flags
;
1241 init_completion(&fcomp
.comp
);
1242 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1243 iocmd
->status
= bfa_ablk_pf_delete(&bfad
->bfa
.modules
.ablk
,
1245 bfad_hcb_comp
, &fcomp
);
1246 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1247 if (iocmd
->status
!= BFA_STATUS_OK
)
1250 wait_for_completion(&fcomp
.comp
);
1251 iocmd
->status
= fcomp
.status
;
1257 bfad_iocmd_pcifn_bw(struct bfad_s
*bfad
, void *cmd
)
1259 struct bfa_bsg_pcifn_s
*iocmd
= (struct bfa_bsg_pcifn_s
*)cmd
;
1260 struct bfad_hal_comp fcomp
;
1261 unsigned long flags
;
1263 init_completion(&fcomp
.comp
);
1264 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1265 iocmd
->status
= bfa_ablk_pf_update(&bfad
->bfa
.modules
.ablk
,
1266 iocmd
->pcifn_id
, iocmd
->bw_min
,
1267 iocmd
->bw_max
, bfad_hcb_comp
, &fcomp
);
1268 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1269 bfa_trc(bfad
, iocmd
->status
);
1270 if (iocmd
->status
!= BFA_STATUS_OK
)
1273 wait_for_completion(&fcomp
.comp
);
1274 iocmd
->status
= fcomp
.status
;
1275 bfa_trc(bfad
, iocmd
->status
);
1281 bfad_iocmd_adapter_cfg_mode(struct bfad_s
*bfad
, void *cmd
)
1283 struct bfa_bsg_adapter_cfg_mode_s
*iocmd
=
1284 (struct bfa_bsg_adapter_cfg_mode_s
*)cmd
;
1285 struct bfad_hal_comp fcomp
;
1286 unsigned long flags
= 0;
1288 init_completion(&fcomp
.comp
);
1289 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1290 iocmd
->status
= bfa_ablk_adapter_config(&bfad
->bfa
.modules
.ablk
,
1291 iocmd
->cfg
.mode
, iocmd
->cfg
.max_pf
,
1292 iocmd
->cfg
.max_vf
, bfad_hcb_comp
, &fcomp
);
1293 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1294 if (iocmd
->status
!= BFA_STATUS_OK
)
1297 wait_for_completion(&fcomp
.comp
);
1298 iocmd
->status
= fcomp
.status
;
1304 bfad_iocmd_port_cfg_mode(struct bfad_s
*bfad
, void *cmd
)
1306 struct bfa_bsg_port_cfg_mode_s
*iocmd
=
1307 (struct bfa_bsg_port_cfg_mode_s
*)cmd
;
1308 struct bfad_hal_comp fcomp
;
1309 unsigned long flags
= 0;
1311 init_completion(&fcomp
.comp
);
1312 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1313 iocmd
->status
= bfa_ablk_port_config(&bfad
->bfa
.modules
.ablk
,
1314 iocmd
->instance
, iocmd
->cfg
.mode
,
1315 iocmd
->cfg
.max_pf
, iocmd
->cfg
.max_vf
,
1316 bfad_hcb_comp
, &fcomp
);
1317 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1318 if (iocmd
->status
!= BFA_STATUS_OK
)
1321 wait_for_completion(&fcomp
.comp
);
1322 iocmd
->status
= fcomp
.status
;
1328 bfad_iocmd_ablk_optrom(struct bfad_s
*bfad
, unsigned int cmd
, void *pcmd
)
1330 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)pcmd
;
1331 struct bfad_hal_comp fcomp
;
1332 unsigned long flags
;
1334 init_completion(&fcomp
.comp
);
1335 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1336 if (cmd
== IOCMD_FLASH_ENABLE_OPTROM
)
1337 iocmd
->status
= bfa_ablk_optrom_en(&bfad
->bfa
.modules
.ablk
,
1338 bfad_hcb_comp
, &fcomp
);
1340 iocmd
->status
= bfa_ablk_optrom_dis(&bfad
->bfa
.modules
.ablk
,
1341 bfad_hcb_comp
, &fcomp
);
1342 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1344 if (iocmd
->status
!= BFA_STATUS_OK
)
1347 wait_for_completion(&fcomp
.comp
);
1348 iocmd
->status
= fcomp
.status
;
1354 bfad_iocmd_faa_query(struct bfad_s
*bfad
, void *cmd
)
1356 struct bfa_bsg_faa_attr_s
*iocmd
= (struct bfa_bsg_faa_attr_s
*)cmd
;
1357 struct bfad_hal_comp fcomp
;
1358 unsigned long flags
;
1360 init_completion(&fcomp
.comp
);
1361 iocmd
->status
= BFA_STATUS_OK
;
1362 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1363 iocmd
->status
= bfa_faa_query(&bfad
->bfa
, &iocmd
->faa_attr
,
1364 bfad_hcb_comp
, &fcomp
);
1365 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1367 if (iocmd
->status
!= BFA_STATUS_OK
)
1370 wait_for_completion(&fcomp
.comp
);
1371 iocmd
->status
= fcomp
.status
;
1377 bfad_iocmd_cee_attr(struct bfad_s
*bfad
, void *cmd
, unsigned int payload_len
)
1379 struct bfa_bsg_cee_attr_s
*iocmd
=
1380 (struct bfa_bsg_cee_attr_s
*)cmd
;
1382 struct bfad_hal_comp cee_comp
;
1383 unsigned long flags
;
1385 if (bfad_chk_iocmd_sz(payload_len
,
1386 sizeof(struct bfa_bsg_cee_attr_s
),
1387 sizeof(struct bfa_cee_attr_s
)) != BFA_STATUS_OK
) {
1388 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1392 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_cee_attr_s
);
1394 cee_comp
.status
= 0;
1395 init_completion(&cee_comp
.comp
);
1396 mutex_lock(&bfad_mutex
);
1397 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1398 iocmd
->status
= bfa_cee_get_attr(&bfad
->bfa
.modules
.cee
, iocmd_bufptr
,
1399 bfad_hcb_comp
, &cee_comp
);
1400 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1401 if (iocmd
->status
!= BFA_STATUS_OK
) {
1402 mutex_unlock(&bfad_mutex
);
1403 bfa_trc(bfad
, 0x5555);
1406 wait_for_completion(&cee_comp
.comp
);
1407 mutex_unlock(&bfad_mutex
);
1413 bfad_iocmd_cee_get_stats(struct bfad_s
*bfad
, void *cmd
,
1414 unsigned int payload_len
)
1416 struct bfa_bsg_cee_stats_s
*iocmd
=
1417 (struct bfa_bsg_cee_stats_s
*)cmd
;
1419 struct bfad_hal_comp cee_comp
;
1420 unsigned long flags
;
1422 if (bfad_chk_iocmd_sz(payload_len
,
1423 sizeof(struct bfa_bsg_cee_stats_s
),
1424 sizeof(struct bfa_cee_stats_s
)) != BFA_STATUS_OK
) {
1425 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1429 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_cee_stats_s
);
1431 cee_comp
.status
= 0;
1432 init_completion(&cee_comp
.comp
);
1433 mutex_lock(&bfad_mutex
);
1434 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1435 iocmd
->status
= bfa_cee_get_stats(&bfad
->bfa
.modules
.cee
, iocmd_bufptr
,
1436 bfad_hcb_comp
, &cee_comp
);
1437 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1438 if (iocmd
->status
!= BFA_STATUS_OK
) {
1439 mutex_unlock(&bfad_mutex
);
1440 bfa_trc(bfad
, 0x5555);
1443 wait_for_completion(&cee_comp
.comp
);
1444 mutex_unlock(&bfad_mutex
);
1450 bfad_iocmd_cee_reset_stats(struct bfad_s
*bfad
, void *cmd
)
1452 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)cmd
;
1453 unsigned long flags
;
1455 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1456 iocmd
->status
= bfa_cee_reset_stats(&bfad
->bfa
.modules
.cee
, NULL
, NULL
);
1457 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1458 if (iocmd
->status
!= BFA_STATUS_OK
)
1459 bfa_trc(bfad
, 0x5555);
1464 bfad_iocmd_sfp_media(struct bfad_s
*bfad
, void *cmd
)
1466 struct bfa_bsg_sfp_media_s
*iocmd
= (struct bfa_bsg_sfp_media_s
*)cmd
;
1467 struct bfad_hal_comp fcomp
;
1468 unsigned long flags
;
1470 init_completion(&fcomp
.comp
);
1471 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1472 iocmd
->status
= bfa_sfp_media(BFA_SFP_MOD(&bfad
->bfa
), &iocmd
->media
,
1473 bfad_hcb_comp
, &fcomp
);
1474 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1475 bfa_trc(bfad
, iocmd
->status
);
1476 if (iocmd
->status
!= BFA_STATUS_SFP_NOT_READY
)
1479 wait_for_completion(&fcomp
.comp
);
1480 iocmd
->status
= fcomp
.status
;
1486 bfad_iocmd_sfp_speed(struct bfad_s
*bfad
, void *cmd
)
1488 struct bfa_bsg_sfp_speed_s
*iocmd
= (struct bfa_bsg_sfp_speed_s
*)cmd
;
1489 struct bfad_hal_comp fcomp
;
1490 unsigned long flags
;
1492 init_completion(&fcomp
.comp
);
1493 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1494 iocmd
->status
= bfa_sfp_speed(BFA_SFP_MOD(&bfad
->bfa
), iocmd
->speed
,
1495 bfad_hcb_comp
, &fcomp
);
1496 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1497 bfa_trc(bfad
, iocmd
->status
);
1498 if (iocmd
->status
!= BFA_STATUS_SFP_NOT_READY
)
1500 wait_for_completion(&fcomp
.comp
);
1501 iocmd
->status
= fcomp
.status
;
1507 bfad_iocmd_flash_get_attr(struct bfad_s
*bfad
, void *cmd
)
1509 struct bfa_bsg_flash_attr_s
*iocmd
=
1510 (struct bfa_bsg_flash_attr_s
*)cmd
;
1511 struct bfad_hal_comp fcomp
;
1512 unsigned long flags
;
1514 init_completion(&fcomp
.comp
);
1515 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1516 iocmd
->status
= bfa_flash_get_attr(BFA_FLASH(&bfad
->bfa
), &iocmd
->attr
,
1517 bfad_hcb_comp
, &fcomp
);
1518 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1519 if (iocmd
->status
!= BFA_STATUS_OK
)
1521 wait_for_completion(&fcomp
.comp
);
1522 iocmd
->status
= fcomp
.status
;
1528 bfad_iocmd_flash_erase_part(struct bfad_s
*bfad
, void *cmd
)
1530 struct bfa_bsg_flash_s
*iocmd
= (struct bfa_bsg_flash_s
*)cmd
;
1531 struct bfad_hal_comp fcomp
;
1532 unsigned long flags
;
1534 init_completion(&fcomp
.comp
);
1535 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1536 iocmd
->status
= bfa_flash_erase_part(BFA_FLASH(&bfad
->bfa
), iocmd
->type
,
1537 iocmd
->instance
, bfad_hcb_comp
, &fcomp
);
1538 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1539 if (iocmd
->status
!= BFA_STATUS_OK
)
1541 wait_for_completion(&fcomp
.comp
);
1542 iocmd
->status
= fcomp
.status
;
1548 bfad_iocmd_flash_update_part(struct bfad_s
*bfad
, void *cmd
,
1549 unsigned int payload_len
)
1551 struct bfa_bsg_flash_s
*iocmd
= (struct bfa_bsg_flash_s
*)cmd
;
1553 struct bfad_hal_comp fcomp
;
1554 unsigned long flags
;
1556 if (bfad_chk_iocmd_sz(payload_len
,
1557 sizeof(struct bfa_bsg_flash_s
),
1558 iocmd
->bufsz
) != BFA_STATUS_OK
) {
1559 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1563 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_flash_s
);
1565 init_completion(&fcomp
.comp
);
1566 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1567 iocmd
->status
= bfa_flash_update_part(BFA_FLASH(&bfad
->bfa
),
1568 iocmd
->type
, iocmd
->instance
, iocmd_bufptr
,
1569 iocmd
->bufsz
, 0, bfad_hcb_comp
, &fcomp
);
1570 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1571 if (iocmd
->status
!= BFA_STATUS_OK
)
1573 wait_for_completion(&fcomp
.comp
);
1574 iocmd
->status
= fcomp
.status
;
1580 bfad_iocmd_flash_read_part(struct bfad_s
*bfad
, void *cmd
,
1581 unsigned int payload_len
)
1583 struct bfa_bsg_flash_s
*iocmd
= (struct bfa_bsg_flash_s
*)cmd
;
1584 struct bfad_hal_comp fcomp
;
1586 unsigned long flags
;
1588 if (bfad_chk_iocmd_sz(payload_len
,
1589 sizeof(struct bfa_bsg_flash_s
),
1590 iocmd
->bufsz
) != BFA_STATUS_OK
) {
1591 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1595 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_flash_s
);
1597 init_completion(&fcomp
.comp
);
1598 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1599 iocmd
->status
= bfa_flash_read_part(BFA_FLASH(&bfad
->bfa
), iocmd
->type
,
1600 iocmd
->instance
, iocmd_bufptr
, iocmd
->bufsz
, 0,
1601 bfad_hcb_comp
, &fcomp
);
1602 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1603 if (iocmd
->status
!= BFA_STATUS_OK
)
1605 wait_for_completion(&fcomp
.comp
);
1606 iocmd
->status
= fcomp
.status
;
1612 bfad_iocmd_diag_temp(struct bfad_s
*bfad
, void *cmd
)
1614 struct bfa_bsg_diag_get_temp_s
*iocmd
=
1615 (struct bfa_bsg_diag_get_temp_s
*)cmd
;
1616 struct bfad_hal_comp fcomp
;
1617 unsigned long flags
;
1619 init_completion(&fcomp
.comp
);
1620 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1621 iocmd
->status
= bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad
->bfa
),
1622 &iocmd
->result
, bfad_hcb_comp
, &fcomp
);
1623 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1624 bfa_trc(bfad
, iocmd
->status
);
1625 if (iocmd
->status
!= BFA_STATUS_OK
)
1627 wait_for_completion(&fcomp
.comp
);
1628 iocmd
->status
= fcomp
.status
;
1634 bfad_iocmd_diag_memtest(struct bfad_s
*bfad
, void *cmd
)
1636 struct bfa_bsg_diag_memtest_s
*iocmd
=
1637 (struct bfa_bsg_diag_memtest_s
*)cmd
;
1638 struct bfad_hal_comp fcomp
;
1639 unsigned long flags
;
1641 init_completion(&fcomp
.comp
);
1642 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1643 iocmd
->status
= bfa_diag_memtest(BFA_DIAG_MOD(&bfad
->bfa
),
1644 &iocmd
->memtest
, iocmd
->pat
,
1645 &iocmd
->result
, bfad_hcb_comp
, &fcomp
);
1646 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1647 bfa_trc(bfad
, iocmd
->status
);
1648 if (iocmd
->status
!= BFA_STATUS_OK
)
1650 wait_for_completion(&fcomp
.comp
);
1651 iocmd
->status
= fcomp
.status
;
1657 bfad_iocmd_diag_loopback(struct bfad_s
*bfad
, void *cmd
)
1659 struct bfa_bsg_diag_loopback_s
*iocmd
=
1660 (struct bfa_bsg_diag_loopback_s
*)cmd
;
1661 struct bfad_hal_comp fcomp
;
1662 unsigned long flags
;
1664 init_completion(&fcomp
.comp
);
1665 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1666 iocmd
->status
= bfa_fcdiag_loopback(&bfad
->bfa
, iocmd
->opmode
,
1667 iocmd
->speed
, iocmd
->lpcnt
, iocmd
->pat
,
1668 &iocmd
->result
, bfad_hcb_comp
, &fcomp
);
1669 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1670 bfa_trc(bfad
, iocmd
->status
);
1671 if (iocmd
->status
!= BFA_STATUS_OK
)
1673 wait_for_completion(&fcomp
.comp
);
1674 iocmd
->status
= fcomp
.status
;
1680 bfad_iocmd_diag_fwping(struct bfad_s
*bfad
, void *cmd
)
1682 struct bfa_bsg_diag_fwping_s
*iocmd
=
1683 (struct bfa_bsg_diag_fwping_s
*)cmd
;
1684 struct bfad_hal_comp fcomp
;
1685 unsigned long flags
;
1687 init_completion(&fcomp
.comp
);
1688 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1689 iocmd
->status
= bfa_diag_fwping(BFA_DIAG_MOD(&bfad
->bfa
), iocmd
->cnt
,
1690 iocmd
->pattern
, &iocmd
->result
,
1691 bfad_hcb_comp
, &fcomp
);
1692 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1693 bfa_trc(bfad
, iocmd
->status
);
1694 if (iocmd
->status
!= BFA_STATUS_OK
)
1696 bfa_trc(bfad
, 0x77771);
1697 wait_for_completion(&fcomp
.comp
);
1698 iocmd
->status
= fcomp
.status
;
1704 bfad_iocmd_diag_queuetest(struct bfad_s
*bfad
, void *cmd
)
1706 struct bfa_bsg_diag_qtest_s
*iocmd
= (struct bfa_bsg_diag_qtest_s
*)cmd
;
1707 struct bfad_hal_comp fcomp
;
1708 unsigned long flags
;
1710 init_completion(&fcomp
.comp
);
1711 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1712 iocmd
->status
= bfa_fcdiag_queuetest(&bfad
->bfa
, iocmd
->force
,
1713 iocmd
->queue
, &iocmd
->result
,
1714 bfad_hcb_comp
, &fcomp
);
1715 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1716 if (iocmd
->status
!= BFA_STATUS_OK
)
1718 wait_for_completion(&fcomp
.comp
);
1719 iocmd
->status
= fcomp
.status
;
1725 bfad_iocmd_diag_sfp(struct bfad_s
*bfad
, void *cmd
)
1727 struct bfa_bsg_sfp_show_s
*iocmd
=
1728 (struct bfa_bsg_sfp_show_s
*)cmd
;
1729 struct bfad_hal_comp fcomp
;
1730 unsigned long flags
;
1732 init_completion(&fcomp
.comp
);
1733 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1734 iocmd
->status
= bfa_sfp_show(BFA_SFP_MOD(&bfad
->bfa
), &iocmd
->sfp
,
1735 bfad_hcb_comp
, &fcomp
);
1736 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1737 bfa_trc(bfad
, iocmd
->status
);
1738 if (iocmd
->status
!= BFA_STATUS_OK
)
1740 wait_for_completion(&fcomp
.comp
);
1741 iocmd
->status
= fcomp
.status
;
1742 bfa_trc(bfad
, iocmd
->status
);
1748 bfad_iocmd_diag_led(struct bfad_s
*bfad
, void *cmd
)
1750 struct bfa_bsg_diag_led_s
*iocmd
= (struct bfa_bsg_diag_led_s
*)cmd
;
1751 unsigned long flags
;
1753 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1754 iocmd
->status
= bfa_diag_ledtest(BFA_DIAG_MOD(&bfad
->bfa
),
1756 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1761 bfad_iocmd_diag_beacon_lport(struct bfad_s
*bfad
, void *cmd
)
1763 struct bfa_bsg_diag_beacon_s
*iocmd
=
1764 (struct bfa_bsg_diag_beacon_s
*)cmd
;
1765 unsigned long flags
;
1767 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1768 iocmd
->status
= bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad
->bfa
),
1769 iocmd
->beacon
, iocmd
->link_e2e_beacon
,
1771 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1776 bfad_iocmd_diag_lb_stat(struct bfad_s
*bfad
, void *cmd
)
1778 struct bfa_bsg_diag_lb_stat_s
*iocmd
=
1779 (struct bfa_bsg_diag_lb_stat_s
*)cmd
;
1780 unsigned long flags
;
1782 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1783 iocmd
->status
= bfa_fcdiag_lb_is_running(&bfad
->bfa
);
1784 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1785 bfa_trc(bfad
, iocmd
->status
);
1791 bfad_iocmd_diag_dport_enable(struct bfad_s
*bfad
, void *pcmd
)
1793 struct bfa_bsg_dport_enable_s
*iocmd
=
1794 (struct bfa_bsg_dport_enable_s
*)pcmd
;
1795 unsigned long flags
;
1796 struct bfad_hal_comp fcomp
;
1798 init_completion(&fcomp
.comp
);
1799 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1800 iocmd
->status
= bfa_dport_enable(&bfad
->bfa
, iocmd
->lpcnt
,
1801 iocmd
->pat
, bfad_hcb_comp
, &fcomp
);
1802 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1803 if (iocmd
->status
!= BFA_STATUS_OK
)
1804 bfa_trc(bfad
, iocmd
->status
);
1806 wait_for_completion(&fcomp
.comp
);
1807 iocmd
->status
= fcomp
.status
;
1813 bfad_iocmd_diag_dport_disable(struct bfad_s
*bfad
, void *pcmd
)
1815 struct bfa_bsg_gen_s
*iocmd
= (struct bfa_bsg_gen_s
*)pcmd
;
1816 unsigned long flags
;
1817 struct bfad_hal_comp fcomp
;
1819 init_completion(&fcomp
.comp
);
1820 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1821 iocmd
->status
= bfa_dport_disable(&bfad
->bfa
, bfad_hcb_comp
, &fcomp
);
1822 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1823 if (iocmd
->status
!= BFA_STATUS_OK
)
1824 bfa_trc(bfad
, iocmd
->status
);
1826 wait_for_completion(&fcomp
.comp
);
1827 iocmd
->status
= fcomp
.status
;
1833 bfad_iocmd_diag_dport_start(struct bfad_s
*bfad
, void *pcmd
)
1835 struct bfa_bsg_dport_enable_s
*iocmd
=
1836 (struct bfa_bsg_dport_enable_s
*)pcmd
;
1837 unsigned long flags
;
1838 struct bfad_hal_comp fcomp
;
1840 init_completion(&fcomp
.comp
);
1841 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1842 iocmd
->status
= bfa_dport_start(&bfad
->bfa
, iocmd
->lpcnt
,
1843 iocmd
->pat
, bfad_hcb_comp
,
1845 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1847 if (iocmd
->status
!= BFA_STATUS_OK
) {
1848 bfa_trc(bfad
, iocmd
->status
);
1850 wait_for_completion(&fcomp
.comp
);
1851 iocmd
->status
= fcomp
.status
;
1858 bfad_iocmd_diag_dport_show(struct bfad_s
*bfad
, void *pcmd
)
1860 struct bfa_bsg_diag_dport_show_s
*iocmd
=
1861 (struct bfa_bsg_diag_dport_show_s
*)pcmd
;
1862 unsigned long flags
;
1864 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1865 iocmd
->status
= bfa_dport_show(&bfad
->bfa
, &iocmd
->result
);
1866 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1873 bfad_iocmd_phy_get_attr(struct bfad_s
*bfad
, void *cmd
)
1875 struct bfa_bsg_phy_attr_s
*iocmd
=
1876 (struct bfa_bsg_phy_attr_s
*)cmd
;
1877 struct bfad_hal_comp fcomp
;
1878 unsigned long flags
;
1880 init_completion(&fcomp
.comp
);
1881 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1882 iocmd
->status
= bfa_phy_get_attr(BFA_PHY(&bfad
->bfa
), iocmd
->instance
,
1883 &iocmd
->attr
, bfad_hcb_comp
, &fcomp
);
1884 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1885 if (iocmd
->status
!= BFA_STATUS_OK
)
1887 wait_for_completion(&fcomp
.comp
);
1888 iocmd
->status
= fcomp
.status
;
1894 bfad_iocmd_phy_get_stats(struct bfad_s
*bfad
, void *cmd
)
1896 struct bfa_bsg_phy_stats_s
*iocmd
=
1897 (struct bfa_bsg_phy_stats_s
*)cmd
;
1898 struct bfad_hal_comp fcomp
;
1899 unsigned long flags
;
1901 init_completion(&fcomp
.comp
);
1902 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1903 iocmd
->status
= bfa_phy_get_stats(BFA_PHY(&bfad
->bfa
), iocmd
->instance
,
1904 &iocmd
->stats
, bfad_hcb_comp
, &fcomp
);
1905 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1906 if (iocmd
->status
!= BFA_STATUS_OK
)
1908 wait_for_completion(&fcomp
.comp
);
1909 iocmd
->status
= fcomp
.status
;
1915 bfad_iocmd_phy_read(struct bfad_s
*bfad
, void *cmd
, unsigned int payload_len
)
1917 struct bfa_bsg_phy_s
*iocmd
= (struct bfa_bsg_phy_s
*)cmd
;
1918 struct bfad_hal_comp fcomp
;
1920 unsigned long flags
;
1922 if (bfad_chk_iocmd_sz(payload_len
,
1923 sizeof(struct bfa_bsg_phy_s
),
1924 iocmd
->bufsz
) != BFA_STATUS_OK
) {
1925 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1929 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_phy_s
);
1930 init_completion(&fcomp
.comp
);
1931 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1932 iocmd
->status
= bfa_phy_read(BFA_PHY(&bfad
->bfa
),
1933 iocmd
->instance
, iocmd_bufptr
, iocmd
->bufsz
,
1934 0, bfad_hcb_comp
, &fcomp
);
1935 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1936 if (iocmd
->status
!= BFA_STATUS_OK
)
1938 wait_for_completion(&fcomp
.comp
);
1939 iocmd
->status
= fcomp
.status
;
1940 if (iocmd
->status
!= BFA_STATUS_OK
)
1947 bfad_iocmd_vhba_query(struct bfad_s
*bfad
, void *cmd
)
1949 struct bfa_bsg_vhba_attr_s
*iocmd
=
1950 (struct bfa_bsg_vhba_attr_s
*)cmd
;
1951 struct bfa_vhba_attr_s
*attr
= &iocmd
->attr
;
1952 unsigned long flags
;
1954 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1955 attr
->pwwn
= bfad
->bfa
.ioc
.attr
->pwwn
;
1956 attr
->nwwn
= bfad
->bfa
.ioc
.attr
->nwwn
;
1957 attr
->plog_enabled
= (bfa_boolean_t
)bfad
->bfa
.plog
->plog_enabled
;
1958 attr
->io_profile
= bfa_fcpim_get_io_profile(&bfad
->bfa
);
1959 attr
->path_tov
= bfa_fcpim_path_tov_get(&bfad
->bfa
);
1960 iocmd
->status
= BFA_STATUS_OK
;
1961 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1966 bfad_iocmd_phy_update(struct bfad_s
*bfad
, void *cmd
, unsigned int payload_len
)
1968 struct bfa_bsg_phy_s
*iocmd
= (struct bfa_bsg_phy_s
*)cmd
;
1970 struct bfad_hal_comp fcomp
;
1971 unsigned long flags
;
1973 if (bfad_chk_iocmd_sz(payload_len
,
1974 sizeof(struct bfa_bsg_phy_s
),
1975 iocmd
->bufsz
) != BFA_STATUS_OK
) {
1976 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
1980 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_phy_s
);
1981 init_completion(&fcomp
.comp
);
1982 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
1983 iocmd
->status
= bfa_phy_update(BFA_PHY(&bfad
->bfa
),
1984 iocmd
->instance
, iocmd_bufptr
, iocmd
->bufsz
,
1985 0, bfad_hcb_comp
, &fcomp
);
1986 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
1987 if (iocmd
->status
!= BFA_STATUS_OK
)
1989 wait_for_completion(&fcomp
.comp
);
1990 iocmd
->status
= fcomp
.status
;
1996 bfad_iocmd_porglog_get(struct bfad_s
*bfad
, void *cmd
)
1998 struct bfa_bsg_debug_s
*iocmd
= (struct bfa_bsg_debug_s
*)cmd
;
2001 if (iocmd
->bufsz
< sizeof(struct bfa_plog_s
)) {
2002 bfa_trc(bfad
, sizeof(struct bfa_plog_s
));
2003 iocmd
->status
= BFA_STATUS_EINVAL
;
2007 iocmd
->status
= BFA_STATUS_OK
;
2008 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_debug_s
);
2009 memcpy(iocmd_bufptr
, (u8
*) &bfad
->plog_buf
, sizeof(struct bfa_plog_s
));
2014 #define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
2016 bfad_iocmd_debug_fw_core(struct bfad_s
*bfad
, void *cmd
,
2017 unsigned int payload_len
)
2019 struct bfa_bsg_debug_s
*iocmd
= (struct bfa_bsg_debug_s
*)cmd
;
2021 unsigned long flags
;
2024 if (bfad_chk_iocmd_sz(payload_len
, sizeof(struct bfa_bsg_debug_s
),
2025 BFA_DEBUG_FW_CORE_CHUNK_SZ
) != BFA_STATUS_OK
) {
2026 iocmd
->status
= BFA_STATUS_VERSION_FAIL
;
2030 if (iocmd
->bufsz
< BFA_DEBUG_FW_CORE_CHUNK_SZ
||
2031 !IS_ALIGNED(iocmd
->bufsz
, sizeof(u16
)) ||
2032 !IS_ALIGNED(iocmd
->offset
, sizeof(u32
))) {
2033 bfa_trc(bfad
, BFA_DEBUG_FW_CORE_CHUNK_SZ
);
2034 iocmd
->status
= BFA_STATUS_EINVAL
;
2038 iocmd_bufptr
= (char *)iocmd
+ sizeof(struct bfa_bsg_debug_s
);
2039 spin_lock_irqsave(&bfad
->bfad_lock
, flags
);
2040 offset
= iocmd
->offset
;
2041 iocmd
->status
= bfa_ioc_debug_fwcore(&bfad
->bfa
.ioc
, iocmd_bufptr
,
2042 &offset
, &iocmd
->bufsz
);
2043 iocmd
->offset
= offset
;
2044 spin_unlock_irqrestore(&bfad
->bfad_lock
, flags
);
int
bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
		bfa_trc_init(bfad->trcmod);
	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
		bfa_trc_stop(bfad->trcmod);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;

	if (iocmd->ctl == BFA_TRUE)
		bfad->plog_buf.plog_enabled = 1;
	else
		bfad->plog_buf.plog_enabled = 0;

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
int
bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_profile_s *iocmd =
				(struct bfa_bsg_fcpim_profile_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds());
	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
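
/* Return the I/O profile of the ITN identified by the lport/rport WWNs. */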
int
bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_ioprofile_s *iocmd =
				(struct bfa_bsg_itnim_ioprofile_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else
			iocmd->status = bfa_itnim_get_ioprofile(
					bfa_fcs_itnim_get_halitn(itnim),
					&iocmd->ioprofile);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
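
/*
 * FC port statistics get/clear: the request is queued to the firmware and
 * the handler sleeps on a completion until bfad_hcb_comp posts the status.
 */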
int
bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
			(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
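
/*
 * Boot configuration handlers: update or read the boot and pre-boot (PBC)
 * configuration kept in the flash boot partition.
 */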
int
bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
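
/*
 * Enable or disable trunking; rejected while the port is in D-Port mode,
 * and not applicable in loop topology.
 */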
int
bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if (bfa_fcport_is_dport(&bfad->bfa)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_DPORT_ERR;
	}

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		if (v_cmd == IOCMD_TRUNK_ENABLE) {
			trunk->attr.state = BFA_TRUNK_OFFLINE;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_TRUE;
		} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
			trunk->attr.state = BFA_TRUNK_DISABLED;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_FALSE;
		}

		if (!bfa_fcport_is_disabled(&bfad->bfa))
			bfa_fcport_enable(&bfad->bfa);

		iocmd->status = BFA_STATUS_OK;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
		       sizeof(struct bfa_trunk_attr_s));
		iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
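
/*
 * QOS enable/disable (FC mode only); on disable the per-priority bandwidth
 * configuration is reset to the default high/med/low values.
 */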
int
bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
		if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
			(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
			iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
		else {
			if (v_cmd == IOCMD_QOS_ENABLE)
				fcport->cfg.qos_enabled = BFA_TRUE;
			else if (v_cmd == IOCMD_QOS_DISABLE) {
				fcport->cfg.qos_enabled = BFA_FALSE;
				fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
				fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
				fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
			}
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		iocmd->attr.state = fcport->qos_attr.state;
		iocmd->attr.total_bb_cr =
			be32_to_cpu(fcport->qos_attr.total_bb_cr);
		iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
		iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
		iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
		iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
int
bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_vc_attr_s *iocmd =
			(struct bfa_bsg_qos_vc_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
	unsigned long flags;
	u32 i = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
	iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
	iocmd->attr.elp_opmode_flags =
			be32_to_cpu(bfa_vc_attr->elp_opmode_flags);

	/* Individual VC info */
	while (i < iocmd->attr.total_vc_count) {
		iocmd->attr.vc_info[i].vc_credit =
				bfa_vc_attr->vc_info[i].vc_credit;
		iocmd->attr.vc_info[i].borrow_credit =
				bfa_vc_attr->vc_info[i].borrow_credit;
		iocmd->attr.vc_info[i].priority =
				bfa_vc_attr->vc_info[i].priority;
		i++;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
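
/*
 * QOS statistics get/clear; FC mode only, using the same pending-queue and
 * completion scheme as the fcport statistics handlers above.
 */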
int
bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
			(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	init_completion(&fcomp.comp);
	bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	init_completion(&fcomp.comp);
	bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
int
bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_stats_s *iocmd =
			(struct bfa_bsg_vf_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
	       sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_reset_stats_s *iocmd =
			(struct bfa_bsg_vf_reset_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fcs_vf;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
/*
 * Set the SCSI device sdev_bflags - sdev_bflags are used by the
 * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan
 *
 * Internally iterates over all the ITNIM's part of the im_port & sets the
 * sdev_bflags for the scsi_device associated with LUN #0.
 */
static void bfad_reset_sdev_bflags(struct bfad_im_port_s *im_port,
				   int lunmask_cfg)
{
	const blist_flags_t scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;
	struct bfad_itnim_s *itnim;
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(im_port->shost->host_lock, flags);
	list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) {
		sdev = __scsi_device_lookup(im_port->shost, itnim->channel,
					    itnim->scsi_tgt_id, 0);
		if (sdev) {
			if (lunmask_cfg == BFA_TRUE)
				sdev->sdev_bflags |= scan_flags;
			else
				sdev->sdev_bflags &= ~scan_flags;
		}
	}
	spin_unlock_irqrestore(im_port->shost->host_lock, flags);
}
/* Function to reset the LUN SCAN mode */
static void
bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
{
	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
	struct bfad_vport_s *vport = NULL;

	/* Set the scsi device LUN SCAN flags for base port */
	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);

	/* Set the scsi device LUN SCAN flags for the vports */
	list_for_each_entry(vport, &bfad->vport_list, list_entry)
		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
}
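
/*
 * LUN mask enable/disable/clear. Enabling also switches the SCSI LUN scan
 * mode to sequential scan via bfad_iocmd_lunmask_reset_lunscan_mode().
 */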
int
bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
		/* Set the LUN Scanning mode to be Sequential scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
			(struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
	struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
int
bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_lunmask_s *iocmd =
			(struct bfa_bsg_fcpim_lunmask_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
		iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
					&iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
		iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
					iocmd->vf_id, &iocmd->pwwn,
					iocmd->rpwwn, iocmd->lun);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_throttle_s *iocmd =
			(struct bfa_bsg_fcpim_throttle_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
				(void *)&iocmd->throttle);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_throttle_s *iocmd =
			(struct bfa_bsg_fcpim_throttle_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
				iocmd->throttle.cfg_value);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
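
/*
 * TFRU and FRU-VPD access: read or update FRU data through the FRU module,
 * waiting on bfad_hcb_comp for the asynchronous completion.
 */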
int
bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_tfru_s *iocmd =
			(struct bfa_bsg_tfru_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_tfru_s *iocmd =
			(struct bfa_bsg_tfru_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_s *iocmd =
			(struct bfa_bsg_fruvpd_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}

int
bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_s *iocmd =
			(struct bfa_bsg_fruvpd_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
				&iocmd->data, iocmd->len, iocmd->offset,
				bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&fcomp.comp);
		iocmd->status = fcomp.status;
	}

	return 0;
}
int
bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fruvpd_max_size_s *iocmd =
			(struct bfa_bsg_fruvpd_max_size_s *)cmd;
	unsigned long flags = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
				&iocmd->max_size);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
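
/*
 * Dispatch a vendor-specific BSG command code to the matching
 * bfad_iocmd_* handler above.
 */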
static int
bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
		unsigned int payload_len)
{
	int rc = -EINVAL;

	switch (cmd) {
	case IOCMD_IOC_ENABLE:
		rc = bfad_iocmd_ioc_enable(bfad, iocmd);
		break;
	case IOCMD_IOC_DISABLE:
		rc = bfad_iocmd_ioc_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_INFO:
		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_ATTR:
		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_STATS:
		rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_FWSTATS:
		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
		break;
	case IOCMD_IOC_RESET_STATS:
	case IOCMD_IOC_RESET_FWSTATS:
		rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
		break;
	case IOCMD_IOC_SET_ADAPTER_NAME:
	case IOCMD_IOC_SET_PORT_NAME:
		rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
		break;
	case IOCMD_IOCFC_GET_ATTR:
		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOCFC_SET_INTR:
		rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
		break;
	case IOCMD_PORT_ENABLE:
		rc = bfad_iocmd_port_enable(bfad, iocmd);
		break;
	case IOCMD_PORT_DISABLE:
		rc = bfad_iocmd_port_disable(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_ATTR:
		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_STATS:
		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_PORT_RESET_STATS:
		rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_TOPO:
	case IOCMD_PORT_CFG_SPEED:
	case IOCMD_PORT_CFG_ALPA:
	case IOCMD_PORT_CLR_ALPA:
		rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
		break;
	case IOCMD_PORT_CFG_MAXFRSZ:
		rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
		break;
	case IOCMD_PORT_BBCR_ENABLE:
	case IOCMD_PORT_BBCR_DISABLE:
		rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
		break;
	case IOCMD_PORT_BBCR_GET_ATTR:
		rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_ATTR:
		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_STATS:
		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_RESET_STATS:
		rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_IOSTATS:
		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_RPORTS:
		rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RPORT_GET_ATTR:
		rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_ADDR:
		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_STATS:
		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_RESET_STATS:
		rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_RPORT_SET_SPEED:
		rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_ATTR:
		rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
		break;
	case IOCMD_VPORT_GET_STATS:
		rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
		break;
	case IOCMD_VPORT_RESET_STATS:
		rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FABRIC_GET_LPORTS:
		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RATELIM_ENABLE:
	case IOCMD_RATELIM_DISABLE:
		rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
		break;
	case IOCMD_RATELIM_DEF_SPEED:
		rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
		break;
	case IOCMD_FCPIM_FAILOVER:
		rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATS:
		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_MODSTATSCLR:
		rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_DEL_ITN_STATS:
		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ATTR:
		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_IOSTATS:
		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_RESET_STATS:
		rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ITNSTATS:
		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_ENABLE:
		rc = bfad_iocmd_fcport_enable(bfad, iocmd);
		break;
	case IOCMD_FCPORT_DISABLE:
		rc = bfad_iocmd_fcport_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_PCIFN_CFG:
		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
		break;
	case IOCMD_IOC_FW_SIG_INV:
		rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd);
		break;
	case IOCMD_PCIFN_CREATE:
		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
		break;
	case IOCMD_PCIFN_DELETE:
		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
		break;
	case IOCMD_PCIFN_BW:
		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
		break;
	case IOCMD_ADAPTER_CFG_MODE:
		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_MODE:
		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_FLASH_ENABLE_OPTROM:
	case IOCMD_FLASH_DISABLE_OPTROM:
		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
		break;
	case IOCMD_FAA_QUERY:
		rc = bfad_iocmd_faa_query(bfad, iocmd);
		break;
	case IOCMD_CEE_GET_ATTR:
		rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_GET_STATS:
		rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_RESET_STATS:
		rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
		break;
	case IOCMD_SFP_MEDIA:
		rc = bfad_iocmd_sfp_media(bfad, iocmd);
		break;
	case IOCMD_SFP_SPEED:
		rc = bfad_iocmd_sfp_speed(bfad, iocmd);
		break;
	case IOCMD_FLASH_GET_ATTR:
		rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
		break;
	case IOCMD_FLASH_ERASE_PART:
		rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
		break;
	case IOCMD_FLASH_UPDATE_PART:
		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_FLASH_READ_PART:
		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_DIAG_TEMP:
		rc = bfad_iocmd_diag_temp(bfad, iocmd);
		break;
	case IOCMD_DIAG_MEMTEST:
		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
		break;
	case IOCMD_DIAG_LOOPBACK:
		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
		break;
	case IOCMD_DIAG_FWPING:
		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
		break;
	case IOCMD_DIAG_QUEUETEST:
		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
		break;
	case IOCMD_DIAG_SFP:
		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
		break;
	case IOCMD_DIAG_LED:
		rc = bfad_iocmd_diag_led(bfad, iocmd);
		break;
	case IOCMD_DIAG_BEACON_LPORT:
		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
		break;
	case IOCMD_DIAG_LB_STAT:
		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_ENABLE:
		rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_DISABLE:
		rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_SHOW:
		rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_START:
		rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_ATTR:
		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_STATS:
		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
		break;
	case IOCMD_PHY_UPDATE_FW:
		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
		break;
	case IOCMD_PHY_READ_FW:
		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
		break;
	case IOCMD_VHBA_QUERY:
		rc = bfad_iocmd_vhba_query(bfad, iocmd);
		break;
	case IOCMD_DEBUG_PORTLOG:
		rc = bfad_iocmd_porglog_get(bfad, iocmd);
		break;
	case IOCMD_DEBUG_FW_CORE:
		rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
		break;
	case IOCMD_DEBUG_FW_STATE_CLR:
	case IOCMD_DEBUG_PORTLOG_CLR:
	case IOCMD_DEBUG_START_DTRC:
	case IOCMD_DEBUG_STOP_DTRC:
		rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
		break;
	case IOCMD_DEBUG_PORTLOG_CTL:
		rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
		break;
	case IOCMD_FCPIM_PROFILE_ON:
	case IOCMD_FCPIM_PROFILE_OFF:
		rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
		break;
	case IOCMD_ITNIM_GET_IOPROFILE:
		rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
		break;
	case IOCMD_FCPORT_GET_STATS:
		rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_RESET_STATS:
		rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_BOOT_CFG:
		rc = bfad_iocmd_boot_cfg(bfad, iocmd);
		break;
	case IOCMD_BOOT_QUERY:
		rc = bfad_iocmd_boot_query(bfad, iocmd);
		break;
	case IOCMD_PREBOOT_QUERY:
		rc = bfad_iocmd_preboot_query(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_CFG:
		rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_QUERY:
		rc = bfad_iocmd_ethboot_query(bfad, iocmd);
		break;
	case IOCMD_TRUNK_ENABLE:
	case IOCMD_TRUNK_DISABLE:
		rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
		break;
	case IOCMD_TRUNK_GET_ATTR:
		rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_ENABLE:
	case IOCMD_QOS_DISABLE:
		rc = bfad_iocmd_qos(bfad, iocmd, cmd);
		break;
	case IOCMD_QOS_GET_ATTR:
		rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_VC_ATTR:
		rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_STATS:
		rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_RESET_STATS:
		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_SET_BW:
		rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
		break;
	case IOCMD_VF_GET_STATS:
		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
		break;
	case IOCMD_VF_RESET_STATS:
		rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ENABLE:
	case IOCMD_FCPIM_LUNMASK_DISABLE:
	case IOCMD_FCPIM_LUNMASK_CLEAR:
		rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_LUNMASK_QUERY:
		rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ADD:
	case IOCMD_FCPIM_LUNMASK_DELETE:
		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_THROTTLE_QUERY:
		rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_THROTTLE_SET:
		rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
		break;
	case IOCMD_TFRU_READ:
		rc = bfad_iocmd_tfru_read(bfad, iocmd);
		break;
	case IOCMD_TFRU_WRITE:
		rc = bfad_iocmd_tfru_write(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_READ:
		rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_UPDATE:
		rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_GET_MAX_SIZE:
		rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}
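
/*
 * bsg vendor request: copy the request sg_list into a linear kernel buffer,
 * run the IOCMD handler on it, then copy the result into the reply payload.
 */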
int
bfad_im_bsg_vendor_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
	struct bfad_s *bfad = im_port->bfad;
	void *payload_kbuf;
	int rc = -EINVAL;

	/* Allocate a temp buffer to hold the passed in user space command */
	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!payload_kbuf) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, payload_kbuf,
			  job->request_payload.payload_len);

	/* Invoke IOCMD handler - to handle all the vendor command requests */
	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
				job->request_payload.payload_len);
	if (rc != BFA_STATUS_OK)
		goto error;

	/* Copy the response data to the job->reply_payload sg_list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    payload_kbuf,
			    job->reply_payload.payload_len);

	/* free the command buffer */
	kfree(payload_kbuf);

	/* Fill the BSG job reply data */
	job->reply_len = job->reply_payload.payload_len;
	bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	bsg_reply->result = rc;

	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return rc;
error:
	/* free the command buffer */
	kfree(payload_kbuf);
out:
	bsg_reply->result = rc;
	job->reply_len = sizeof(uint32_t);
	bsg_reply->reply_payload_rcv_len = 0;
	return rc;
}
/* FC passthru call backs */
static u64
bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->req_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

static u32
bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->req_sge + sgeid;
	return sge->sg_len;
}

static u64
bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->rsp_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

static u32
bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->rsp_sge + sgeid;
	return sge->sg_len;
}

static void
bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
		struct fchs_s *rsp_fchs)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;

	drv_fcxp->req_status = req_status;
	drv_fcxp->rsp_len = rsp_len;

	/* bfa_fcxp will be automatically freed by BFA */
	drv_fcxp->bfa_fcxp = NULL;
	complete(&drv_fcxp->comp);
}
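
/*
 * Map a linear payload buffer into a single DMA-coherent SG element for the
 * FCXP request/response; freed again by bfad_fcxp_free_mem().
 */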
static struct bfad_buf_info *
bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
		 uint32_t payload_len, uint32_t *num_sgles)
{
	struct bfad_buf_info *buf_base, *buf_info;
	struct bfa_sge_s *sg_table;
	int sge_num = 1;

	buf_base = kcalloc(sizeof(struct bfad_buf_info) +
			   sizeof(struct bfa_sge_s),
			   sge_num, GFP_KERNEL);
	if (!buf_base)
		return NULL;

	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
			(sizeof(struct bfad_buf_info) * sge_num));

	/* Allocate dma coherent memory */
	buf_info = buf_base;
	buf_info->size = payload_len;
	buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev,
					    buf_info->size, &buf_info->phys,
					    GFP_KERNEL);
	if (!buf_info->virt)
		goto out_free_mem;

	/* copy the linear bsg buffer to buf_info */
	memcpy(buf_info->virt, payload_kbuf, buf_info->size);

	/* Setup SG table */
	sg_table->sg_len = buf_info->size;
	sg_table->sg_addr = (void *)(size_t) buf_info->phys;

	*num_sgles = sge_num;

	return buf_base;

out_free_mem:
	kfree(buf_base);
	return NULL;
}

static void
bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
		   uint32_t num_sgles)
{
	int i;
	struct bfad_buf_info *buf_info = buf_base;

	if (buf_base) {
		for (i = 0; i < num_sgles; buf_info++, i++) {
			if (buf_info->virt != NULL)
				dma_free_coherent(&bfad->pcidev->dev,
						  buf_info->size,
						  buf_info->virt,
						  buf_info->phys);
		}
		kfree(buf_base);
	}
}
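
/* Allocate an FCXP for the passthru request and send it through the HAL. */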
bfa_status_t
bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
		   bfa_bsg_fcpt_t *bsg_fcpt)
{
	struct bfa_fcxp_s *hal_fcxp;
	struct bfad_s *bfad = drv_fcxp->port->bfad;
	unsigned long flags;
	uint8_t lp_tag;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Allocate bfa_fcxp structure */
	hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
					  drv_fcxp->num_req_sgles,
					  drv_fcxp->num_rsp_sgles,
					  bfad_fcxp_get_req_sgaddr_cb,
					  bfad_fcxp_get_req_sglen_cb,
					  bfad_fcxp_get_rsp_sgaddr_cb,
					  bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
	if (!hal_fcxp) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_ENOMEM;
	}

	drv_fcxp->bfa_fcxp = hal_fcxp;

	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);

	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
		      bsg_fcpt->cts, bsg_fcpt->cos,
		      job->request_payload.payload_len,
		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
		      job->reply_payload.payload_len, bsg_fcpt->tsecs);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;
}
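
/*
 * ELS/CT passthru: look up the lport (and rport for RPT commands), map the
 * request/response buffers, send the FCXP and wait for its completion callback.
 */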
int
bfad_im_bsg_els_ct_request(struct bsg_job *job)
{
	struct bfa_bsg_data *bsg_data;
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
	struct bfad_s *bfad = im_port->bfad;
	bfa_bsg_fcpt_t *bsg_fcpt;
	struct bfad_fcxp *drv_fcxp;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t command_type = bsg_request->msgcode;
	unsigned long flags;
	struct bfad_buf_info *rsp_buf_info;
	void *req_kbuf = NULL, *rsp_kbuf = NULL;
	int rc = -EINVAL;

	job->reply_len = sizeof(uint32_t);	/* Atleast uint32_t reply_len */
	bsg_reply->reply_payload_rcv_len = 0;

	/* Get the payload passed in from userspace */
	bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) +
					    sizeof(struct fc_bsg_request));
	if (bsg_data == NULL)
		goto out;

	/*
	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
	 * buffer of size bsg_data->payload_len
	 */
	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
	if (!bsg_fcpt) {
		rc = -ENOMEM;
		goto out;
	}

	if (copy_from_user((uint8_t *)bsg_fcpt,
			   (void *)(unsigned long)bsg_data->payload,
			   bsg_data->payload_len)) {
		kfree(bsg_fcpt);
		rc = -EIO;
		goto out;
	}

	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
	if (drv_fcxp == NULL) {
		kfree(bsg_fcpt);
		rc = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
				       bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (!drv_fcxp->port->bfad)
		drv_fcxp->port->bfad = bfad;

	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
	    command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;

	} else if (command_type == FC_BSG_RPT_ELS ||
		   command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}

		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;

	} else { /* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->reqbuf_info) +
			    (sizeof(struct bfad_buf_info) *
			     drv_fcxp->num_req_sgles));

	/* map rsp sg */
	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->rspbuf_info) +
			    (sizeof(struct bfad_buf_info) *
			     drv_fcxp->num_rsp_sgles));

	/* fcxp send */
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		bsg_reply->reply_payload_rcv_len =
					sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		bsg_reply->reply_data.ctels_reply.status =
						FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);

out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(rsp_kbuf);
	kfree(req_kbuf);

	/* Need a copy to user op */
	if (copy_to_user((void *)(unsigned long)bsg_data->payload,
			 (void *)bsg_fcpt, bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	bsg_reply->result = rc;

	if (rc == BFA_STATUS_OK)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);

	return rc;
}
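
/* bsg request entry point: route vendor vs. ELS/CT passthru commands. */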
int
bfad_im_bsg_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t rc = BFA_STATUS_OK;

	switch (bsg_request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* Process BSG HST Vendor requests */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		bsg_reply->result = rc = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	}

	return rc;
}
int
bfad_im_bsg_timeout(struct bsg_job *job)
{
	/* Don't complete the BSG job request - return -EAGAIN
	 * to reset bsg job timeout : for ELS/CT pass thru we
	 * already have timer to track the request.
	 */
	return -EAGAIN;
}