Linux 4.19.133
drivers/scsi/bfa/bfad_bsg.c
1 /*
2  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
3  * Copyright (c) 2014- QLogic Corporation.
4  * All rights reserved
5  * www.qlogic.com
6  *
7  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of the GNU General Public License (GPL) Version 2 as
11  * published by the Free Software Foundation
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * General Public License for more details.
17  */
19 #include <linux/uaccess.h>
20 #include "bfad_drv.h"
21 #include "bfad_im.h"
22 #include "bfad_bsg.h"
24 BFA_TRC_FILE(LDRV, BSG);
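/*
 * Editorial note on the handlers below: each bfad_iocmd_*() routine returns 0
 * to its caller and reports errors through iocmd->status in the command
 * buffer. Handlers that must reach firmware arm a struct bfad_hal_comp
 * completion, issue the request while holding bfad_lock, and then sleep in
 * wait_for_completion() until the bfad_hcb_comp() callback (defined elsewhere
 * in the bfad driver) posts the firmware result into fcomp.status.
 */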
26 int
27 bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
29 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
30 unsigned long flags;
32 spin_lock_irqsave(&bfad->bfad_lock, flags);
33 /* If IOC is not in disabled state - return */
34 if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
35 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
36 iocmd->status = BFA_STATUS_OK;
37 return 0;
40 init_completion(&bfad->enable_comp);
41 bfa_iocfc_enable(&bfad->bfa);
42 iocmd->status = BFA_STATUS_OK;
43 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
44 wait_for_completion(&bfad->enable_comp);
46 return 0;
49 int
50 bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
52 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
53 unsigned long flags;
55 spin_lock_irqsave(&bfad->bfad_lock, flags);
56 if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
57 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
58 iocmd->status = BFA_STATUS_OK;
59 return 0;
62 if (bfad->disable_active) {
63 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
64 return -EBUSY;
67 bfad->disable_active = BFA_TRUE;
68 init_completion(&bfad->disable_comp);
69 bfa_iocfc_disable(&bfad->bfa);
70 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
72 wait_for_completion(&bfad->disable_comp);
73 bfad->disable_active = BFA_FALSE;
74 iocmd->status = BFA_STATUS_OK;
76 return 0;
79 static int
80 bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
82 int i;
83 struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
84 struct bfad_im_port_s *im_port;
85 struct bfa_port_attr_s pattr;
86 unsigned long flags;
88 spin_lock_irqsave(&bfad->bfad_lock, flags);
89 bfa_fcport_get_attr(&bfad->bfa, &pattr);
90 iocmd->nwwn = pattr.nwwn;
91 iocmd->pwwn = pattr.pwwn;
92 iocmd->ioc_type = bfa_get_type(&bfad->bfa);
93 iocmd->mac = bfa_get_mac(&bfad->bfa);
94 iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
95 bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
96 iocmd->factorynwwn = pattr.factorynwwn;
97 iocmd->factorypwwn = pattr.factorypwwn;
98 iocmd->bfad_num = bfad->inst_no;
99 im_port = bfad->pport.im_port;
100 iocmd->host = im_port->shost->host_no;
101 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
103 strcpy(iocmd->name, bfad->adapter_name);
104 strcpy(iocmd->port_name, bfad->port_name);
105 strcpy(iocmd->hwpath, bfad->pci_name);
107 /* set adapter hw path */
108 strcpy(iocmd->adapter_hwpath, bfad->pci_name);
109 for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++)
110 ; /* empty body: the loop only advances i to the first ':' */
111 for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; )
112 ; /* empty body: advance i to the second ':' */
113 iocmd->adapter_hwpath[i] = '\0';
114 iocmd->status = BFA_STATUS_OK;
115 return 0;
118 static int
119 bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
121 struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
122 unsigned long flags;
124 spin_lock_irqsave(&bfad->bfad_lock, flags);
125 bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
126 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
128 /* fill in driver attr info */
129 strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
130 strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
131 BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
132 strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
133 iocmd->ioc_attr.adapter_attr.fw_ver);
134 strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
135 iocmd->ioc_attr.adapter_attr.optrom_ver);
137 /* copy chip rev info first otherwise it will be overwritten */
138 memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
139 sizeof(bfad->pci_attr.chip_rev));
140 memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
141 sizeof(struct bfa_ioc_pci_attr_s));
143 iocmd->status = BFA_STATUS_OK;
144 return 0;
148 bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
150 struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
152 bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
153 iocmd->status = BFA_STATUS_OK;
154 return 0;
158 bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
159 unsigned int payload_len)
161 struct bfa_bsg_ioc_fwstats_s *iocmd =
162 (struct bfa_bsg_ioc_fwstats_s *)cmd;
163 void *iocmd_bufptr;
164 unsigned long flags;
166 if (bfad_chk_iocmd_sz(payload_len,
167 sizeof(struct bfa_bsg_ioc_fwstats_s),
168 sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
169 iocmd->status = BFA_STATUS_VERSION_FAIL;
170 goto out;
173 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
174 spin_lock_irqsave(&bfad->bfad_lock, flags);
175 iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
176 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
178 if (iocmd->status != BFA_STATUS_OK) {
179 bfa_trc(bfad, iocmd->status);
180 goto out;
182 out:
183 bfa_trc(bfad, 0x6666);
184 return 0;
188 bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
190 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
191 unsigned long flags;
193 if (v_cmd == IOCMD_IOC_RESET_STATS) {
194 bfa_ioc_clear_stats(&bfad->bfa);
195 iocmd->status = BFA_STATUS_OK;
196 } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
197 spin_lock_irqsave(&bfad->bfad_lock, flags);
198 iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
199 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
202 return 0;
206 bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
208 struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
210 if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
211 strcpy(bfad->adapter_name, iocmd->name);
212 else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
213 strcpy(bfad->port_name, iocmd->name);
215 iocmd->status = BFA_STATUS_OK;
216 return 0;
220 bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
222 struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
224 iocmd->status = BFA_STATUS_OK;
225 bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
227 return 0;
231 bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
233 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
234 unsigned long flags;
236 spin_lock_irqsave(&bfad->bfad_lock, flags);
237 iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc);
238 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
239 return 0;
243 bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
245 struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
246 unsigned long flags;
248 spin_lock_irqsave(&bfad->bfad_lock, flags);
249 iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
250 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
252 return 0;
256 bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
258 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
259 struct bfad_hal_comp fcomp;
260 unsigned long flags;
262 init_completion(&fcomp.comp);
263 spin_lock_irqsave(&bfad->bfad_lock, flags);
264 iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
265 bfad_hcb_comp, &fcomp);
266 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
267 if (iocmd->status != BFA_STATUS_OK) {
268 bfa_trc(bfad, iocmd->status);
269 return 0;
271 wait_for_completion(&fcomp.comp);
272 iocmd->status = fcomp.status;
273 return 0;
277 bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
279 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
280 struct bfad_hal_comp fcomp;
281 unsigned long flags;
283 init_completion(&fcomp.comp);
284 spin_lock_irqsave(&bfad->bfad_lock, flags);
285 iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
286 bfad_hcb_comp, &fcomp);
287 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
289 if (iocmd->status != BFA_STATUS_OK) {
290 bfa_trc(bfad, iocmd->status);
291 return 0;
293 wait_for_completion(&fcomp.comp);
294 iocmd->status = fcomp.status;
295 return 0;
298 static int
299 bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
301 struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
302 struct bfa_lport_attr_s port_attr;
303 unsigned long flags;
305 spin_lock_irqsave(&bfad->bfad_lock, flags);
306 bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
307 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
308 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
310 if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
311 iocmd->attr.pid = port_attr.pid;
312 else
313 iocmd->attr.pid = 0;
315 iocmd->attr.port_type = port_attr.port_type;
316 iocmd->attr.loopback = port_attr.loopback;
317 iocmd->attr.authfail = port_attr.authfail;
318 strlcpy(iocmd->attr.port_symname.symname,
319 port_attr.port_cfg.sym_name.symname,
320 sizeof(iocmd->attr.port_symname.symname));
322 iocmd->status = BFA_STATUS_OK;
323 return 0;
327 bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
328 unsigned int payload_len)
330 struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
331 struct bfad_hal_comp fcomp;
332 void *iocmd_bufptr;
333 unsigned long flags;
335 if (bfad_chk_iocmd_sz(payload_len,
336 sizeof(struct bfa_bsg_port_stats_s),
337 sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
338 iocmd->status = BFA_STATUS_VERSION_FAIL;
339 return 0;
342 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
344 init_completion(&fcomp.comp);
345 spin_lock_irqsave(&bfad->bfad_lock, flags);
346 iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
347 iocmd_bufptr, bfad_hcb_comp, &fcomp);
348 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
349 if (iocmd->status != BFA_STATUS_OK) {
350 bfa_trc(bfad, iocmd->status);
351 goto out;
354 wait_for_completion(&fcomp.comp);
355 iocmd->status = fcomp.status;
356 out:
357 return 0;
361 bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
363 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
364 struct bfad_hal_comp fcomp;
365 unsigned long flags;
367 init_completion(&fcomp.comp);
368 spin_lock_irqsave(&bfad->bfad_lock, flags);
369 iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
370 bfad_hcb_comp, &fcomp);
371 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
372 if (iocmd->status != BFA_STATUS_OK) {
373 bfa_trc(bfad, iocmd->status);
374 return 0;
376 wait_for_completion(&fcomp.comp);
377 iocmd->status = fcomp.status;
378 return 0;
382 bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
384 struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
385 unsigned long flags;
387 spin_lock_irqsave(&bfad->bfad_lock, flags);
388 if (v_cmd == IOCMD_PORT_CFG_TOPO)
389 cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
390 else if (v_cmd == IOCMD_PORT_CFG_SPEED)
391 cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
392 else if (v_cmd == IOCMD_PORT_CFG_ALPA)
393 cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
394 else if (v_cmd == IOCMD_PORT_CLR_ALPA)
395 cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
396 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
398 return 0;
402 bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
404 struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
405 (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
406 unsigned long flags;
408 spin_lock_irqsave(&bfad->bfad_lock, flags);
409 iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
410 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
412 return 0;
416 bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
418 struct bfa_bsg_bbcr_enable_s *iocmd =
419 (struct bfa_bsg_bbcr_enable_s *)pcmd;
420 unsigned long flags;
421 int rc;
423 spin_lock_irqsave(&bfad->bfad_lock, flags);
424 if (cmd == IOCMD_PORT_BBCR_ENABLE)
425 rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
426 else if (cmd == IOCMD_PORT_BBCR_DISABLE)
427 rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
428 else {
429 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
430 return -EINVAL;
432 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
434 iocmd->status = rc;
435 return 0;
439 bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
441 struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
442 unsigned long flags;
444 spin_lock_irqsave(&bfad->bfad_lock, flags);
445 iocmd->status =
446 bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
447 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
449 return 0;
453 static int
454 bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
456 struct bfa_fcs_lport_s *fcs_port;
457 struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
458 unsigned long flags;
460 spin_lock_irqsave(&bfad->bfad_lock, flags);
461 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
462 iocmd->vf_id, iocmd->pwwn);
463 if (fcs_port == NULL) {
464 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
465 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
466 goto out;
469 bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
470 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
471 iocmd->status = BFA_STATUS_OK;
472 out:
473 return 0;
477 bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
479 struct bfa_fcs_lport_s *fcs_port;
480 struct bfa_bsg_lport_stats_s *iocmd =
481 (struct bfa_bsg_lport_stats_s *)cmd;
482 unsigned long flags;
484 spin_lock_irqsave(&bfad->bfad_lock, flags);
485 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
486 iocmd->vf_id, iocmd->pwwn);
487 if (fcs_port == NULL) {
488 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
489 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
490 goto out;
493 bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
494 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
495 iocmd->status = BFA_STATUS_OK;
496 out:
497 return 0;
501 bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
503 struct bfa_fcs_lport_s *fcs_port;
504 struct bfa_bsg_reset_stats_s *iocmd =
505 (struct bfa_bsg_reset_stats_s *)cmd;
506 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
507 struct list_head *qe, *qen;
508 struct bfa_itnim_s *itnim;
509 unsigned long flags;
511 spin_lock_irqsave(&bfad->bfad_lock, flags);
512 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
513 iocmd->vf_id, iocmd->vpwwn);
514 if (fcs_port == NULL) {
515 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
516 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
517 goto out;
520 bfa_fcs_lport_clear_stats(fcs_port);
521 /* clear IO stats from all active itnims */
522 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
523 itnim = (struct bfa_itnim_s *) qe;
524 if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
525 continue;
526 bfa_itnim_clear_stats(itnim);
528 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
529 iocmd->status = BFA_STATUS_OK;
530 out:
531 return 0;
535 bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
537 struct bfa_fcs_lport_s *fcs_port;
538 struct bfa_bsg_lport_iostats_s *iocmd =
539 (struct bfa_bsg_lport_iostats_s *)cmd;
540 unsigned long flags;
542 spin_lock_irqsave(&bfad->bfad_lock, flags);
543 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
544 iocmd->vf_id, iocmd->pwwn);
545 if (fcs_port == NULL) {
546 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
547 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
548 goto out;
551 bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
552 fcs_port->lp_tag);
553 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
554 iocmd->status = BFA_STATUS_OK;
555 out:
556 return 0;
560 bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
561 unsigned int payload_len)
563 struct bfa_bsg_lport_get_rports_s *iocmd =
564 (struct bfa_bsg_lport_get_rports_s *)cmd;
565 struct bfa_fcs_lport_s *fcs_port;
566 unsigned long flags;
567 void *iocmd_bufptr;
569 if (iocmd->nrports == 0)
570 return -EINVAL;
572 if (bfad_chk_iocmd_sz(payload_len,
573 sizeof(struct bfa_bsg_lport_get_rports_s),
574 sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
575 != BFA_STATUS_OK) {
576 iocmd->status = BFA_STATUS_VERSION_FAIL;
577 return 0;
580 iocmd_bufptr = (char *)iocmd +
581 sizeof(struct bfa_bsg_lport_get_rports_s);
582 spin_lock_irqsave(&bfad->bfad_lock, flags);
583 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
584 iocmd->vf_id, iocmd->pwwn);
585 if (fcs_port == NULL) {
586 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
587 bfa_trc(bfad, 0);
588 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
589 goto out;
592 bfa_fcs_lport_get_rport_quals(fcs_port,
593 (struct bfa_rport_qualifier_s *)iocmd_bufptr,
594 &iocmd->nrports);
595 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
596 iocmd->status = BFA_STATUS_OK;
597 out:
598 return 0;
602 bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
604 struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
605 struct bfa_fcs_lport_s *fcs_port;
606 struct bfa_fcs_rport_s *fcs_rport;
607 unsigned long flags;
609 spin_lock_irqsave(&bfad->bfad_lock, flags);
610 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
611 iocmd->vf_id, iocmd->pwwn);
612 if (fcs_port == NULL) {
613 bfa_trc(bfad, 0);
614 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
615 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
616 goto out;
619 if (iocmd->pid)
620 fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
621 iocmd->rpwwn, iocmd->pid);
622 else
623 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
624 if (fcs_rport == NULL) {
625 bfa_trc(bfad, 0);
626 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
627 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
628 goto out;
631 bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
632 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
633 iocmd->status = BFA_STATUS_OK;
634 out:
635 return 0;
638 static int
639 bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
641 struct bfa_bsg_rport_scsi_addr_s *iocmd =
642 (struct bfa_bsg_rport_scsi_addr_s *)cmd;
643 struct bfa_fcs_lport_s *fcs_port;
644 struct bfa_fcs_itnim_s *fcs_itnim;
645 struct bfad_itnim_s *drv_itnim;
646 unsigned long flags;
648 spin_lock_irqsave(&bfad->bfad_lock, flags);
649 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
650 iocmd->vf_id, iocmd->pwwn);
651 if (fcs_port == NULL) {
652 bfa_trc(bfad, 0);
653 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
654 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
655 goto out;
658 fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
659 if (fcs_itnim == NULL) {
660 bfa_trc(bfad, 0);
661 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
662 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
663 goto out;
666 drv_itnim = fcs_itnim->itnim_drv;
668 if (drv_itnim && drv_itnim->im_port)
669 iocmd->host = drv_itnim->im_port->shost->host_no;
670 else {
671 bfa_trc(bfad, 0);
672 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
673 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
674 goto out;
677 iocmd->target = drv_itnim->scsi_tgt_id;
678 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
680 iocmd->bus = 0;
681 iocmd->lun = 0;
682 iocmd->status = BFA_STATUS_OK;
683 out:
684 return 0;
688 bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
690 struct bfa_bsg_rport_stats_s *iocmd =
691 (struct bfa_bsg_rport_stats_s *)cmd;
692 struct bfa_fcs_lport_s *fcs_port;
693 struct bfa_fcs_rport_s *fcs_rport;
694 unsigned long flags;
696 spin_lock_irqsave(&bfad->bfad_lock, flags);
697 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
698 iocmd->vf_id, iocmd->pwwn);
699 if (fcs_port == NULL) {
700 bfa_trc(bfad, 0);
701 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
702 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
703 goto out;
706 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
707 if (fcs_rport == NULL) {
708 bfa_trc(bfad, 0);
709 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
710 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
711 goto out;
714 memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
715 sizeof(struct bfa_rport_stats_s));
716 if (bfa_fcs_rport_get_halrport(fcs_rport)) {
717 memcpy((void *)&iocmd->stats.hal_stats,
718 (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
719 sizeof(struct bfa_rport_hal_stats_s));
722 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
723 iocmd->status = BFA_STATUS_OK;
724 out:
725 return 0;
729 bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
731 struct bfa_bsg_rport_reset_stats_s *iocmd =
732 (struct bfa_bsg_rport_reset_stats_s *)cmd;
733 struct bfa_fcs_lport_s *fcs_port;
734 struct bfa_fcs_rport_s *fcs_rport;
735 struct bfa_rport_s *rport;
736 unsigned long flags;
738 spin_lock_irqsave(&bfad->bfad_lock, flags);
739 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
740 iocmd->vf_id, iocmd->pwwn);
741 if (fcs_port == NULL) {
742 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
743 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
744 goto out;
747 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
748 if (fcs_rport == NULL) {
749 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
750 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
751 goto out;
754 memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
755 rport = bfa_fcs_rport_get_halrport(fcs_rport);
756 if (rport)
757 memset(&rport->stats, 0, sizeof(rport->stats));
758 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
759 iocmd->status = BFA_STATUS_OK;
760 out:
761 return 0;
765 bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
767 struct bfa_bsg_rport_set_speed_s *iocmd =
768 (struct bfa_bsg_rport_set_speed_s *)cmd;
769 struct bfa_fcs_lport_s *fcs_port;
770 struct bfa_fcs_rport_s *fcs_rport;
771 unsigned long flags;
773 spin_lock_irqsave(&bfad->bfad_lock, flags);
774 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
775 iocmd->vf_id, iocmd->pwwn);
776 if (fcs_port == NULL) {
777 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
778 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
779 goto out;
782 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
783 if (fcs_rport == NULL) {
784 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
785 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
786 goto out;
789 fcs_rport->rpf.assigned_speed = iocmd->speed;
790 /* Set this speed in f/w only if the RPSC speed is not available */
791 if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
792 if (fcs_rport->bfa_rport)
793 bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
794 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
795 iocmd->status = BFA_STATUS_OK;
796 out:
797 return 0;
801 bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
803 struct bfa_fcs_vport_s *fcs_vport;
804 struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
805 unsigned long flags;
807 spin_lock_irqsave(&bfad->bfad_lock, flags);
808 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
809 iocmd->vf_id, iocmd->vpwwn);
810 if (fcs_vport == NULL) {
811 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
812 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
813 goto out;
816 bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
817 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
818 iocmd->status = BFA_STATUS_OK;
819 out:
820 return 0;
824 bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
826 struct bfa_fcs_vport_s *fcs_vport;
827 struct bfa_bsg_vport_stats_s *iocmd =
828 (struct bfa_bsg_vport_stats_s *)cmd;
829 unsigned long flags;
831 spin_lock_irqsave(&bfad->bfad_lock, flags);
832 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
833 iocmd->vf_id, iocmd->vpwwn);
834 if (fcs_vport == NULL) {
835 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
836 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
837 goto out;
840 memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
841 sizeof(struct bfa_vport_stats_s));
842 memcpy((void *)&iocmd->vport_stats.port_stats,
843 (void *)&fcs_vport->lport.stats,
844 sizeof(struct bfa_lport_stats_s));
845 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
846 iocmd->status = BFA_STATUS_OK;
847 out:
848 return 0;
852 bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
854 struct bfa_fcs_vport_s *fcs_vport;
855 struct bfa_bsg_reset_stats_s *iocmd =
856 (struct bfa_bsg_reset_stats_s *)cmd;
857 unsigned long flags;
859 spin_lock_irqsave(&bfad->bfad_lock, flags);
860 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
861 iocmd->vf_id, iocmd->vpwwn);
862 if (fcs_vport == NULL) {
863 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
864 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
865 goto out;
868 memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
869 memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
870 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
871 iocmd->status = BFA_STATUS_OK;
872 out:
873 return 0;
876 static int
877 bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
878 unsigned int payload_len)
880 struct bfa_bsg_fabric_get_lports_s *iocmd =
881 (struct bfa_bsg_fabric_get_lports_s *)cmd;
882 bfa_fcs_vf_t *fcs_vf;
883 uint32_t nports = iocmd->nports;
884 unsigned long flags;
885 void *iocmd_bufptr;
887 if (nports == 0) {
888 iocmd->status = BFA_STATUS_EINVAL;
889 goto out;
892 if (bfad_chk_iocmd_sz(payload_len,
893 sizeof(struct bfa_bsg_fabric_get_lports_s),
894 sizeof(wwn_t) * iocmd->nports) != BFA_STATUS_OK) {
895 iocmd->status = BFA_STATUS_VERSION_FAIL;
896 goto out;
899 iocmd_bufptr = (char *)iocmd +
900 sizeof(struct bfa_bsg_fabric_get_lports_s);
902 spin_lock_irqsave(&bfad->bfad_lock, flags);
903 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
904 if (fcs_vf == NULL) {
905 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
906 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
907 goto out;
909 bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
910 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
912 iocmd->nports = nports;
913 iocmd->status = BFA_STATUS_OK;
914 out:
915 return 0;
919 bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
921 struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
922 unsigned long flags;
924 spin_lock_irqsave(&bfad->bfad_lock, flags);
925 iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
926 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
928 return 0;
932 bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
934 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
935 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
936 unsigned long flags;
938 spin_lock_irqsave(&bfad->bfad_lock, flags);
940 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
941 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
942 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
943 else {
944 if (cmd == IOCMD_RATELIM_ENABLE)
945 fcport->cfg.ratelimit = BFA_TRUE;
946 else if (cmd == IOCMD_RATELIM_DISABLE)
947 fcport->cfg.ratelimit = BFA_FALSE;
949 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
950 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
952 iocmd->status = BFA_STATUS_OK;
955 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
957 return 0;
961 bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
963 struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
964 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
965 unsigned long flags;
967 spin_lock_irqsave(&bfad->bfad_lock, flags);
969 /* Auto and speeds greater than the supported speed are invalid */
970 if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
971 (iocmd->speed > fcport->speed_sup)) {
972 iocmd->status = BFA_STATUS_UNSUPP_SPEED;
973 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
974 return 0;
977 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
978 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
979 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
980 else {
981 fcport->cfg.trl_def_speed = iocmd->speed;
982 iocmd->status = BFA_STATUS_OK;
984 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
986 return 0;
990 bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
992 struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
993 unsigned long flags;
995 spin_lock_irqsave(&bfad->bfad_lock, flags);
996 bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
997 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
998 iocmd->status = BFA_STATUS_OK;
999 return 0;
1003 bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
1005 struct bfa_bsg_fcpim_modstats_s *iocmd =
1006 (struct bfa_bsg_fcpim_modstats_s *)cmd;
1007 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1008 struct list_head *qe, *qen;
1009 struct bfa_itnim_s *itnim;
1010 unsigned long flags;
1012 spin_lock_irqsave(&bfad->bfad_lock, flags);
1013 /* accumulate IO stats from itnim */
1014 memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
1015 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
1016 itnim = (struct bfa_itnim_s *) qe;
1017 bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
1019 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1020 iocmd->status = BFA_STATUS_OK;
1021 return 0;
1025 bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
1027 struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
1028 (struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
1029 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1030 struct list_head *qe, *qen;
1031 struct bfa_itnim_s *itnim;
1032 unsigned long flags;
1034 spin_lock_irqsave(&bfad->bfad_lock, flags);
1035 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
1036 itnim = (struct bfa_itnim_s *) qe;
1037 bfa_itnim_clear_stats(itnim);
1039 memset(&fcpim->del_itn_stats, 0,
1040 sizeof(struct bfa_fcpim_del_itn_stats_s));
1041 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1042 iocmd->status = BFA_STATUS_OK;
1043 return 0;
1047 bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
1049 struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
1050 (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
1051 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1052 unsigned long flags;
1054 spin_lock_irqsave(&bfad->bfad_lock, flags);
1055 memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
1056 sizeof(struct bfa_fcpim_del_itn_stats_s));
1057 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1059 iocmd->status = BFA_STATUS_OK;
1060 return 0;
1063 static int
1064 bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
1066 struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
1067 struct bfa_fcs_lport_s *fcs_port;
1068 unsigned long flags;
1070 spin_lock_irqsave(&bfad->bfad_lock, flags);
1071 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1072 iocmd->vf_id, iocmd->lpwwn);
1073 if (!fcs_port)
1074 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1075 else
1076 iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
1077 iocmd->rpwwn, &iocmd->attr);
1078 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1079 return 0;
1082 static int
1083 bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
1085 struct bfa_bsg_itnim_iostats_s *iocmd =
1086 (struct bfa_bsg_itnim_iostats_s *)cmd;
1087 struct bfa_fcs_lport_s *fcs_port;
1088 struct bfa_fcs_itnim_s *itnim;
1089 unsigned long flags;
1091 spin_lock_irqsave(&bfad->bfad_lock, flags);
1092 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1093 iocmd->vf_id, iocmd->lpwwn);
1094 if (!fcs_port) {
1095 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1096 bfa_trc(bfad, 0);
1097 } else {
1098 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1099 if (itnim == NULL)
1100 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1101 else {
1102 iocmd->status = BFA_STATUS_OK;
1103 if (bfa_fcs_itnim_get_halitn(itnim))
1104 memcpy((void *)&iocmd->iostats, (void *)
1105 &(bfa_fcs_itnim_get_halitn(itnim)->stats),
1106 sizeof(struct bfa_itnim_iostats_s));
1109 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1110 return 0;
1113 static int
1114 bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
1116 struct bfa_bsg_rport_reset_stats_s *iocmd =
1117 (struct bfa_bsg_rport_reset_stats_s *)cmd;
1118 struct bfa_fcs_lport_s *fcs_port;
1119 struct bfa_fcs_itnim_s *itnim;
1120 unsigned long flags;
1122 spin_lock_irqsave(&bfad->bfad_lock, flags);
1123 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1124 iocmd->vf_id, iocmd->pwwn);
1125 if (!fcs_port)
1126 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1127 else {
1128 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1129 if (itnim == NULL)
1130 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1131 else {
1132 iocmd->status = BFA_STATUS_OK;
1133 bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
1134 bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
1137 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1139 return 0;
1142 static int
1143 bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
1145 struct bfa_bsg_itnim_itnstats_s *iocmd =
1146 (struct bfa_bsg_itnim_itnstats_s *)cmd;
1147 struct bfa_fcs_lport_s *fcs_port;
1148 struct bfa_fcs_itnim_s *itnim;
1149 unsigned long flags;
1151 spin_lock_irqsave(&bfad->bfad_lock, flags);
1152 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1153 iocmd->vf_id, iocmd->lpwwn);
1154 if (!fcs_port) {
1155 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1156 bfa_trc(bfad, 0);
1157 } else {
1158 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1159 if (itnim == NULL)
1160 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1161 else {
1162 iocmd->status = BFA_STATUS_OK;
1163 bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
1164 &iocmd->itnstats);
1167 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1168 return 0;
1172 bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
1174 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1175 unsigned long flags;
1177 spin_lock_irqsave(&bfad->bfad_lock, flags);
1178 iocmd->status = bfa_fcport_enable(&bfad->bfa);
1179 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1181 return 0;
1185 bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
1187 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1188 unsigned long flags;
1190 spin_lock_irqsave(&bfad->bfad_lock, flags);
1191 iocmd->status = bfa_fcport_disable(&bfad->bfa);
1192 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1194 return 0;
1198 bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
1200 struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
1201 struct bfad_hal_comp fcomp;
1202 unsigned long flags;
1204 init_completion(&fcomp.comp);
1205 spin_lock_irqsave(&bfad->bfad_lock, flags);
1206 iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
1207 &iocmd->pcifn_cfg,
1208 bfad_hcb_comp, &fcomp);
1209 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1210 if (iocmd->status != BFA_STATUS_OK)
1211 goto out;
1213 wait_for_completion(&fcomp.comp);
1214 iocmd->status = fcomp.status;
1215 out:
1216 return 0;
1220 bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
1222 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1223 struct bfad_hal_comp fcomp;
1224 unsigned long flags;
1226 init_completion(&fcomp.comp);
1227 spin_lock_irqsave(&bfad->bfad_lock, flags);
1228 iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
1229 &iocmd->pcifn_id, iocmd->port,
1230 iocmd->pcifn_class, iocmd->bw_min,
1231 iocmd->bw_max, bfad_hcb_comp, &fcomp);
1232 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1233 if (iocmd->status != BFA_STATUS_OK)
1234 goto out;
1236 wait_for_completion(&fcomp.comp);
1237 iocmd->status = fcomp.status;
1238 out:
1239 return 0;
1243 bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
1245 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1246 struct bfad_hal_comp fcomp;
1247 unsigned long flags;
1249 init_completion(&fcomp.comp);
1250 spin_lock_irqsave(&bfad->bfad_lock, flags);
1251 iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
1252 iocmd->pcifn_id,
1253 bfad_hcb_comp, &fcomp);
1254 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1255 if (iocmd->status != BFA_STATUS_OK)
1256 goto out;
1258 wait_for_completion(&fcomp.comp);
1259 iocmd->status = fcomp.status;
1260 out:
1261 return 0;
1265 bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
1267 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1268 struct bfad_hal_comp fcomp;
1269 unsigned long flags;
1271 init_completion(&fcomp.comp);
1272 spin_lock_irqsave(&bfad->bfad_lock, flags);
1273 iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
1274 iocmd->pcifn_id, iocmd->bw_min,
1275 iocmd->bw_max, bfad_hcb_comp, &fcomp);
1276 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1277 bfa_trc(bfad, iocmd->status);
1278 if (iocmd->status != BFA_STATUS_OK)
1279 goto out;
1281 wait_for_completion(&fcomp.comp);
1282 iocmd->status = fcomp.status;
1283 bfa_trc(bfad, iocmd->status);
1284 out:
1285 return 0;
1289 bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
1291 struct bfa_bsg_adapter_cfg_mode_s *iocmd =
1292 (struct bfa_bsg_adapter_cfg_mode_s *)cmd;
1293 struct bfad_hal_comp fcomp;
1294 unsigned long flags = 0;
1296 init_completion(&fcomp.comp);
1297 spin_lock_irqsave(&bfad->bfad_lock, flags);
1298 iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
1299 iocmd->cfg.mode, iocmd->cfg.max_pf,
1300 iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
1301 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1302 if (iocmd->status != BFA_STATUS_OK)
1303 goto out;
1305 wait_for_completion(&fcomp.comp);
1306 iocmd->status = fcomp.status;
1307 out:
1308 return 0;
1312 bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
1314 struct bfa_bsg_port_cfg_mode_s *iocmd =
1315 (struct bfa_bsg_port_cfg_mode_s *)cmd;
1316 struct bfad_hal_comp fcomp;
1317 unsigned long flags = 0;
1319 init_completion(&fcomp.comp);
1320 spin_lock_irqsave(&bfad->bfad_lock, flags);
1321 iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
1322 iocmd->instance, iocmd->cfg.mode,
1323 iocmd->cfg.max_pf, iocmd->cfg.max_vf,
1324 bfad_hcb_comp, &fcomp);
1325 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1326 if (iocmd->status != BFA_STATUS_OK)
1327 goto out;
1329 wait_for_completion(&fcomp.comp);
1330 iocmd->status = fcomp.status;
1331 out:
1332 return 0;
1336 bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
1338 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1339 struct bfad_hal_comp fcomp;
1340 unsigned long flags;
1342 init_completion(&fcomp.comp);
1343 spin_lock_irqsave(&bfad->bfad_lock, flags);
1344 if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
1345 iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
1346 bfad_hcb_comp, &fcomp);
1347 else
1348 iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
1349 bfad_hcb_comp, &fcomp);
1350 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1352 if (iocmd->status != BFA_STATUS_OK)
1353 goto out;
1355 wait_for_completion(&fcomp.comp);
1356 iocmd->status = fcomp.status;
1357 out:
1358 return 0;
1362 bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
1364 struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
1365 struct bfad_hal_comp fcomp;
1366 unsigned long flags;
1368 init_completion(&fcomp.comp);
1369 iocmd->status = BFA_STATUS_OK;
1370 spin_lock_irqsave(&bfad->bfad_lock, flags);
1371 iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
1372 bfad_hcb_comp, &fcomp);
1373 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1375 if (iocmd->status != BFA_STATUS_OK)
1376 goto out;
1378 wait_for_completion(&fcomp.comp);
1379 iocmd->status = fcomp.status;
1380 out:
1381 return 0;
1385 bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1387 struct bfa_bsg_cee_attr_s *iocmd =
1388 (struct bfa_bsg_cee_attr_s *)cmd;
1389 void *iocmd_bufptr;
1390 struct bfad_hal_comp cee_comp;
1391 unsigned long flags;
1393 if (bfad_chk_iocmd_sz(payload_len,
1394 sizeof(struct bfa_bsg_cee_attr_s),
1395 sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
1396 iocmd->status = BFA_STATUS_VERSION_FAIL;
1397 return 0;
1400 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
1402 cee_comp.status = 0;
1403 init_completion(&cee_comp.comp);
1404 mutex_lock(&bfad_mutex);
1405 spin_lock_irqsave(&bfad->bfad_lock, flags);
1406 iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
1407 bfad_hcb_comp, &cee_comp);
1408 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1409 if (iocmd->status != BFA_STATUS_OK) {
1410 mutex_unlock(&bfad_mutex);
1411 bfa_trc(bfad, 0x5555);
1412 goto out;
1414 wait_for_completion(&cee_comp.comp);
1415 mutex_unlock(&bfad_mutex);
1416 out:
1417 return 0;
1421 bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
1422 unsigned int payload_len)
1424 struct bfa_bsg_cee_stats_s *iocmd =
1425 (struct bfa_bsg_cee_stats_s *)cmd;
1426 void *iocmd_bufptr;
1427 struct bfad_hal_comp cee_comp;
1428 unsigned long flags;
1430 if (bfad_chk_iocmd_sz(payload_len,
1431 sizeof(struct bfa_bsg_cee_stats_s),
1432 sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
1433 iocmd->status = BFA_STATUS_VERSION_FAIL;
1434 return 0;
1437 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
1439 cee_comp.status = 0;
1440 init_completion(&cee_comp.comp);
1441 mutex_lock(&bfad_mutex);
1442 spin_lock_irqsave(&bfad->bfad_lock, flags);
1443 iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
1444 bfad_hcb_comp, &cee_comp);
1445 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1446 if (iocmd->status != BFA_STATUS_OK) {
1447 mutex_unlock(&bfad_mutex);
1448 bfa_trc(bfad, 0x5555);
1449 goto out;
1451 wait_for_completion(&cee_comp.comp);
1452 mutex_unlock(&bfad_mutex);
1453 out:
1454 return 0;
1458 bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
1460 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1461 unsigned long flags;
1463 spin_lock_irqsave(&bfad->bfad_lock, flags);
1464 iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
1465 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1466 if (iocmd->status != BFA_STATUS_OK)
1467 bfa_trc(bfad, 0x5555);
1468 return 0;
1472 bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
1474 struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
1475 struct bfad_hal_comp fcomp;
1476 unsigned long flags;
1478 init_completion(&fcomp.comp);
1479 spin_lock_irqsave(&bfad->bfad_lock, flags);
1480 iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
1481 bfad_hcb_comp, &fcomp);
1482 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1483 bfa_trc(bfad, iocmd->status);
1484 if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1485 goto out;
1487 wait_for_completion(&fcomp.comp);
1488 iocmd->status = fcomp.status;
1489 out:
1490 return 0;
1494 bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
1496 struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
1497 struct bfad_hal_comp fcomp;
1498 unsigned long flags;
1500 init_completion(&fcomp.comp);
1501 spin_lock_irqsave(&bfad->bfad_lock, flags);
1502 iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
1503 bfad_hcb_comp, &fcomp);
1504 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1505 bfa_trc(bfad, iocmd->status);
1506 if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1507 goto out;
1508 wait_for_completion(&fcomp.comp);
1509 iocmd->status = fcomp.status;
1510 out:
1511 return 0;
1515 bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
1517 struct bfa_bsg_flash_attr_s *iocmd =
1518 (struct bfa_bsg_flash_attr_s *)cmd;
1519 struct bfad_hal_comp fcomp;
1520 unsigned long flags;
1522 init_completion(&fcomp.comp);
1523 spin_lock_irqsave(&bfad->bfad_lock, flags);
1524 iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
1525 bfad_hcb_comp, &fcomp);
1526 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1527 if (iocmd->status != BFA_STATUS_OK)
1528 goto out;
1529 wait_for_completion(&fcomp.comp);
1530 iocmd->status = fcomp.status;
1531 out:
1532 return 0;
1536 bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
1538 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1539 struct bfad_hal_comp fcomp;
1540 unsigned long flags;
1542 init_completion(&fcomp.comp);
1543 spin_lock_irqsave(&bfad->bfad_lock, flags);
1544 iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1545 iocmd->instance, bfad_hcb_comp, &fcomp);
1546 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1547 if (iocmd->status != BFA_STATUS_OK)
1548 goto out;
1549 wait_for_completion(&fcomp.comp);
1550 iocmd->status = fcomp.status;
1551 out:
1552 return 0;
1556 bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
1557 unsigned int payload_len)
1559 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1560 void *iocmd_bufptr;
1561 struct bfad_hal_comp fcomp;
1562 unsigned long flags;
1564 if (bfad_chk_iocmd_sz(payload_len,
1565 sizeof(struct bfa_bsg_flash_s),
1566 iocmd->bufsz) != BFA_STATUS_OK) {
1567 iocmd->status = BFA_STATUS_VERSION_FAIL;
1568 return 0;
1571 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1573 init_completion(&fcomp.comp);
1574 spin_lock_irqsave(&bfad->bfad_lock, flags);
1575 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
1576 iocmd->type, iocmd->instance, iocmd_bufptr,
1577 iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
1578 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1579 if (iocmd->status != BFA_STATUS_OK)
1580 goto out;
1581 wait_for_completion(&fcomp.comp);
1582 iocmd->status = fcomp.status;
1583 out:
1584 return 0;
1588 bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
1589 unsigned int payload_len)
1591 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1592 struct bfad_hal_comp fcomp;
1593 void *iocmd_bufptr;
1594 unsigned long flags;
1596 if (bfad_chk_iocmd_sz(payload_len,
1597 sizeof(struct bfa_bsg_flash_s),
1598 iocmd->bufsz) != BFA_STATUS_OK) {
1599 iocmd->status = BFA_STATUS_VERSION_FAIL;
1600 return 0;
1603 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1605 init_completion(&fcomp.comp);
1606 spin_lock_irqsave(&bfad->bfad_lock, flags);
1607 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1608 iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
1609 bfad_hcb_comp, &fcomp);
1610 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1611 if (iocmd->status != BFA_STATUS_OK)
1612 goto out;
1613 wait_for_completion(&fcomp.comp);
1614 iocmd->status = fcomp.status;
1615 out:
1616 return 0;
1620 bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
1622 struct bfa_bsg_diag_get_temp_s *iocmd =
1623 (struct bfa_bsg_diag_get_temp_s *)cmd;
1624 struct bfad_hal_comp fcomp;
1625 unsigned long flags;
1627 init_completion(&fcomp.comp);
1628 spin_lock_irqsave(&bfad->bfad_lock, flags);
1629 iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
1630 &iocmd->result, bfad_hcb_comp, &fcomp);
1631 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1632 bfa_trc(bfad, iocmd->status);
1633 if (iocmd->status != BFA_STATUS_OK)
1634 goto out;
1635 wait_for_completion(&fcomp.comp);
1636 iocmd->status = fcomp.status;
1637 out:
1638 return 0;
1642 bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
1644 struct bfa_bsg_diag_memtest_s *iocmd =
1645 (struct bfa_bsg_diag_memtest_s *)cmd;
1646 struct bfad_hal_comp fcomp;
1647 unsigned long flags;
1649 init_completion(&fcomp.comp);
1650 spin_lock_irqsave(&bfad->bfad_lock, flags);
1651 iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
1652 &iocmd->memtest, iocmd->pat,
1653 &iocmd->result, bfad_hcb_comp, &fcomp);
1654 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1655 bfa_trc(bfad, iocmd->status);
1656 if (iocmd->status != BFA_STATUS_OK)
1657 goto out;
1658 wait_for_completion(&fcomp.comp);
1659 iocmd->status = fcomp.status;
1660 out:
1661 return 0;
1665 bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
1667 struct bfa_bsg_diag_loopback_s *iocmd =
1668 (struct bfa_bsg_diag_loopback_s *)cmd;
1669 struct bfad_hal_comp fcomp;
1670 unsigned long flags;
1672 init_completion(&fcomp.comp);
1673 spin_lock_irqsave(&bfad->bfad_lock, flags);
1674 iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
1675 iocmd->speed, iocmd->lpcnt, iocmd->pat,
1676 &iocmd->result, bfad_hcb_comp, &fcomp);
1677 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1678 bfa_trc(bfad, iocmd->status);
1679 if (iocmd->status != BFA_STATUS_OK)
1680 goto out;
1681 wait_for_completion(&fcomp.comp);
1682 iocmd->status = fcomp.status;
1683 out:
1684 return 0;
1688 bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
1690 struct bfa_bsg_diag_fwping_s *iocmd =
1691 (struct bfa_bsg_diag_fwping_s *)cmd;
1692 struct bfad_hal_comp fcomp;
1693 unsigned long flags;
1695 init_completion(&fcomp.comp);
1696 spin_lock_irqsave(&bfad->bfad_lock, flags);
1697 iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
1698 iocmd->pattern, &iocmd->result,
1699 bfad_hcb_comp, &fcomp);
1700 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1701 bfa_trc(bfad, iocmd->status);
1702 if (iocmd->status != BFA_STATUS_OK)
1703 goto out;
1704 bfa_trc(bfad, 0x77771);
1705 wait_for_completion(&fcomp.comp);
1706 iocmd->status = fcomp.status;
1707 out:
1708 return 0;
1712 bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
1714 struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
1715 struct bfad_hal_comp fcomp;
1716 unsigned long flags;
1718 init_completion(&fcomp.comp);
1719 spin_lock_irqsave(&bfad->bfad_lock, flags);
1720 iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
1721 iocmd->queue, &iocmd->result,
1722 bfad_hcb_comp, &fcomp);
1723 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1724 if (iocmd->status != BFA_STATUS_OK)
1725 goto out;
1726 wait_for_completion(&fcomp.comp);
1727 iocmd->status = fcomp.status;
1728 out:
1729 return 0;
1733 bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
1735 struct bfa_bsg_sfp_show_s *iocmd =
1736 (struct bfa_bsg_sfp_show_s *)cmd;
1737 struct bfad_hal_comp fcomp;
1738 unsigned long flags;
1740 init_completion(&fcomp.comp);
1741 spin_lock_irqsave(&bfad->bfad_lock, flags);
1742 iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
1743 bfad_hcb_comp, &fcomp);
1744 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1745 bfa_trc(bfad, iocmd->status);
1746 if (iocmd->status != BFA_STATUS_OK)
1747 goto out;
1748 wait_for_completion(&fcomp.comp);
1749 iocmd->status = fcomp.status;
1750 bfa_trc(bfad, iocmd->status);
1751 out:
1752 return 0;
1756 bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
1758 struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
1759 unsigned long flags;
1761 spin_lock_irqsave(&bfad->bfad_lock, flags);
1762 iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
1763 &iocmd->ledtest);
1764 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1765 return 0;
1769 bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
1771 struct bfa_bsg_diag_beacon_s *iocmd =
1772 (struct bfa_bsg_diag_beacon_s *)cmd;
1773 unsigned long flags;
1775 spin_lock_irqsave(&bfad->bfad_lock, flags);
1776 iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
1777 iocmd->beacon, iocmd->link_e2e_beacon,
1778 iocmd->second);
1779 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1780 return 0;
1784 bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
1786 struct bfa_bsg_diag_lb_stat_s *iocmd =
1787 (struct bfa_bsg_diag_lb_stat_s *)cmd;
1788 unsigned long flags;
1790 spin_lock_irqsave(&bfad->bfad_lock, flags);
1791 iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
1792 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1793 bfa_trc(bfad, iocmd->status);
1795 return 0;
1799 bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
1801 struct bfa_bsg_dport_enable_s *iocmd =
1802 (struct bfa_bsg_dport_enable_s *)pcmd;
1803 unsigned long flags;
1804 struct bfad_hal_comp fcomp;
1806 init_completion(&fcomp.comp);
1807 spin_lock_irqsave(&bfad->bfad_lock, flags);
1808 iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
1809 iocmd->pat, bfad_hcb_comp, &fcomp);
1810 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1811 if (iocmd->status != BFA_STATUS_OK)
1812 bfa_trc(bfad, iocmd->status);
1813 else {
1814 wait_for_completion(&fcomp.comp);
1815 iocmd->status = fcomp.status;
1817 return 0;
1821 bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
1823 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1824 unsigned long flags;
1825 struct bfad_hal_comp fcomp;
1827 init_completion(&fcomp.comp);
1828 spin_lock_irqsave(&bfad->bfad_lock, flags);
1829 iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
1830 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1831 if (iocmd->status != BFA_STATUS_OK)
1832 bfa_trc(bfad, iocmd->status);
1833 else {
1834 wait_for_completion(&fcomp.comp);
1835 iocmd->status = fcomp.status;
1837 return 0;
1841 bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
1843 struct bfa_bsg_dport_enable_s *iocmd =
1844 (struct bfa_bsg_dport_enable_s *)pcmd;
1845 unsigned long flags;
1846 struct bfad_hal_comp fcomp;
1848 init_completion(&fcomp.comp);
1849 spin_lock_irqsave(&bfad->bfad_lock, flags);
1850 iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
1851 iocmd->pat, bfad_hcb_comp,
1852 &fcomp);
1853 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1855 if (iocmd->status != BFA_STATUS_OK) {
1856 bfa_trc(bfad, iocmd->status);
1857 } else {
1858 wait_for_completion(&fcomp.comp);
1859 iocmd->status = fcomp.status;
1862 return 0;
1866 bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
1868 struct bfa_bsg_diag_dport_show_s *iocmd =
1869 (struct bfa_bsg_diag_dport_show_s *)pcmd;
1870 unsigned long flags;
1872 spin_lock_irqsave(&bfad->bfad_lock, flags);
1873 iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
1874 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1876 return 0;
1881 bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
1883 struct bfa_bsg_phy_attr_s *iocmd =
1884 (struct bfa_bsg_phy_attr_s *)cmd;
1885 struct bfad_hal_comp fcomp;
1886 unsigned long flags;
1888 init_completion(&fcomp.comp);
1889 spin_lock_irqsave(&bfad->bfad_lock, flags);
1890 iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
1891 &iocmd->attr, bfad_hcb_comp, &fcomp);
1892 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1893 if (iocmd->status != BFA_STATUS_OK)
1894 goto out;
1895 wait_for_completion(&fcomp.comp);
1896 iocmd->status = fcomp.status;
1897 out:
1898 return 0;
1902 bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
1904 struct bfa_bsg_phy_stats_s *iocmd =
1905 (struct bfa_bsg_phy_stats_s *)cmd;
1906 struct bfad_hal_comp fcomp;
1907 unsigned long flags;
1909 init_completion(&fcomp.comp);
1910 spin_lock_irqsave(&bfad->bfad_lock, flags);
1911 iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
1912 &iocmd->stats, bfad_hcb_comp, &fcomp);
1913 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1914 if (iocmd->status != BFA_STATUS_OK)
1915 goto out;
1916 wait_for_completion(&fcomp.comp);
1917 iocmd->status = fcomp.status;
1918 out:
1919 return 0;
1923 bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1925 struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1926 struct bfad_hal_comp fcomp;
1927 void *iocmd_bufptr;
1928 unsigned long flags;
1930 if (bfad_chk_iocmd_sz(payload_len,
1931 sizeof(struct bfa_bsg_phy_s),
1932 iocmd->bufsz) != BFA_STATUS_OK) {
1933 iocmd->status = BFA_STATUS_VERSION_FAIL;
1934 return 0;
1937 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1938 init_completion(&fcomp.comp);
1939 spin_lock_irqsave(&bfad->bfad_lock, flags);
1940 iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
1941 iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1942 0, bfad_hcb_comp, &fcomp);
1943 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1944 if (iocmd->status != BFA_STATUS_OK)
1945 goto out;
1946 wait_for_completion(&fcomp.comp);
1947 iocmd->status = fcomp.status;
1948 if (iocmd->status != BFA_STATUS_OK)
1949 goto out;
1950 out:
1951 return 0;
1955 bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
1957 struct bfa_bsg_vhba_attr_s *iocmd =
1958 (struct bfa_bsg_vhba_attr_s *)cmd;
1959 struct bfa_vhba_attr_s *attr = &iocmd->attr;
1960 unsigned long flags;
1962 spin_lock_irqsave(&bfad->bfad_lock, flags);
1963 attr->pwwn = bfad->bfa.ioc.attr->pwwn;
1964 attr->nwwn = bfad->bfa.ioc.attr->nwwn;
1965 attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
1966 attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
1967 attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
1968 iocmd->status = BFA_STATUS_OK;
1969 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1970 return 0;
1974 bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1976 struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1977 void *iocmd_bufptr;
1978 struct bfad_hal_comp fcomp;
1979 unsigned long flags;
1981 if (bfad_chk_iocmd_sz(payload_len,
1982 sizeof(struct bfa_bsg_phy_s),
1983 iocmd->bufsz) != BFA_STATUS_OK) {
1984 iocmd->status = BFA_STATUS_VERSION_FAIL;
1985 return 0;
1988 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1989 init_completion(&fcomp.comp);
1990 spin_lock_irqsave(&bfad->bfad_lock, flags);
1991 iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
1992 iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1993 0, bfad_hcb_comp, &fcomp);
1994 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1995 if (iocmd->status != BFA_STATUS_OK)
1996 goto out;
1997 wait_for_completion(&fcomp.comp);
1998 iocmd->status = fcomp.status;
1999 out:
2000 return 0;
2004 bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
2006 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
2007 void *iocmd_bufptr;
2009 if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
2010 bfa_trc(bfad, sizeof(struct bfa_plog_s));
2011 iocmd->status = BFA_STATUS_EINVAL;
2012 goto out;
2015 iocmd->status = BFA_STATUS_OK;
2016 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
2017 memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
2018 out:
2019 return 0;
2022 #define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
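/*
 * Read one chunk of the saved firmware core dump. Userspace supplies a
 * buffer of at least BFA_DEBUG_FW_CORE_CHUNK_SZ bytes plus the current
 * offset; the driver copies the next chunk via bfa_ioc_debug_fwcore()
 * and hands back the updated offset so the caller can iterate until
 * the whole dump has been retrieved.
 */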
2024 bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
2025 unsigned int payload_len)
2027 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
2028 void *iocmd_bufptr;
2029 unsigned long flags;
2030 u32 offset;
2032 if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
2033 BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
2034 iocmd->status = BFA_STATUS_VERSION_FAIL;
2035 return 0;
2038 if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
2039 !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
2040 !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
2041 bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
2042 iocmd->status = BFA_STATUS_EINVAL;
2043 goto out;
2046 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
2047 spin_lock_irqsave(&bfad->bfad_lock, flags);
2048 offset = iocmd->offset;
2049 iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
2050 &offset, &iocmd->bufsz);
2051 iocmd->offset = offset;
2052 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2053 out:
2054 return 0;
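/*
 * Debug control knobs: re-arm the one-shot firmware state save
 * (FW_STATE_CLR), reset the port log ring (PORTLOG_CLR), or start/stop
 * the driver trace module (START_DTRC/STOP_DTRC).
 */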
2058 bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2060 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2061 unsigned long flags;
2063 if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
2064 spin_lock_irqsave(&bfad->bfad_lock, flags);
2065 bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
2066 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2067 } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
2068 bfad->plog_buf.head = bfad->plog_buf.tail = 0;
2069 else if (v_cmd == IOCMD_DEBUG_START_DTRC)
2070 bfa_trc_init(bfad->trcmod);
2071 else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
2072 bfa_trc_stop(bfad->trcmod);
2074 iocmd->status = BFA_STATUS_OK;
2075 return 0;
2079 bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
2081 struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
2083 if (iocmd->ctl == BFA_TRUE)
2084 bfad->plog_buf.plog_enabled = 1;
2085 else
2086 bfad->plog_buf.plog_enabled = 0;
2088 iocmd->status = BFA_STATUS_OK;
2089 return 0;
2093 bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2095 struct bfa_bsg_fcpim_profile_s *iocmd =
2096 (struct bfa_bsg_fcpim_profile_s *)cmd;
2097 unsigned long flags;
2099 spin_lock_irqsave(&bfad->bfad_lock, flags);
2100 if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
2101 iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds());
2102 else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
2103 iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
2104 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2106 return 0;
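/*
 * Fetch the I/O profile for a single I-T nexus: resolve the logical
 * port by vf_id/lpwwn, then the itnim by remote port WWN, and copy the
 * HAL itnim's profile into the response.
 */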
2109 static int
2110 bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
2112 struct bfa_bsg_itnim_ioprofile_s *iocmd =
2113 (struct bfa_bsg_itnim_ioprofile_s *)cmd;
2114 struct bfa_fcs_lport_s *fcs_port;
2115 struct bfa_fcs_itnim_s *itnim;
2116 unsigned long flags;
2118 spin_lock_irqsave(&bfad->bfad_lock, flags);
2119 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
2120 iocmd->vf_id, iocmd->lpwwn);
2121 if (!fcs_port)
2122 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
2123 else {
2124 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
2125 if (itnim == NULL)
2126 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
2127 else
2128 iocmd->status = bfa_itnim_get_ioprofile(
2129 bfa_fcs_itnim_get_halitn(itnim),
2130 &iocmd->ioprofile);
2132 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2133 return 0;
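/*
 * FC port statistics are fetched asynchronously: the request is queued
 * on a bfa_cb_pending_q entry pointing at iocmd->stats, and the caller
 * blocks on the hal completion until the firmware response lands.
 */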
2137 bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
2139 struct bfa_bsg_fcport_stats_s *iocmd =
2140 (struct bfa_bsg_fcport_stats_s *)cmd;
2141 struct bfad_hal_comp fcomp;
2142 unsigned long flags;
2143 struct bfa_cb_pending_q_s cb_qe;
2145 init_completion(&fcomp.comp);
2146 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2147 &fcomp, &iocmd->stats);
2148 spin_lock_irqsave(&bfad->bfad_lock, flags);
2149 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2150 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2151 if (iocmd->status != BFA_STATUS_OK) {
2152 bfa_trc(bfad, iocmd->status);
2153 goto out;
2155 wait_for_completion(&fcomp.comp);
2156 iocmd->status = fcomp.status;
2157 out:
2158 return 0;
2162 bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
2164 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2165 struct bfad_hal_comp fcomp;
2166 unsigned long flags;
2167 struct bfa_cb_pending_q_s cb_qe;
2169 init_completion(&fcomp.comp);
2170 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
2172 spin_lock_irqsave(&bfad->bfad_lock, flags);
2173 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2174 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2175 if (iocmd->status != BFA_STATUS_OK) {
2176 bfa_trc(bfad, iocmd->status);
2177 goto out;
2179 wait_for_completion(&fcomp.comp);
2180 iocmd->status = fcomp.status;
2181 out:
2182 return 0;
2186 bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
2188 struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2189 struct bfad_hal_comp fcomp;
2190 unsigned long flags;
2192 init_completion(&fcomp.comp);
2193 spin_lock_irqsave(&bfad->bfad_lock, flags);
2194 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2195 BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2196 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2197 bfad_hcb_comp, &fcomp);
2198 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2199 if (iocmd->status != BFA_STATUS_OK)
2200 goto out;
2201 wait_for_completion(&fcomp.comp);
2202 iocmd->status = fcomp.status;
2203 out:
2204 return 0;
2208 bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
2210 struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2211 struct bfad_hal_comp fcomp;
2212 unsigned long flags;
2214 init_completion(&fcomp.comp);
2215 spin_lock_irqsave(&bfad->bfad_lock, flags);
2216 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2217 BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2218 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2219 bfad_hcb_comp, &fcomp);
2220 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2221 if (iocmd->status != BFA_STATUS_OK)
2222 goto out;
2223 wait_for_completion(&fcomp.comp);
2224 iocmd->status = fcomp.status;
2225 out:
2226 return 0;
2230 bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
2232 struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
2233 struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
2234 struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
2235 unsigned long flags;
2237 spin_lock_irqsave(&bfad->bfad_lock, flags);
2238 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
2239 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
2240 pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
2241 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
2242 iocmd->status = BFA_STATUS_OK;
2243 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2245 return 0;
2249 bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
2251 struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2252 struct bfad_hal_comp fcomp;
2253 unsigned long flags;
2255 init_completion(&fcomp.comp);
2256 spin_lock_irqsave(&bfad->bfad_lock, flags);
2257 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2258 BFA_FLASH_PART_PXECFG,
2259 bfad->bfa.ioc.port_id, &iocmd->cfg,
2260 sizeof(struct bfa_ethboot_cfg_s), 0,
2261 bfad_hcb_comp, &fcomp);
2262 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2263 if (iocmd->status != BFA_STATUS_OK)
2264 goto out;
2265 wait_for_completion(&fcomp.comp);
2266 iocmd->status = fcomp.status;
2267 out:
2268 return 0;
2272 bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
2274 struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2275 struct bfad_hal_comp fcomp;
2276 unsigned long flags;
2278 init_completion(&fcomp.comp);
2279 spin_lock_irqsave(&bfad->bfad_lock, flags);
2280 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2281 BFA_FLASH_PART_PXECFG,
2282 bfad->bfa.ioc.port_id, &iocmd->cfg,
2283 sizeof(struct bfa_ethboot_cfg_s), 0,
2284 bfad_hcb_comp, &fcomp);
2285 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2286 if (iocmd->status != BFA_STATUS_OK)
2287 goto out;
2288 wait_for_completion(&fcomp.comp);
2289 iocmd->status = fcomp.status;
2290 out:
2291 return 0;
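/*
 * Enable/disable FC trunking. Not allowed while the port is in dport
 * (diagnostic) mode or configured for loop topology; the change is
 * applied by bouncing the fcport (disable, update cfg.trunked,
 * re-enable).
 */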
2295 bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2297 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2298 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2299 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2300 unsigned long flags;
2302 spin_lock_irqsave(&bfad->bfad_lock, flags);
2304 if (bfa_fcport_is_dport(&bfad->bfa)) {
2305 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2306 return BFA_STATUS_DPORT_ERR;
2309 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2310 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2311 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2312 else {
2313 if (v_cmd == IOCMD_TRUNK_ENABLE) {
2314 trunk->attr.state = BFA_TRUNK_OFFLINE;
2315 bfa_fcport_disable(&bfad->bfa);
2316 fcport->cfg.trunked = BFA_TRUE;
2317 } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
2318 trunk->attr.state = BFA_TRUNK_DISABLED;
2319 bfa_fcport_disable(&bfad->bfa);
2320 fcport->cfg.trunked = BFA_FALSE;
2323 if (!bfa_fcport_is_disabled(&bfad->bfa))
2324 bfa_fcport_enable(&bfad->bfa);
2326 iocmd->status = BFA_STATUS_OK;
2329 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2331 return 0;
2335 bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
2337 struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
2338 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2339 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2340 unsigned long flags;
2342 spin_lock_irqsave(&bfad->bfad_lock, flags);
2343 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2344 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2345 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2346 else {
2347 memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
2348 sizeof(struct bfa_trunk_attr_s));
2349 iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
2350 iocmd->status = BFA_STATUS_OK;
2352 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2354 return 0;
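/*
 * QoS enable/disable is only applied on an FC-mode IOC and is skipped
 * for loop topology; disabling also resets the high/med/low bandwidth
 * split back to the BFA_QOS_BW_* defaults.
 */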
2358 bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2360 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2361 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2362 unsigned long flags;
2364 spin_lock_irqsave(&bfad->bfad_lock, flags);
2365 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
2366 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2367 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2368 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2369 else {
2370 if (v_cmd == IOCMD_QOS_ENABLE)
2371 fcport->cfg.qos_enabled = BFA_TRUE;
2372 else if (v_cmd == IOCMD_QOS_DISABLE) {
2373 fcport->cfg.qos_enabled = BFA_FALSE;
2374 fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
2375 fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
2376 fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
2380 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2382 return 0;
2386 bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
2388 struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
2389 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2390 unsigned long flags;
2392 spin_lock_irqsave(&bfad->bfad_lock, flags);
2393 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2394 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2395 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2396 else {
2397 iocmd->attr.state = fcport->qos_attr.state;
2398 iocmd->attr.total_bb_cr =
2399 be32_to_cpu(fcport->qos_attr.total_bb_cr);
2400 iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
2401 iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
2402 iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
2403 iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
2404 iocmd->status = BFA_STATUS_OK;
2406 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2408 return 0;
2412 bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
2414 struct bfa_bsg_qos_vc_attr_s *iocmd =
2415 (struct bfa_bsg_qos_vc_attr_s *)cmd;
2416 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2417 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
2418 unsigned long flags;
2419 u32 i = 0;
2421 spin_lock_irqsave(&bfad->bfad_lock, flags);
2422 iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
2423 iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
2424 iocmd->attr.elp_opmode_flags =
2425 be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
2427 /* Individual VC info */
2428 while (i < iocmd->attr.total_vc_count) {
2429 iocmd->attr.vc_info[i].vc_credit =
2430 bfa_vc_attr->vc_info[i].vc_credit;
2431 iocmd->attr.vc_info[i].borrow_credit =
2432 bfa_vc_attr->vc_info[i].borrow_credit;
2433 iocmd->attr.vc_info[i].priority =
2434 bfa_vc_attr->vc_info[i].priority;
2435 i++;
2437 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2439 iocmd->status = BFA_STATUS_OK;
2440 return 0;
2444 bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
2446 struct bfa_bsg_fcport_stats_s *iocmd =
2447 (struct bfa_bsg_fcport_stats_s *)cmd;
2448 struct bfad_hal_comp fcomp;
2449 unsigned long flags;
2450 struct bfa_cb_pending_q_s cb_qe;
2451 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2453 init_completion(&fcomp.comp);
2454 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2455 &fcomp, &iocmd->stats);
2457 spin_lock_irqsave(&bfad->bfad_lock, flags);
2458 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2459 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2460 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2461 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2462 else
2463 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2464 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2465 if (iocmd->status != BFA_STATUS_OK) {
2466 bfa_trc(bfad, iocmd->status);
2467 goto out;
2469 wait_for_completion(&fcomp.comp);
2470 iocmd->status = fcomp.status;
2471 out:
2472 return 0;
2476 bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
2478 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2479 struct bfad_hal_comp fcomp;
2480 unsigned long flags;
2481 struct bfa_cb_pending_q_s cb_qe;
2482 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2484 init_completion(&fcomp.comp);
2485 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2486 &fcomp, NULL);
2488 spin_lock_irqsave(&bfad->bfad_lock, flags);
2489 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2490 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2491 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2492 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2493 else
2494 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2495 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2496 if (iocmd->status != BFA_STATUS_OK) {
2497 bfa_trc(bfad, iocmd->status);
2498 goto out;
2500 wait_for_completion(&fcomp.comp);
2501 iocmd->status = fcomp.status;
2502 out:
2503 return 0;
2507 bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
2509 struct bfa_bsg_vf_stats_s *iocmd =
2510 (struct bfa_bsg_vf_stats_s *)cmd;
2511 struct bfa_fcs_fabric_s *fcs_vf;
2512 unsigned long flags;
2514 spin_lock_irqsave(&bfad->bfad_lock, flags);
2515 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2516 if (fcs_vf == NULL) {
2517 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2518 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2519 goto out;
2521 memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
2522 sizeof(struct bfa_vf_stats_s));
2523 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2524 iocmd->status = BFA_STATUS_OK;
2525 out:
2526 return 0;
2530 bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
2532 struct bfa_bsg_vf_reset_stats_s *iocmd =
2533 (struct bfa_bsg_vf_reset_stats_s *)cmd;
2534 struct bfa_fcs_fabric_s *fcs_vf;
2535 unsigned long flags;
2537 spin_lock_irqsave(&bfad->bfad_lock, flags);
2538 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2539 if (fcs_vf == NULL) {
2540 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2541 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2542 goto out;
2544 memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
2545 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2546 iocmd->status = BFA_STATUS_OK;
2547 out:
2548 return 0;
2551 /* Function to reset the LUN SCAN mode */
2552 static void
2553 bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
2555 struct bfad_im_port_s *pport_im = bfad->pport.im_port;
2556 struct bfad_vport_s *vport = NULL;
2558 /* Set the scsi device LUN SCAN flags for base port */
2559 bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
2561 /* Set the scsi device LUN SCAN flags for the vports */
2562 list_for_each_entry(vport, &bfad->vport_list, list_entry)
2563 bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
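/*
 * LUN mask enable/disable/clear. Enabling switches the attached scsi
 * devices to sequential LUN scan and disabling restores the default
 * REPORT_LUNS based scan (both via
 * bfad_iocmd_lunmask_reset_lunscan_mode()); CLEAR drops all configured
 * LUN mask entries.
 */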
2567 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
2569 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
2570 unsigned long flags;
2572 spin_lock_irqsave(&bfad->bfad_lock, flags);
2573 if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
2574 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
2575 /* Set the LUN Scanning mode to be Sequential scan */
2576 if (iocmd->status == BFA_STATUS_OK)
2577 bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
2578 } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
2579 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
2580 /* Set the LUN Scanning mode to default REPORT_LUNS scan */
2581 if (iocmd->status == BFA_STATUS_OK)
2582 bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
2583 } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
2584 iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
2585 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2586 return 0;
2590 bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
2592 struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
2593 (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
2594 struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
2595 unsigned long flags;
2597 spin_lock_irqsave(&bfad->bfad_lock, flags);
2598 iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
2599 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2600 return 0;
2604 bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2606 struct bfa_bsg_fcpim_lunmask_s *iocmd =
2607 (struct bfa_bsg_fcpim_lunmask_s *)cmd;
2608 unsigned long flags;
2610 spin_lock_irqsave(&bfad->bfad_lock, flags);
2611 if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
2612 iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
2613 &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
2614 else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
2615 iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
2616 iocmd->vf_id, &iocmd->pwwn,
2617 iocmd->rpwwn, iocmd->lun);
2618 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2619 return 0;
2623 bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
2625 struct bfa_bsg_fcpim_throttle_s *iocmd =
2626 (struct bfa_bsg_fcpim_throttle_s *)cmd;
2627 unsigned long flags;
2629 spin_lock_irqsave(&bfad->bfad_lock, flags);
2630 iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
2631 (void *)&iocmd->throttle);
2632 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2634 return 0;
2638 bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
2640 struct bfa_bsg_fcpim_throttle_s *iocmd =
2641 (struct bfa_bsg_fcpim_throttle_s *)cmd;
2642 unsigned long flags;
2644 spin_lock_irqsave(&bfad->bfad_lock, flags);
2645 iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
2646 iocmd->throttle.cfg_value);
2647 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2649 return 0;
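/*
 * TFRU / FRU VPD accessors. Each helper issues the FRU request under
 * bfad_lock and, if the request was accepted, blocks on the hal
 * completion for the final status.
 */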
2653 bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
2655 struct bfa_bsg_tfru_s *iocmd =
2656 (struct bfa_bsg_tfru_s *)cmd;
2657 struct bfad_hal_comp fcomp;
2658 unsigned long flags = 0;
2660 init_completion(&fcomp.comp);
2661 spin_lock_irqsave(&bfad->bfad_lock, flags);
2662 iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
2663 &iocmd->data, iocmd->len, iocmd->offset,
2664 bfad_hcb_comp, &fcomp);
2665 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2666 if (iocmd->status == BFA_STATUS_OK) {
2667 wait_for_completion(&fcomp.comp);
2668 iocmd->status = fcomp.status;
2671 return 0;
2675 bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
2677 struct bfa_bsg_tfru_s *iocmd =
2678 (struct bfa_bsg_tfru_s *)cmd;
2679 struct bfad_hal_comp fcomp;
2680 unsigned long flags = 0;
2682 init_completion(&fcomp.comp);
2683 spin_lock_irqsave(&bfad->bfad_lock, flags);
2684 iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
2685 &iocmd->data, iocmd->len, iocmd->offset,
2686 bfad_hcb_comp, &fcomp);
2687 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2688 if (iocmd->status == BFA_STATUS_OK) {
2689 wait_for_completion(&fcomp.comp);
2690 iocmd->status = fcomp.status;
2693 return 0;
2697 bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
2699 struct bfa_bsg_fruvpd_s *iocmd =
2700 (struct bfa_bsg_fruvpd_s *)cmd;
2701 struct bfad_hal_comp fcomp;
2702 unsigned long flags = 0;
2704 init_completion(&fcomp.comp);
2705 spin_lock_irqsave(&bfad->bfad_lock, flags);
2706 iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
2707 &iocmd->data, iocmd->len, iocmd->offset,
2708 bfad_hcb_comp, &fcomp);
2709 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2710 if (iocmd->status == BFA_STATUS_OK) {
2711 wait_for_completion(&fcomp.comp);
2712 iocmd->status = fcomp.status;
2715 return 0;
2719 bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
2721 struct bfa_bsg_fruvpd_s *iocmd =
2722 (struct bfa_bsg_fruvpd_s *)cmd;
2723 struct bfad_hal_comp fcomp;
2724 unsigned long flags = 0;
2726 init_completion(&fcomp.comp);
2727 spin_lock_irqsave(&bfad->bfad_lock, flags);
2728 iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
2729 &iocmd->data, iocmd->len, iocmd->offset,
2730 bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
2731 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2732 if (iocmd->status == BFA_STATUS_OK) {
2733 wait_for_completion(&fcomp.comp);
2734 iocmd->status = fcomp.status;
2737 return 0;
2741 bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
2743 struct bfa_bsg_fruvpd_max_size_s *iocmd =
2744 (struct bfa_bsg_fruvpd_max_size_s *)cmd;
2745 unsigned long flags = 0;
2747 spin_lock_irqsave(&bfad->bfad_lock, flags);
2748 iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
2749 &iocmd->max_size);
2750 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2752 return 0;
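/*
 * Central dispatcher for the BSG vendor-specific commands: maps each
 * IOCMD_* code onto its handler above and returns -EINVAL for anything
 * it does not recognize.
 */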
2755 static int
2756 bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2757 unsigned int payload_len)
2759 int rc = -EINVAL;
2761 switch (cmd) {
2762 case IOCMD_IOC_ENABLE:
2763 rc = bfad_iocmd_ioc_enable(bfad, iocmd);
2764 break;
2765 case IOCMD_IOC_DISABLE:
2766 rc = bfad_iocmd_ioc_disable(bfad, iocmd);
2767 break;
2768 case IOCMD_IOC_GET_INFO:
2769 rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
2770 break;
2771 case IOCMD_IOC_GET_ATTR:
2772 rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
2773 break;
2774 case IOCMD_IOC_GET_STATS:
2775 rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
2776 break;
2777 case IOCMD_IOC_GET_FWSTATS:
2778 rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
2779 break;
2780 case IOCMD_IOC_RESET_STATS:
2781 case IOCMD_IOC_RESET_FWSTATS:
2782 rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
2783 break;
2784 case IOCMD_IOC_SET_ADAPTER_NAME:
2785 case IOCMD_IOC_SET_PORT_NAME:
2786 rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
2787 break;
2788 case IOCMD_IOCFC_GET_ATTR:
2789 rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
2790 break;
2791 case IOCMD_IOCFC_SET_INTR:
2792 rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
2793 break;
2794 case IOCMD_PORT_ENABLE:
2795 rc = bfad_iocmd_port_enable(bfad, iocmd);
2796 break;
2797 case IOCMD_PORT_DISABLE:
2798 rc = bfad_iocmd_port_disable(bfad, iocmd);
2799 break;
2800 case IOCMD_PORT_GET_ATTR:
2801 rc = bfad_iocmd_port_get_attr(bfad, iocmd);
2802 break;
2803 case IOCMD_PORT_GET_STATS:
2804 rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
2805 break;
2806 case IOCMD_PORT_RESET_STATS:
2807 rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
2808 break;
2809 case IOCMD_PORT_CFG_TOPO:
2810 case IOCMD_PORT_CFG_SPEED:
2811 case IOCMD_PORT_CFG_ALPA:
2812 case IOCMD_PORT_CLR_ALPA:
2813 rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
2814 break;
2815 case IOCMD_PORT_CFG_MAXFRSZ:
2816 rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
2817 break;
2818 case IOCMD_PORT_BBCR_ENABLE:
2819 case IOCMD_PORT_BBCR_DISABLE:
2820 rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
2821 break;
2822 case IOCMD_PORT_BBCR_GET_ATTR:
2823 rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
2824 break;
2825 case IOCMD_LPORT_GET_ATTR:
2826 rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
2827 break;
2828 case IOCMD_LPORT_GET_STATS:
2829 rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
2830 break;
2831 case IOCMD_LPORT_RESET_STATS:
2832 rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
2833 break;
2834 case IOCMD_LPORT_GET_IOSTATS:
2835 rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
2836 break;
2837 case IOCMD_LPORT_GET_RPORTS:
2838 rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
2839 break;
2840 case IOCMD_RPORT_GET_ATTR:
2841 rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
2842 break;
2843 case IOCMD_RPORT_GET_ADDR:
2844 rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
2845 break;
2846 case IOCMD_RPORT_GET_STATS:
2847 rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
2848 break;
2849 case IOCMD_RPORT_RESET_STATS:
2850 rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
2851 break;
2852 case IOCMD_RPORT_SET_SPEED:
2853 rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
2854 break;
2855 case IOCMD_VPORT_GET_ATTR:
2856 rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
2857 break;
2858 case IOCMD_VPORT_GET_STATS:
2859 rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
2860 break;
2861 case IOCMD_VPORT_RESET_STATS:
2862 rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
2863 break;
2864 case IOCMD_FABRIC_GET_LPORTS:
2865 rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
2866 break;
2867 case IOCMD_RATELIM_ENABLE:
2868 case IOCMD_RATELIM_DISABLE:
2869 rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
2870 break;
2871 case IOCMD_RATELIM_DEF_SPEED:
2872 rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
2873 break;
2874 case IOCMD_FCPIM_FAILOVER:
2875 rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
2876 break;
2877 case IOCMD_FCPIM_MODSTATS:
2878 rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
2879 break;
2880 case IOCMD_FCPIM_MODSTATSCLR:
2881 rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
2882 break;
2883 case IOCMD_FCPIM_DEL_ITN_STATS:
2884 rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
2885 break;
2886 case IOCMD_ITNIM_GET_ATTR:
2887 rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
2888 break;
2889 case IOCMD_ITNIM_GET_IOSTATS:
2890 rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
2891 break;
2892 case IOCMD_ITNIM_RESET_STATS:
2893 rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
2894 break;
2895 case IOCMD_ITNIM_GET_ITNSTATS:
2896 rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
2897 break;
2898 case IOCMD_FCPORT_ENABLE:
2899 rc = bfad_iocmd_fcport_enable(bfad, iocmd);
2900 break;
2901 case IOCMD_FCPORT_DISABLE:
2902 rc = bfad_iocmd_fcport_disable(bfad, iocmd);
2903 break;
2904 case IOCMD_IOC_PCIFN_CFG:
2905 rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
2906 break;
2907 case IOCMD_IOC_FW_SIG_INV:
2908 rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd);
2909 break;
2910 case IOCMD_PCIFN_CREATE:
2911 rc = bfad_iocmd_pcifn_create(bfad, iocmd);
2912 break;
2913 case IOCMD_PCIFN_DELETE:
2914 rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
2915 break;
2916 case IOCMD_PCIFN_BW:
2917 rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
2918 break;
2919 case IOCMD_ADAPTER_CFG_MODE:
2920 rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
2921 break;
2922 case IOCMD_PORT_CFG_MODE:
2923 rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
2924 break;
2925 case IOCMD_FLASH_ENABLE_OPTROM:
2926 case IOCMD_FLASH_DISABLE_OPTROM:
2927 rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
2928 break;
2929 case IOCMD_FAA_QUERY:
2930 rc = bfad_iocmd_faa_query(bfad, iocmd);
2931 break;
2932 case IOCMD_CEE_GET_ATTR:
2933 rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
2934 break;
2935 case IOCMD_CEE_GET_STATS:
2936 rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
2937 break;
2938 case IOCMD_CEE_RESET_STATS:
2939 rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
2940 break;
2941 case IOCMD_SFP_MEDIA:
2942 rc = bfad_iocmd_sfp_media(bfad, iocmd);
2943 break;
2944 case IOCMD_SFP_SPEED:
2945 rc = bfad_iocmd_sfp_speed(bfad, iocmd);
2946 break;
2947 case IOCMD_FLASH_GET_ATTR:
2948 rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
2949 break;
2950 case IOCMD_FLASH_ERASE_PART:
2951 rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
2952 break;
2953 case IOCMD_FLASH_UPDATE_PART:
2954 rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
2955 break;
2956 case IOCMD_FLASH_READ_PART:
2957 rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
2958 break;
2959 case IOCMD_DIAG_TEMP:
2960 rc = bfad_iocmd_diag_temp(bfad, iocmd);
2961 break;
2962 case IOCMD_DIAG_MEMTEST:
2963 rc = bfad_iocmd_diag_memtest(bfad, iocmd);
2964 break;
2965 case IOCMD_DIAG_LOOPBACK:
2966 rc = bfad_iocmd_diag_loopback(bfad, iocmd);
2967 break;
2968 case IOCMD_DIAG_FWPING:
2969 rc = bfad_iocmd_diag_fwping(bfad, iocmd);
2970 break;
2971 case IOCMD_DIAG_QUEUETEST:
2972 rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
2973 break;
2974 case IOCMD_DIAG_SFP:
2975 rc = bfad_iocmd_diag_sfp(bfad, iocmd);
2976 break;
2977 case IOCMD_DIAG_LED:
2978 rc = bfad_iocmd_diag_led(bfad, iocmd);
2979 break;
2980 case IOCMD_DIAG_BEACON_LPORT:
2981 rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
2982 break;
2983 case IOCMD_DIAG_LB_STAT:
2984 rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
2985 break;
2986 case IOCMD_DIAG_DPORT_ENABLE:
2987 rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
2988 break;
2989 case IOCMD_DIAG_DPORT_DISABLE:
2990 rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
2991 break;
2992 case IOCMD_DIAG_DPORT_SHOW:
2993 rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
2994 break;
2995 case IOCMD_DIAG_DPORT_START:
2996 rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
2997 break;
2998 case IOCMD_PHY_GET_ATTR:
2999 rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
3000 break;
3001 case IOCMD_PHY_GET_STATS:
3002 rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
3003 break;
3004 case IOCMD_PHY_UPDATE_FW:
3005 rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
3006 break;
3007 case IOCMD_PHY_READ_FW:
3008 rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
3009 break;
3010 case IOCMD_VHBA_QUERY:
3011 rc = bfad_iocmd_vhba_query(bfad, iocmd);
3012 break;
3013 case IOCMD_DEBUG_PORTLOG:
3014 rc = bfad_iocmd_porglog_get(bfad, iocmd);
3015 break;
3016 case IOCMD_DEBUG_FW_CORE:
3017 rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
3018 break;
3019 case IOCMD_DEBUG_FW_STATE_CLR:
3020 case IOCMD_DEBUG_PORTLOG_CLR:
3021 case IOCMD_DEBUG_START_DTRC:
3022 case IOCMD_DEBUG_STOP_DTRC:
3023 rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
3024 break;
3025 case IOCMD_DEBUG_PORTLOG_CTL:
3026 rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
3027 break;
3028 case IOCMD_FCPIM_PROFILE_ON:
3029 case IOCMD_FCPIM_PROFILE_OFF:
3030 rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
3031 break;
3032 case IOCMD_ITNIM_GET_IOPROFILE:
3033 rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
3034 break;
3035 case IOCMD_FCPORT_GET_STATS:
3036 rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
3037 break;
3038 case IOCMD_FCPORT_RESET_STATS:
3039 rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
3040 break;
3041 case IOCMD_BOOT_CFG:
3042 rc = bfad_iocmd_boot_cfg(bfad, iocmd);
3043 break;
3044 case IOCMD_BOOT_QUERY:
3045 rc = bfad_iocmd_boot_query(bfad, iocmd);
3046 break;
3047 case IOCMD_PREBOOT_QUERY:
3048 rc = bfad_iocmd_preboot_query(bfad, iocmd);
3049 break;
3050 case IOCMD_ETHBOOT_CFG:
3051 rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
3052 break;
3053 case IOCMD_ETHBOOT_QUERY:
3054 rc = bfad_iocmd_ethboot_query(bfad, iocmd);
3055 break;
3056 case IOCMD_TRUNK_ENABLE:
3057 case IOCMD_TRUNK_DISABLE:
3058 rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
3059 break;
3060 case IOCMD_TRUNK_GET_ATTR:
3061 rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
3062 break;
3063 case IOCMD_QOS_ENABLE:
3064 case IOCMD_QOS_DISABLE:
3065 rc = bfad_iocmd_qos(bfad, iocmd, cmd);
3066 break;
3067 case IOCMD_QOS_GET_ATTR:
3068 rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
3069 break;
3070 case IOCMD_QOS_GET_VC_ATTR:
3071 rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
3072 break;
3073 case IOCMD_QOS_GET_STATS:
3074 rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
3075 break;
3076 case IOCMD_QOS_RESET_STATS:
3077 rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
3078 break;
3079 case IOCMD_QOS_SET_BW:
3080 rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
3081 break;
3082 case IOCMD_VF_GET_STATS:
3083 rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
3084 break;
3085 case IOCMD_VF_RESET_STATS:
3086 rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
3087 break;
3088 case IOCMD_FCPIM_LUNMASK_ENABLE:
3089 case IOCMD_FCPIM_LUNMASK_DISABLE:
3090 case IOCMD_FCPIM_LUNMASK_CLEAR:
3091 rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
3092 break;
3093 case IOCMD_FCPIM_LUNMASK_QUERY:
3094 rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
3095 break;
3096 case IOCMD_FCPIM_LUNMASK_ADD:
3097 case IOCMD_FCPIM_LUNMASK_DELETE:
3098 rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
3099 break;
3100 case IOCMD_FCPIM_THROTTLE_QUERY:
3101 rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
3102 break;
3103 case IOCMD_FCPIM_THROTTLE_SET:
3104 rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
3105 break;
3106 /* TFRU */
3107 case IOCMD_TFRU_READ:
3108 rc = bfad_iocmd_tfru_read(bfad, iocmd);
3109 break;
3110 case IOCMD_TFRU_WRITE:
3111 rc = bfad_iocmd_tfru_write(bfad, iocmd);
3112 break;
3113 /* FRU */
3114 case IOCMD_FRUVPD_READ:
3115 rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
3116 break;
3117 case IOCMD_FRUVPD_UPDATE:
3118 rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
3119 break;
3120 case IOCMD_FRUVPD_GET_MAX_SIZE:
3121 rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
3122 break;
3123 default:
3124 rc = -EINVAL;
3125 break;
3127 return rc;
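/*
 * FC_BSG_HST_VENDOR entry point: the request payload scatter-gather
 * list is copied into a linear kernel buffer, handed to
 * bfad_iocmd_handler(), and the (in-place updated) buffer is copied
 * back into the reply payload before completing the bsg job.
 * Userspace typically reaches this through the fc_host bsg node with
 * an SG v4 request carrying msgcode FC_BSG_HST_VENDOR, though that
 * plumbing lives outside this file.
 */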
3130 static int
3131 bfad_im_bsg_vendor_request(struct bsg_job *job)
3133 struct fc_bsg_request *bsg_request = job->request;
3134 struct fc_bsg_reply *bsg_reply = job->reply;
3135 uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
3136 struct Scsi_Host *shost = fc_bsg_to_shost(job);
3137 struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
3138 struct bfad_s *bfad = im_port->bfad;
3139 void *payload_kbuf;
3140 int rc = -EINVAL;
3142 /* Allocate a temp buffer to hold the passed in user space command */
3143 payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3144 if (!payload_kbuf) {
3145 rc = -ENOMEM;
3146 goto out;
3149 /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
3150 sg_copy_to_buffer(job->request_payload.sg_list,
3151 job->request_payload.sg_cnt, payload_kbuf,
3152 job->request_payload.payload_len);
3154 /* Invoke IOCMD handler - to handle all the vendor command requests */
3155 rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
3156 job->request_payload.payload_len);
3157 if (rc != BFA_STATUS_OK)
3158 goto error;
3160 /* Copy the response data to the job->reply_payload sg_list */
3161 sg_copy_from_buffer(job->reply_payload.sg_list,
3162 job->reply_payload.sg_cnt,
3163 payload_kbuf,
3164 job->reply_payload.payload_len);
3166 /* free the command buffer */
3167 kfree(payload_kbuf);
3169 /* Fill the BSG job reply data */
3170 job->reply_len = job->reply_payload.payload_len;
3171 bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
3172 bsg_reply->result = rc;
3174 bsg_job_done(job, bsg_reply->result,
3175 bsg_reply->reply_payload_rcv_len);
3176 return rc;
3177 error:
3178 /* free the command buffer */
3179 kfree(payload_kbuf);
3180 out:
3181 bsg_reply->result = rc;
3182 job->reply_len = sizeof(uint32_t);
3183 bsg_reply->reply_payload_rcv_len = 0;
3184 return rc;
3187 /* FC passthru callbacks */
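/*
 * The *_sg{addr,len}_cb helpers below let the BFA FCXP layer walk the
 * request/response SG tables built by bfad_fcxp_map_sg(): each one
 * returns the DMA address or length of the sgeid'th element.
 */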
3189 bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
3191 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3192 struct bfa_sge_s *sge;
3193 u64 addr;
3195 sge = drv_fcxp->req_sge + sgeid;
3196 addr = (u64)(size_t) sge->sg_addr;
3197 return addr;
3201 bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
3203 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3204 struct bfa_sge_s *sge;
3206 sge = drv_fcxp->req_sge + sgeid;
3207 return sge->sg_len;
3211 bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
3213 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3214 struct bfa_sge_s *sge;
3215 u64 addr;
3217 sge = drv_fcxp->rsp_sge + sgeid;
3218 addr = (u64)(size_t) sge->sg_addr;
3219 return addr;
3223 bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
3225 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3226 struct bfa_sge_s *sge;
3228 sge = drv_fcxp->rsp_sge + sgeid;
3229 return sge->sg_len;
3232 void
3233 bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
3234 bfa_status_t req_status, u32 rsp_len, u32 resid_len,
3235 struct fchs_s *rsp_fchs)
3237 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3239 drv_fcxp->req_status = req_status;
3240 drv_fcxp->rsp_len = rsp_len;
3242 /* bfa_fcxp will be automatically freed by BFA */
3243 drv_fcxp->bfa_fcxp = NULL;
3244 complete(&drv_fcxp->comp);
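/*
 * Build a single-element DMA mapping for one linear passthru buffer:
 * a bfad_buf_info and its bfa_sge_s table are allocated together, the
 * payload is copied into a dma_zalloc_coherent() buffer, and the lone
 * SGE is pointed at it. Returns NULL on failure;
 * bfad_fcxp_free_mem() is the matching teardown.
 */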
3247 struct bfad_buf_info *
3248 bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
3249 uint32_t payload_len, uint32_t *num_sgles)
3251 struct bfad_buf_info *buf_base, *buf_info;
3252 struct bfa_sge_s *sg_table;
3253 int sge_num = 1;
3255 buf_base = kcalloc(sge_num, sizeof(struct bfad_buf_info) +
3256 sizeof(struct bfa_sge_s),
3257 GFP_KERNEL);
3258 if (!buf_base)
3259 return NULL;
3261 sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
3262 (sizeof(struct bfad_buf_info) * sge_num));
3264 /* Allocate dma coherent memory */
3265 buf_info = buf_base;
3266 buf_info->size = payload_len;
3267 buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev,
3268 buf_info->size, &buf_info->phys,
3269 GFP_KERNEL);
3270 if (!buf_info->virt)
3271 goto out_free_mem;
3273 /* copy the linear bsg buffer to buf_info */
3274 memcpy(buf_info->virt, payload_kbuf, buf_info->size);
3276 /*
3277 * Setup SG table
3278 */
3279 sg_table->sg_len = buf_info->size;
3280 sg_table->sg_addr = (void *)(size_t) buf_info->phys;
3282 *num_sgles = sge_num;
3284 return buf_base;
3286 out_free_mem:
3287 kfree(buf_base);
3288 return NULL;
3291 void
3292 bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
3293 uint32_t num_sgles)
3295 int i;
3296 struct bfad_buf_info *buf_info = buf_base;
3298 if (buf_base) {
3299 for (i = 0; i < num_sgles; buf_info++, i++) {
3300 if (buf_info->virt != NULL)
3301 dma_free_coherent(&bfad->pcidev->dev,
3302 buf_info->size, buf_info->virt,
3303 buf_info->phys);
3305 kfree(buf_base);
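/*
 * Allocate a hal FCXP and send the passthru frame under bfad_lock.
 * Completion is reported asynchronously through bfad_send_fcpt_cb(),
 * which the caller waits for; BFA_STATUS_ENOMEM is returned if no FCXP
 * could be allocated.
 */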
3310 bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
3311 bfa_bsg_fcpt_t *bsg_fcpt)
3313 struct bfa_fcxp_s *hal_fcxp;
3314 struct bfad_s *bfad = drv_fcxp->port->bfad;
3315 unsigned long flags;
3316 uint8_t lp_tag;
3318 spin_lock_irqsave(&bfad->bfad_lock, flags);
3320 /* Allocate bfa_fcxp structure */
3321 hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
3322 drv_fcxp->num_req_sgles,
3323 drv_fcxp->num_rsp_sgles,
3324 bfad_fcxp_get_req_sgaddr_cb,
3325 bfad_fcxp_get_req_sglen_cb,
3326 bfad_fcxp_get_rsp_sgaddr_cb,
3327 bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
3328 if (!hal_fcxp) {
3329 bfa_trc(bfad, 0);
3330 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3331 return BFA_STATUS_ENOMEM;
3334 drv_fcxp->bfa_fcxp = hal_fcxp;
3336 lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
3338 bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
3339 bsg_fcpt->cts, bsg_fcpt->cos,
3340 job->request_payload.payload_len,
3341 &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
3342 job->reply_payload.payload_len, bsg_fcpt->tsecs);
3344 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3346 return BFA_STATUS_OK;
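/*
 * ELS/CT passthru entry point. The flow is: copy the bfa_bsg_fcpt_t
 * header from user memory, resolve the local port (and, for RPT
 * commands, the remote port), DMA-map linearized copies of the request
 * and reply payloads, send the FCXP and wait for its completion, then
 * copy the response and the updated fcpt header back to userspace.
 */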
3350 bfad_im_bsg_els_ct_request(struct bsg_job *job)
3352 struct bfa_bsg_data *bsg_data;
3353 struct Scsi_Host *shost = fc_bsg_to_shost(job);
3354 struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
3355 struct bfad_s *bfad = im_port->bfad;
3356 bfa_bsg_fcpt_t *bsg_fcpt;
3357 struct bfad_fcxp *drv_fcxp;
3358 struct bfa_fcs_lport_s *fcs_port;
3359 struct bfa_fcs_rport_s *fcs_rport;
3360 struct fc_bsg_request *bsg_request = job->request;
3361 struct fc_bsg_reply *bsg_reply = job->reply;
3362 uint32_t command_type = bsg_request->msgcode;
3363 unsigned long flags;
3364 struct bfad_buf_info *rsp_buf_info;
3365 void *req_kbuf = NULL, *rsp_kbuf = NULL;
3366 int rc = -EINVAL;
3368 job->reply_len = sizeof(uint32_t); /* At least a uint32_t-sized reply_len */
3369 bsg_reply->reply_payload_rcv_len = 0;
3371 /* Get the payload passed in from userspace */
3372 bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) +
3373 sizeof(struct fc_bsg_request));
3374 if (bsg_data == NULL)
3375 goto out;
3377 /*
3378 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
3379 * buffer of size bsg_data->payload_len
3380 */
3381 bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
3382 if (!bsg_fcpt) {
3383 rc = -ENOMEM;
3384 goto out;
3387 if (copy_from_user((uint8_t *)bsg_fcpt,
3388 (void *)(unsigned long)bsg_data->payload,
3389 bsg_data->payload_len)) {
3390 kfree(bsg_fcpt);
3391 rc = -EIO;
3392 goto out;
3395 drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
3396 if (drv_fcxp == NULL) {
3397 kfree(bsg_fcpt);
3398 rc = -ENOMEM;
3399 goto out;
3402 spin_lock_irqsave(&bfad->bfad_lock, flags);
3403 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
3404 bsg_fcpt->lpwwn);
3405 if (fcs_port == NULL) {
3406 bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
3407 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3408 goto out_free_mem;
3411 /* Check if the port is online before sending FC Passthru cmd */
3412 if (!bfa_fcs_lport_is_online(fcs_port)) {
3413 bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
3414 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3415 goto out_free_mem;
3418 drv_fcxp->port = fcs_port->bfad_port;
3420 if (drv_fcxp->port->bfad == NULL)
3421 drv_fcxp->port->bfad = bfad;
3423 /* Fetch the bfa_rport - if nexus needed */
3424 if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
3425 command_type == FC_BSG_HST_CT) {
3426 /* BSG HST commands: no nexus needed */
3427 drv_fcxp->bfa_rport = NULL;
3429 } else if (command_type == FC_BSG_RPT_ELS ||
3430 command_type == FC_BSG_RPT_CT) {
3431 /* BSG RPT commands: nexus needed */
3432 fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
3433 bsg_fcpt->dpwwn);
3434 if (fcs_rport == NULL) {
3435 bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
3436 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3437 goto out_free_mem;
3440 drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
3442 } else { /* Unknown BSG msgcode; return -EINVAL */
3443 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3444 goto out_free_mem;
3447 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3449 /* allocate memory for req / rsp buffers */
3450 req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3451 if (!req_kbuf) {
3452 printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
3453 bfad->pci_name);
3454 rc = -ENOMEM;
3455 goto out_free_mem;
3458 rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
3459 if (!rsp_kbuf) {
3460 printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
3461 bfad->pci_name);
3462 rc = -ENOMEM;
3463 goto out_free_mem;
3466 /* map req sg - copy the sg_list passed in to the linear buffer */
3467 sg_copy_to_buffer(job->request_payload.sg_list,
3468 job->request_payload.sg_cnt, req_kbuf,
3469 job->request_payload.payload_len);
3471 drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
3472 job->request_payload.payload_len,
3473 &drv_fcxp->num_req_sgles);
3474 if (!drv_fcxp->reqbuf_info) {
3475 printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
3476 bfad->pci_name);
3477 rc = -ENOMEM;
3478 goto out_free_mem;
3481 drv_fcxp->req_sge = (struct bfa_sge_s *)
3482 (((uint8_t *)drv_fcxp->reqbuf_info) +
3483 (sizeof(struct bfad_buf_info) *
3484 drv_fcxp->num_req_sgles));
3486 /* map rsp sg */
3487 drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
3488 job->reply_payload.payload_len,
3489 &drv_fcxp->num_rsp_sgles);
3490 if (!drv_fcxp->rspbuf_info) {
3491 printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
3492 bfad->pci_name);
3493 rc = -ENOMEM;
3494 goto out_free_mem;
3497 rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
3498 drv_fcxp->rsp_sge = (struct bfa_sge_s *)
3499 (((uint8_t *)drv_fcxp->rspbuf_info) +
3500 (sizeof(struct bfad_buf_info) *
3501 drv_fcxp->num_rsp_sgles));
3503 /* fcxp send */
3504 init_completion(&drv_fcxp->comp);
3505 rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
3506 if (rc == BFA_STATUS_OK) {
3507 wait_for_completion(&drv_fcxp->comp);
3508 bsg_fcpt->status = drv_fcxp->req_status;
3509 } else {
3510 bsg_fcpt->status = rc;
3511 goto out_free_mem;
3514 /* fill the job->reply data */
3515 if (drv_fcxp->req_status == BFA_STATUS_OK) {
3516 job->reply_len = drv_fcxp->rsp_len;
3517 bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
3518 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
3519 } else {
3520 bsg_reply->reply_payload_rcv_len =
3521 sizeof(struct fc_bsg_ctels_reply);
3522 job->reply_len = sizeof(uint32_t);
3523 bsg_reply->reply_data.ctels_reply.status =
3524 FC_CTELS_STATUS_REJECT;
3527 /* Copy the response data to the reply_payload sg list */
3528 sg_copy_from_buffer(job->reply_payload.sg_list,
3529 job->reply_payload.sg_cnt,
3530 (uint8_t *)rsp_buf_info->virt,
3531 job->reply_payload.payload_len);
3533 out_free_mem:
3534 bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
3535 drv_fcxp->num_rsp_sgles);
3536 bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
3537 drv_fcxp->num_req_sgles);
3538 kfree(req_kbuf);
3539 kfree(rsp_kbuf);
3541 /* Copy the updated bsg_fcpt status/response back to the user buffer */
3542 if (copy_to_user((void *)(unsigned long)bsg_data->payload,
3543 (void *)bsg_fcpt, bsg_data->payload_len))
3544 rc = -EIO;
3546 kfree(bsg_fcpt);
3547 kfree(drv_fcxp);
3548 out:
3549 bsg_reply->result = rc;
3551 if (rc == BFA_STATUS_OK)
3552 bsg_job_done(job, bsg_reply->result,
3553 bsg_reply->reply_payload_rcv_len);
3555 return rc;
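/*
 * Top-level bsg_request handler dispatched from the FC transport bsg
 * layer: vendor commands go through bfad_im_bsg_vendor_request(),
 * ELS/CT passthru through bfad_im_bsg_els_ct_request(); any other
 * msgcode is rejected with -EINVAL.
 */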
3559 bfad_im_bsg_request(struct bsg_job *job)
3561 struct fc_bsg_request *bsg_request = job->request;
3562 struct fc_bsg_reply *bsg_reply = job->reply;
3563 uint32_t rc = BFA_STATUS_OK;
3565 switch (bsg_request->msgcode) {
3566 case FC_BSG_HST_VENDOR:
3567 /* Process BSG HST Vendor requests */
3568 rc = bfad_im_bsg_vendor_request(job);
3569 break;
3570 case FC_BSG_HST_ELS_NOLOGIN:
3571 case FC_BSG_RPT_ELS:
3572 case FC_BSG_HST_CT:
3573 case FC_BSG_RPT_CT:
3574 /* Process BSG ELS/CT commands */
3575 rc = bfad_im_bsg_els_ct_request(job);
3576 break;
3577 default:
3578 bsg_reply->result = rc = -EINVAL;
3579 bsg_reply->reply_payload_rcv_len = 0;
3580 break;
3583 return rc;
3587 bfad_im_bsg_timeout(struct bsg_job *job)
3589 /* Don't complete the BSG job request - return -EAGAIN
3590 * to reset the bsg job timeout: for ELS/CT passthru we
3591 * already have a timer tracking the request.
3592 */
3593 return -EAGAIN;
3594 }