/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_reg.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_fcdiag,
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcp,
	NULL
};

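/*
 * Each entry in hal_mods[] supplies the meminfo/attach/detach/start/
 * iocdisable hooks that bfa_cfg_get_meminfo(), bfa_attach(), bfa_detach()
 * and the IOC FC callbacks below iterate over.
 */
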
/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_fcdiag_intr,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};

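/*
 * bfa_isr_rspq() indexes this table with the msg_class from each
 * response-queue message header; classes without a handler funnel into
 * bfa_isr_unhandled(), which traces the offending header.
 */
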
/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC */
	NULL,		/* BFI_MC_DIAG */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE */
	NULL,		/* BFI_MC_PORT */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
};

static void
bfa_com_port_attach(struct bfa_s *bfa)
{
	struct bfa_port_s	*port = &bfa->modules.port;
	struct bfa_mem_dma_s	*port_dma = BFA_MEM_PORT_DMA(bfa);

	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
}

static void
bfa_com_ablk_attach(struct bfa_s *bfa)
{
	struct bfa_ablk_s	*ablk = &bfa->modules.ablk;
	struct bfa_mem_dma_s	*ablk_dma = BFA_MEM_ABLK_DMA(bfa);

	bfa_ablk_attach(ablk, &bfa->ioc);
	bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
}

static void
bfa_com_cee_attach(struct bfa_s *bfa)
{
	struct bfa_cee_s	*cee = &bfa->modules.cee;
	struct bfa_mem_dma_s	*cee_dma = BFA_MEM_CEE_DMA(bfa);

	cee->trcmod = bfa->trcmod;
	bfa_cee_attach(cee, &bfa->ioc, bfa);
	bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
}

static void
bfa_com_sfp_attach(struct bfa_s *bfa)
{
	struct bfa_sfp_s	*sfp = BFA_SFP_MOD(bfa);
	struct bfa_mem_dma_s	*sfp_dma = BFA_MEM_SFP_DMA(bfa);

	bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
	bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
}

static void
bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_flash_s	*flash = BFA_FLASH(bfa);
	struct bfa_mem_dma_s	*flash_dma = BFA_MEM_FLASH_DMA(bfa);

	bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_flash_memclaim(flash, flash_dma->kva_curp,
			   flash_dma->dma_curp, mincfg);
}

static void
bfa_com_diag_attach(struct bfa_s *bfa)
{
	struct bfa_diag_s	*diag = BFA_DIAG_MOD(bfa);
	struct bfa_mem_dma_s	*diag_dma = BFA_MEM_DIAG_DMA(bfa);

	bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
	bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
}

static void
bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_phy_s	*phy = BFA_PHY(bfa);
	struct bfa_mem_dma_s	*phy_dma = BFA_MEM_PHY_DMA(bfa);

	bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
}

/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
	BFA_IOCFC_ACT_ENABLE	= 4,
};

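/*
 * iocfc.action records which operation (init/stop/disable/enable) is in
 * flight, so the IOC completion callbacks below know which driver-visible
 * callback to queue when the firmware responds.
 */
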
#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256

#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16

/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;

/*
 * BFA Interrupt handling functions
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}

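/*
 * Consume a response queue: walk from the consumer index (CI) to the
 * producer index (PI), dispatch each message through bfa_isrs[], then
 * acknowledge the new CI back to the hardware.
 */
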
static void
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32	pi, ci;
	struct list_head *waitq;

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * acknowledge RME completions and update CI
	 */
	bfa_isr_rspq_ack(bfa, qid, ci);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

static void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	bfa_isr_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

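/*
 * Catch-all MSI-X vector handler: polls the interrupt status register and
 * services the RME (response) queues, CPE (request) queues and LPU/error
 * causes in turn.
 */
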
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32	intr, qintr;
	int	queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	bfa_msix_lpu_err(bfa, intr);
}

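/*
 * Legacy INTx handler: acknowledges the queue interrupt bits, drains the
 * RME completion queues unconditionally, then handles CPE and LPU/error
 * causes the same way as the MSI-X path.
 */
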
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/*
	 * Unconditional RME completion queue interrupt
	 */
	if (bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}

void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}

void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}

void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}

void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}

void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}

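/*
 * LPU/error vector handler: decodes the halt, PSS and mailbox causes
 * (whose bit layout differs between CT2 and earlier ASICs), services the
 * IOC mailbox, and clears the error causes before signalling
 * bfa_ioc_error_isr().
 */
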
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr  = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				   __HFN_INT_MBOX_LPU1_CT2);
		intr    &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
					  (intr & __HFN_INT_LL_HALT) : 0;
		pss_isr  = intr & __HFN_INT_ERR_PSS;
		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr    &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared, so the driver's interrupt
			 * handler is still called even though it is already
			 * masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
				bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}

/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);

	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}

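/*
 * The CFG_REQ sent above is answered by the firmware with a
 * BFI_IOCFC_I2H_CFG_REPLY message, which bfa_iocfc_isr() routes to
 * bfa_iocfc_cfgrsp() further below.
 */
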
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}

static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8	*dm_kva = NULL;
	u64	dm_pa = 0;
	int	i, per_reqq_sz, per_rspq_sz, dbgsz;
	struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

	/* First allocate dma memory for IOC */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			  bfa_mem_dma_phys(ioc_dma));

	/* Claim DMA-able memory for the request/response queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Claim IOCFC dma memory - for shadow CI/PI */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa = bfa_mem_dma_phys(iocfc_dma);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/* Claim IOCFC dma memory - for the config info page */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/* Claim IOCFC dma memory - for the config response */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/* Claim IOCFC kva memory */
	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
		bfa_mem_kva_curp(iocfc) += dbgsz;
	}
}

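/*
 * The carve-up above must consume the blocks in exactly the order and
 * sizes that bfa_iocfc_meminfo() reserved them: IOC attributes, per-CQ
 * request/response rings, shadow CI/PI cachelines, then the config info
 * and config response pages.
 */
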
/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int		i;

	bfa->queue_process = BFA_TRUE;
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int		i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);
}

static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s	*bfa = bfa_arg;

	if (complete) {
		if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
		else
			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
	} else {
		if (bfa->iocfc.cfgdone)
			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
	}
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
	else
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

static void
bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->enable_comp);
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}

/*
 * configure queue registers from firmware response
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int	i;
	struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}

static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s		*iocfc	= &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s	*cfgrsp	= iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s	*fwcfg	= &cfgrsp->fwcfg;

	fwcfg->num_cqs	      = fwcfg->num_cqs;
	fwcfg->num_ioim_reqs  = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs  = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports     = be16_to_cpu(fwcfg->num_rports);

	iocfc->cfgdone = BFA_TRUE;

	/*
	 * configure queue register offsets as learnt from firmware
	 */
	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);

	/*
	 * Re-configure resources as learnt from Firmware
	 */
	bfa_iocfc_res_recfg(bfa, fwcfg);

	/*
	 * Install MSIX queue handlers
	 */
	bfa_msix_queue_install(bfa);

	/*
	 * Configuration is complete - initialize/start submodules
	 */
	bfa_fcport_init(bfa);

	if (iocfc->action == BFA_IOCFC_ACT_INIT) {
		if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
			bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
	} else {
		if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
				     bfa_iocfc_enable_cb, bfa);
		bfa_iocfc_start_submod(bfa);
	}
}

void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int		q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}

/* Fabric Assigned Address specific functions */

/*
 * Check whether IOC is ready before sending command down
 */
static bfa_status_t
bfa_faa_validate_request(struct bfa_s *bfa)
{
	enum bfa_ioc_type_e	ioc_type = bfa_get_type(bfa);
	u32	card_type = bfa->ioc.attr->card_type;

	if (bfa_ioc_is_operational(&bfa->ioc)) {
		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	} else {
		if (!bfa_ioc_is_acq_addr(&bfa->ioc))
			return BFA_STATUS_IOC_NON_OP;
	}

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_en_dis_s faa_enable_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
		return BFA_STATUS_FAA_ENABLED;

	if (bfa_fcport_is_trunk_enabled(bfa))
		return BFA_STATUS_ERROR_TRUNK_ENABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
			  sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
		void *cbarg)
{
	struct bfi_faa_en_dis_s faa_disable_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
		return BFA_STATUS_FAA_DISABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
			  sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
	      bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_query_s	faa_attr_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_attr = attr;
	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	iocfc->faa_args.busy = BFA_TRUE;
	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
			  sizeof(struct bfi_faa_query_s));

	return BFA_STATUS_OK;
}

/*
 * FAA enable response
 */
static void
bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
		     struct bfi_faa_en_dis_rsp_s *rsp)
{
	void		*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
	bfa_status_t	status = rsp->status;

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
	iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * FAA disable response
 */
static void
bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
		      struct bfi_faa_en_dis_rsp_s *rsp)
{
	void		*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
	bfa_status_t	status = rsp->status;

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
	iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * FAA query response
 */
static void
bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
		    bfi_faa_query_rsp_t *rsp)
{
	void	*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;

	if (iocfc->faa_args.faa_attr) {
		iocfc->faa_args.faa_attr->faa = rsp->faa;
		iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
		iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
	}

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
	iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s	*bfa = bfa_arg;

	if (status == BFA_STATUS_FAA_ACQ_ADDR) {
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
			     bfa_iocfc_init_cb, bfa);
		return;
	}

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
				     bfa_iocfc_enable_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
	bfa_dconf_modinit(bfa);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}

/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			     bfa);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}

/*
 * Query IOC memory requirement information.
 */
static void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		  struct bfa_s *bfa)
{
	int q, per_reqq_sz, per_rspq_sz;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
	u32	dm_len = 0;

	/* dma memory setup for IOC */
	bfa_mem_dma_setup(meminfo, ioc_dma,
		BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));

	/* dma memory setup for REQ/RSP queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
				  per_reqq_sz);
		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
				  per_rspq_sz);
	}

	/* IOCFC dma memory - calculate Shadow CI/PI size */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
		dm_len += (2 * BFA_CACHELINE_SZ);

	/* IOCFC dma memory - calculate config info / rsp size */
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);

	/* dma memory setup for IOCFC */
	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

	/* kva memory setup for IOCFC */
	bfa_mem_kva_setup(meminfo, iocfc_kva,
			  ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
}

/*
 * Attach the IOC FC layer: register IOC callbacks, set up chip handlers
 * and claim the IOC/IOCFC memory blocks.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_pcidev_s *pcidev)
{
	int		i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}

/*
 * Kick off IOC FC initialization by enabling the IOC; completion is
 * reported through bfa_iocfc_enable_cbfn().
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}

/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	if (bfa->iocfc.cfgdone)
		bfa_iocfc_start_submod(bfa);
}

/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	bfa->queue_process = BFA_FALSE;
	bfa_dconf_modexit(bfa);
	if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
		bfa_ioc_disable(&bfa->ioc);
}

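/*
 * Mailbox ISR for IOC FC (BFI_MC_IOCFC) messages: dispatches firmware
 * config replies, update-queue responses and FAA replies to their
 * handlers.
 */
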
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s		*bfa = bfaarg;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u	*msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
		bfa_faa_enable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
		bfa_faa_disable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	default:
		WARN_ON(1);
	}
}

void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
			be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
			be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config	= iocfc->cfg;
}

bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_fn_lpu(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay    = iocfc->cfginfo->intr_attr.delay;
	m->latency  = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
	return BFA_STATUS_OK;
}

void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
}

/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
	bfa_ioc_enable(&bfa->ioc);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}

/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}

int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}

/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to pre-defined
 * values within the BFA library.
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to bfa_attach() call.
 *
 * @param[in] bfa -	pointer to the bfa structure, used while fetching the
 *			dma, kva memory information of the bfa sub-modules.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		    struct bfa_s *bfa)
{
	int		i;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

	/* Initialize the DMA & KVA meminfo queues */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	bfa_iocfc_meminfo(cfg, meminfo, bfa);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, meminfo, bfa);

	/* dma info setup */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
	bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
	bfa_mem_dma_setup(meminfo, flash_dma,
			  bfa_flash_meminfo(cfg->drvcfg.min_cfg));
	bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
	bfa_mem_dma_setup(meminfo, phy_dma,
			  bfa_phy_meminfo(cfg->drvcfg.min_cfg));
}

/*
 * Use this function to attach the driver instance to the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in the bfa_init() call).
 *
 * This call will fail if the cap is out of range compared to
 * pre-defined values within the BFA library.
 *
 * @param[out] bfa	Pointer to bfa_t.
 * @param[in] bfad	Opaque handle back to the driver's IOC structure
 * @param[in] cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in] meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in] pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int	i;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/* Initialize memory pointers for iterative allocation */
	dma_info = &meminfo->dma_info;
	dma_info->kva_curp = dma_info->kva;
	dma_info->dma_curp = dma_info->dma;

	kva_info = &meminfo->kva_info;
	kva_info->kva_curp = kva_info->kva;

	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva_curp = dma_elem->kva;
		dma_elem->dma_curp = dma_elem->dma;
	}

	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva_curp = kva_elem->kva;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, pcidev);

	bfa_com_port_attach(bfa);
	bfa_com_ablk_attach(bfa);
	bfa_com_cee_attach(bfa);
	bfa_com_sfp_attach(bfa);
	bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
	bfa_com_diag_attach(bfa);
	bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
}

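/*
 * A minimal driver-side bring-up sketch (illustrative only; any
 * driver-side allocation step shown here is an assumption, not part of
 * this file):
 *
 *	struct bfa_iocfc_cfg_s cfg;
 *	struct bfa_meminfo_s meminfo;
 *
 *	bfa_cfg_get_default(&cfg);		// or bfa_cfg_get_min()
 *	bfa_cfg_get_meminfo(&cfg, &meminfo, bfa);
 *	// allocate each DMA/KVA element queued in meminfo and fill in
 *	// its kva/dma starting addresses
 *	bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
 *	bfa_iocfc_init(bfa);			// triggers IOC enable
 */
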
/*
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 */
void
bfa_detach(struct bfa_s *bfa)
{
	int	i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->detach(bfa);
	bfa_ioc_detach(&bfa->ioc);
}

void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}

void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head	*qe;
	struct list_head	*qen;
	struct bfa_cb_qe_s	*hcb_qe;
	bfa_cb_cbfn_status_t	cbfn;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		if (hcb_qe->pre_rmv) {
			/* qe is invalid after return, dequeue before cbfn() */
			list_del(qe);
			cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
			cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
		} else
			hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}

void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head	*qe;
	struct bfa_cb_qe_s	*hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		WARN_ON(hcb_qe->pre_rmv);
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}

void
bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
{
	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
		if (bfa->iocfc.cfgdone == BFA_TRUE)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
	}
}

/*
 * Return the list of PCI vendor/device id lists supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
	*pciids = __pciids;
}

/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 *	void
 *
 * Special Considerations:
 * @note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}

void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}