/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfa_modules.h"
#include "bfi_ctreg.h"

BFA_TRC_FILE(HAL, CORE);
/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcpim,
	NULL
};
/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_isr_unhandled,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itnim_isr,		/* BFI_MC_ITNIM */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC */
	NULL,		/* BFI_MC_DIAG */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE */
	NULL,		/* BFI_MC_PORT */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};
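/*
 * Attach the common port sub-module: carve its DMA memory out of the
 * meminfo block and advance the DMA virtual/physical cursors past it.
 */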
static void
bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
{
	struct bfa_port_s	*port = &bfa->modules.port;
	u32			dm_len;
	u8			*dm_kva;
	u64			dm_pa;

	dm_len = bfa_port_meminfo();
	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa  = bfa_meminfo_dma_phys(mi);

	memset(port, 0, sizeof(struct bfa_port_s));
	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, dm_kva, dm_pa);

	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
}
/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
};

#define DEF_CFG_NUM_FABRICS	1
#define DEF_CFG_NUM_LPORTS	256
#define DEF_CFG_NUM_CQS		4
#define DEF_CFG_NUM_IOIM_REQS	(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS	128
#define DEF_CFG_NUM_FCXP_REQS	64
#define DEF_CFG_NUM_UF_BUFS	64
#define DEF_CFG_NUM_RPORTS	1024
#define DEF_CFG_NUM_ITNIMS	(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS	256

#define DEF_CFG_NUM_SGPGS	2048
#define DEF_CFG_NUM_REQQ_ELEMS	256
#define DEF_CFG_NUM_RSPQ_ELEMS	64
#define DEF_CFG_NUM_SBOOT_TGTS	16
#define DEF_CFG_NUM_SBOOT_LUNS	16
145 * forward declaration for IOC FC functions
147 static void bfa_iocfc_enable_cbfn(void *bfa_arg
, enum bfa_status status
);
148 static void bfa_iocfc_disable_cbfn(void *bfa_arg
);
149 static void bfa_iocfc_hbfail_cbfn(void *bfa_arg
);
150 static void bfa_iocfc_reset_cbfn(void *bfa_arg
);
151 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn
;
/*
 * BFA Interrupt handling functions
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}
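/*
 * Catch-all MSI-X vector handler: service every interrupt source.
 */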
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}
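/*
 * Poll and service pending interrupts: acknowledge RME (response) and
 * CPE (request) queue events, then hand any remaining error bits to
 * bfa_msix_lpu_err(). Returns BFA_TRUE if any interrupt was serviced.
 */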
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_RME_Q0 << queue))
			bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_CPE_Q0 << queue))
			bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
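/*
 * Install MSI-X handlers and unmask the interrupt sources owned by this
 * PCI function: CPE/RME queues 0-3 and the LPU0 mailbox for function 0,
 * queues 4-7 and the LPU1 mailbox for function 1, plus the error sources.
 */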
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 intr_unmask;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_install(bfa);
	intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
		       __HFN_INT_LL_HALT);

	if (pci_func == 0)
		intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
				__HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
				__HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
				__HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
				__HFN_INT_MBOX_LPU0);
	else
		intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
				__HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
				__HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
				__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
				__HFN_INT_MBOX_LPU1);

	writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
	writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~intr_unmask;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}
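/*
 * MSI-X handler for a request queue: acknowledge the queue interrupt
 * and resume any requests waiting for queue space.
 */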
void
bfa_msix_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
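/*
 * Trace and assert on firmware messages that have no registered handler.
 */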
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}
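/*
 * MSI-X handler for a response queue: dispatch each completion to the
 * per-message-class handler in bfa_isrs[], then update the consumer
 * index and resume any waiting requests.
 */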
void
bfa_msix_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	if (bfa->rme_process) {
		while (ci != pi) {
			m = bfa_rspq_elem(bfa, qid, ci);
			bfa_isrs[m->mhdr.msg_class] (bfa, m);
			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
		}
	}

	/*
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
		bfa_ioc_mbox_isr(&bfa->ioc);

	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);

	if (intr) {
		if (intr & __HFN_INT_LL_HALT) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (intr & __HFN_INT_ERR_PSS) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared so driver's interrupt handler
			 * is still called even though it is already masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			curr_value &= __PSS_ERR_STATUS_SET;
			writel(curr_value,
				bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}
/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */

static void
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	int i, per_reqq_sz, per_rspq_sz;

	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	/*
	 * Calculate CQ size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		*dm_len = *dm_len + per_reqq_sz;
		*dm_len = *dm_len + per_rspq_sz;
	}

	/*
	 * Calculate Shadow CI/PI size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++)
		*dm_len += (2 * BFA_CACHELINE_SZ);
}
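/*
 * Account DMA memory for the firmware config request and response pages.
 */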
static void
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	*dm_len +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	*dm_len +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
}
/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);

	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
{
	u8 *dm_kva;
	u64 dm_pa;
	int i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	int dbgsz;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
	}

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;

	dm_kva +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
	}
}
/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int i;

	bfa->rme_process = BFA_TRUE;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}
/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);
}
static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;

	if (complete) {
		if (bfa->iocfc.cfgdone)
			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
		else
			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
	} else {
		if (bfa->iocfc.cfgdone)
			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
	}
}
static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
	else
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}
static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}
/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;

	fwcfg->num_cqs = fwcfg->num_cqs;
	fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);

	iocfc->cfgdone = BFA_TRUE;

	/*
	 * Configuration is complete - initialize/start submodules
	 */
	bfa_fcport_init(bfa);

	if (iocfc->action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
	else
		bfa_iocfc_start_submod(bfa);
}
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}
/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s *bfa = bfa_arg;

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
}
/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}
/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa->rme_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			     bfa);
}
/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		  u32 *dm_len)
{
	/* dma memory for IOC */
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
	bfa_iocfc_cqs_sz(cfg, dm_len);
	*km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
/*
 * IOC FC attach: set up IOC callbacks, initialize the IOC and claim memory.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	/*
	 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
	 */
	if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
		bfa_ioc_set_fcmode(&bfa->ioc);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
/*
 * Start IOC initialization: enable the IOC; the configuration request is
 * sent from the enable completion callback.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}
/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	if (bfa->iocfc.cfgdone)
		bfa_iocfc_start_submod(bfa);
}
/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
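/*
 * IOCFC mailbox message handler: dispatch firmware responses for the
 * IOCFC message class.
 */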
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		iocfc->cfg_reply = &msg->cfg_reply;
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	default:
		WARN_ON(1);
	}
}
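/*
 * Return the current interrupt moderation attributes; fall back to the
 * firmware-reported values when delay/latency have not been set.
 */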
void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
			be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
			be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}
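/*
 * Set interrupt moderation attributes and, if the IOC is operational,
 * push them to firmware with a SET_INTR request.
 */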
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_lpuid(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
	return BFA_STATUS_OK;
}
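/*
 * Program the SCSI sense-buffer base address and sense length into the
 * firmware configuration info.
 */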
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}
/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa_ioc_enable(&bfa->ioc);
}
void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}
/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}
/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to pre-defined
 * values within the BFA library.
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to bfa_attach() call.
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
{
	int i;
	u32 km_len = 0, dm_len = 0;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
		BFA_MEM_TYPE_KVA;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
		BFA_MEM_TYPE_DMA;

	bfa_iocfc_meminfo(cfg, &km_len, &dm_len);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, &km_len, &dm_len);

	dm_len += bfa_port_meminfo();

	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}
/*
 * Use this function to attach the driver instance to the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in the bfa_init() call).
 *
 * This call will fail if the cap is out of range compared to
 * pre-defined values within the BFA library.
 *
 * @param[out] bfa	Pointer to bfa_t.
 * @param[in] bfad	Opaque handle back to the driver's IOC structure
 * @param[in] cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in] meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in] pcidev	pointer to struct bfa_pcidev_s
 *
 * Special Considerations:
 * @note
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_elem_s *melem;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/*
	 * initialize all memory pointers for iterative allocation
	 */
	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo->meminfo + i;
		melem->kva_curp = melem->kva;
		melem->dma_curp = melem->dma;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);

	bfa_com_port_attach(bfa, meminfo);
}
/*
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * Special Considerations:
 * @note
 */
void
bfa_detach(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->detach(bfa);
	bfa_ioc_detach(&bfa->ioc);
}
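/*
 * Move all pending completion callbacks onto the caller-supplied list.
 */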
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}
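/*
 * Run each queued completion callback with completion status (BFA_TRUE).
 */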
void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct list_head *qen;
	struct bfa_cb_qe_s *hcb_qe;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}
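/*
 * Flush queued completions without completing them: each callback is
 * invoked with BFA_FALSE.
 */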
void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}
/*
 * Return the list of PCI vendor/device id lists supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
	*pciids = __pciids;
}
/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * Special Considerations:
 * @note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}
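/*
 * Populate cfg with the minimum supported resource configuration
 * (min_cfg mode).
 */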
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}