/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
18 #include <bfi/bfi_ctreg.h>
19 #include <bfa_port_priv.h>
20 #include <bfa_intr_priv.h>
21 #include <cs/bfa_debug.h>
/* Register this file with the HAL trace facility under the INTR tag. */
BFA_TRC_FILE(HAL, INTR);
26 bfa_msix_errint(struct bfa_s
*bfa
, u32 intr
)
28 bfa_ioc_error_isr(&bfa
->ioc
);
32 bfa_msix_lpu(struct bfa_s
*bfa
)
34 bfa_ioc_mbox_isr(&bfa
->ioc
);
38 bfa_reqq_resume(struct bfa_s
*bfa
, int qid
)
40 struct list_head
*waitq
, *qe
, *qen
;
41 struct bfa_reqq_wait_s
*wqe
;
43 waitq
= bfa_reqq(bfa
, qid
);
44 list_for_each_safe(qe
, qen
, waitq
) {
46 * Callback only as long as there is room in request queue
48 if (bfa_reqq_full(bfa
, qid
))
52 wqe
= (struct bfa_reqq_wait_s
*) qe
;
53 wqe
->qresume(wqe
->cbarg
);
/**
 * Catch-all MSI-X handler used when all events share one vector: fall
 * back to the INTx-style dispatcher, which demultiplexes from the
 * interrupt status register.
 *
 * @param[in] bfa  BFA instance
 * @param[in] vec  vector number (unused; status register is authoritative)
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}
67 bfa_intx(struct bfa_s
*bfa
)
72 intr
= bfa_reg_read(bfa
->iocfc
.bfa_regs
.intr_status
);
77 * RME completion queue interrupt
79 qintr
= intr
& __HFN_INT_RME_MASK
;
80 bfa_reg_write(bfa
->iocfc
.bfa_regs
.intr_status
, qintr
);
82 for (queue
= 0; queue
< BFI_IOC_MAX_CQS_ASIC
; queue
++) {
83 if (intr
& (__HFN_INT_RME_Q0
<< queue
))
84 bfa_msix_rspq(bfa
, queue
& (BFI_IOC_MAX_CQS
- 1));
91 * CPE completion queue interrupt
93 qintr
= intr
& __HFN_INT_CPE_MASK
;
94 bfa_reg_write(bfa
->iocfc
.bfa_regs
.intr_status
, qintr
);
96 for (queue
= 0; queue
< BFI_IOC_MAX_CQS_ASIC
; queue
++) {
97 if (intr
& (__HFN_INT_CPE_Q0
<< queue
))
98 bfa_msix_reqq(bfa
, queue
& (BFI_IOC_MAX_CQS
- 1));
104 bfa_msix_lpu_err(bfa
, intr
);
110 bfa_isr_enable(struct bfa_s
*bfa
)
113 int pci_func
= bfa_ioc_pcifn(&bfa
->ioc
);
115 bfa_trc(bfa
, pci_func
);
117 bfa_msix_install(bfa
);
118 intr_unmask
= (__HFN_INT_ERR_EMC
| __HFN_INT_ERR_LPU0
|
119 __HFN_INT_ERR_LPU1
| __HFN_INT_ERR_PSS
|
123 intr_unmask
|= (__HFN_INT_CPE_Q0
| __HFN_INT_CPE_Q1
|
124 __HFN_INT_CPE_Q2
| __HFN_INT_CPE_Q3
|
125 __HFN_INT_RME_Q0
| __HFN_INT_RME_Q1
|
126 __HFN_INT_RME_Q2
| __HFN_INT_RME_Q3
|
127 __HFN_INT_MBOX_LPU0
);
129 intr_unmask
|= (__HFN_INT_CPE_Q4
| __HFN_INT_CPE_Q5
|
130 __HFN_INT_CPE_Q6
| __HFN_INT_CPE_Q7
|
131 __HFN_INT_RME_Q4
| __HFN_INT_RME_Q5
|
132 __HFN_INT_RME_Q6
| __HFN_INT_RME_Q7
|
133 __HFN_INT_MBOX_LPU1
);
135 bfa_reg_write(bfa
->iocfc
.bfa_regs
.intr_status
, intr_unmask
);
136 bfa_reg_write(bfa
->iocfc
.bfa_regs
.intr_mask
, ~intr_unmask
);
137 bfa_isr_mode_set(bfa
, bfa
->msix
.nvecs
!= 0);
141 bfa_isr_disable(struct bfa_s
*bfa
)
143 bfa_isr_mode_set(bfa
, BFA_FALSE
);
144 bfa_reg_write(bfa
->iocfc
.bfa_regs
.intr_mask
, -1L);
145 bfa_msix_uninstall(bfa
);
149 bfa_msix_reqq(struct bfa_s
*bfa
, int qid
)
151 struct list_head
*waitq
;
153 qid
&= (BFI_IOC_MAX_CQS
- 1);
155 bfa
->iocfc
.hwif
.hw_reqq_ack(bfa
, qid
);
158 * Resume any pending requests in the corresponding reqq.
160 waitq
= bfa_reqq(bfa
, qid
);
161 if (!list_empty(waitq
))
162 bfa_reqq_resume(bfa
, qid
);
166 bfa_isr_unhandled(struct bfa_s
*bfa
, struct bfi_msg_s
*m
)
168 bfa_trc(bfa
, m
->mhdr
.msg_class
);
169 bfa_trc(bfa
, m
->mhdr
.msg_id
);
170 bfa_trc(bfa
, m
->mhdr
.mtag
.i2htok
);
172 bfa_trc_stop(bfa
->trcmod
);
176 bfa_msix_rspq(struct bfa_s
*bfa
, int qid
)
180 struct list_head
*waitq
;
182 bfa_trc_fp(bfa
, qid
);
184 qid
&= (BFI_IOC_MAX_CQS
- 1);
186 bfa
->iocfc
.hwif
.hw_rspq_ack(bfa
, qid
);
188 ci
= bfa_rspq_ci(bfa
, qid
);
189 pi
= bfa_rspq_pi(bfa
, qid
);
194 if (bfa
->rme_process
) {
196 m
= bfa_rspq_elem(bfa
, qid
, ci
);
197 bfa_assert_fp(m
->mhdr
.msg_class
< BFI_MC_MAX
);
199 bfa_isrs
[m
->mhdr
.msg_class
] (bfa
, m
);
201 CQ_INCR(ci
, bfa
->iocfc
.cfg
.drvcfg
.num_rspq_elems
);
208 bfa_rspq_ci(bfa
, qid
) = pi
;
209 bfa_reg_write(bfa
->iocfc
.bfa_regs
.rme_q_ci
[qid
], pi
);
213 * Resume any pending requests in the corresponding reqq.
215 waitq
= bfa_reqq(bfa
, qid
);
216 if (!list_empty(waitq
))
217 bfa_reqq_resume(bfa
, qid
);
221 bfa_msix_lpu_err(struct bfa_s
*bfa
, int vec
)
223 u32 intr
, curr_value
;
225 intr
= bfa_reg_read(bfa
->iocfc
.bfa_regs
.intr_status
);
227 if (intr
& (__HFN_INT_MBOX_LPU0
| __HFN_INT_MBOX_LPU1
))
230 intr
&= (__HFN_INT_ERR_EMC
| __HFN_INT_ERR_LPU0
|
231 __HFN_INT_ERR_LPU1
| __HFN_INT_ERR_PSS
| __HFN_INT_LL_HALT
);
234 if (intr
& __HFN_INT_LL_HALT
) {
236 * If LL_HALT bit is set then FW Init Halt LL Port
237 * Register needs to be cleared as well so Interrupt
238 * Status Register will be cleared.
240 curr_value
= bfa_reg_read(bfa
->ioc
.ioc_regs
.ll_halt
);
241 curr_value
&= ~__FW_INIT_HALT_P
;
242 bfa_reg_write(bfa
->ioc
.ioc_regs
.ll_halt
, curr_value
);
245 if (intr
& __HFN_INT_ERR_PSS
) {
247 * ERR_PSS bit needs to be cleared as well in case
248 * interrups are shared so driver's interrupt handler is
249 * still called eventhough it is already masked out.
251 curr_value
= bfa_reg_read(
252 bfa
->ioc
.ioc_regs
.pss_err_status_reg
);
253 curr_value
&= __PSS_ERR_STATUS_SET
;
254 bfa_reg_write(bfa
->ioc
.ioc_regs
.pss_err_status_reg
,
258 bfa_reg_write(bfa
->iocfc
.bfa_regs
.intr_status
, intr
);
259 bfa_msix_errint(bfa
, intr
);
264 bfa_isr_bind(enum bfi_mclass mc
, bfa_isr_func_t isr_func
)
266 bfa_isrs
[mc
] = isr_func
;