drivers/scsi/bfa/bfa_hw_cb.c
/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <bfa_priv.h>
#include <bfi/bfi_cbreg.h>

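/**
 * Set up the register address map for this PCI function: the host
 * interrupt status/mask registers and, for each queue, the CPE and RME
 * producer-index, consumer-index and depth registers (all as offsets
 * from BAR0).
 */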
void
bfa_hwcb_reginit(struct bfa_s *bfa)
{
	struct bfa_iocfc_regs_s	*bfa_regs = &bfa->iocfc.bfa_regs;
	bfa_os_addr_t		kva = bfa_ioc_bar0(&bfa->ioc);
	int			i, q, fn = bfa_ioc_pcifn(&bfa->ioc);

	if (fn == 0) {
		bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
		bfa_regs->intr_mask   = (kva + HOSTFN0_INT_MSK);
	} else {
		bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
		bfa_regs->intr_mask   = (kva + HOSTFN1_INT_MSK);
	}

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		/*
		 * CPE registers
		 */
		q = CPE_Q_NUM(fn, i);
		bfa_regs->cpe_q_pi[i] = (kva + CPE_Q_PI(q));
		bfa_regs->cpe_q_ci[i] = (kva + CPE_Q_CI(q));
		bfa_regs->cpe_q_depth[i] = (kva + CPE_Q_DEPTH(q));

		/*
		 * RME registers
		 */
		q = CPE_Q_NUM(fn, i);
		bfa_regs->rme_q_pi[i] = (kva + RME_Q_PI(q));
		bfa_regs->rme_q_ci[i] = (kva + RME_Q_CI(q));
		bfa_regs->rme_q_depth[i] = (kva + RME_Q_DEPTH(q));
	}
}

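/**
 * Response queue acknowledgement, non-MSI-X case: a no-op here; the
 * MSI-X variant below clears the queue's RME interrupt status bit
 * instead.
 */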
void
bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
{
}
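
/**
 * MSI-X response queue acknowledgement: write the RME queue's bit for
 * this PCI function back to the host interrupt status register.
 */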
static void
bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
{
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
		__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq));
}

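/**
 * Build the MSI-X vector bitmap for this PCI function: its four CPE and
 * four RME queue interrupts plus its mailbox interrupt, with the shared
 * error interrupts OR'd in (__HFN_NUMINTS = 13 = 4 + 4 + 1 + 4).  Also
 * returns the total vector count and the mailbox bit in *max_vec_bit.
 */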
void
bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
		      u32 *num_vecs, u32 *max_vec_bit)
{
#define __HFN_NUMINTS	13
	if (bfa_ioc_pcifn(&bfa->ioc) == 0) {
		*msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
				   __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
				   __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
				   __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
				   __HFN_INT_MBOX_LPU0);
		*max_vec_bit = __HFN_INT_MBOX_LPU0;
	} else {
		*msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
				   __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
				   __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
				   __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
				   __HFN_INT_MBOX_LPU1);
		*max_vec_bit = __HFN_INT_MBOX_LPU1;
	}

	*msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
			    __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
	*num_vecs = __HFN_NUMINTS;
}

/**
 * No special setup required for crossbow -- vector assignments are implicit.
 */
void
bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
{
	int i;

	bfa_assert((nvecs == 1) || (nvecs == __HFN_NUMINTS));

	bfa->msix.nvecs = nvecs;
	if (nvecs == 1) {
		for (i = 0; i < BFA_MSIX_CB_MAX; i++)
			bfa->msix.handler[i] = bfa_msix_all;
		return;
	}

	for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q7; i++)
		bfa->msix.handler[i] = bfa_msix_reqq;

	for (i = BFA_MSIX_RME_Q0; i <= BFA_MSIX_RME_Q7; i++)
		bfa->msix.handler[i] = bfa_msix_rspq;

	for (; i < BFA_MSIX_CB_MAX; i++)
		bfa->msix.handler[i] = bfa_msix_lpu_err;
}

/**
 * Crossbow -- dummy, interrupts are masked
 */
void
bfa_hwcb_msix_install(struct bfa_s *bfa)
{
}

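/**
 * Crossbow -- dummy, nothing to undo.
 */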
void
bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
{
}

/**
 * No special enable/disable -- vector assignments are implicit.
 */
void
bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
{
	bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
}
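
/*
 * Reader's note: the bfa_hwcb_* routines above are the Crossbow-specific
 * half of the driver's IOCFC hardware interface.  A minimal sketch of how
 * the IOCFC setup code might select them is shown below; field names other
 * than hw_rspq_ack (which appears in bfa_hwcb_isr_mode_set above) are
 * illustrative assumptions, not taken from this file:
 *
 *	iocfc->hwif.hw_reginit        = bfa_hwcb_reginit;
 *	iocfc->hwif.hw_rspq_ack       = bfa_hwcb_rspq_ack;
 *	iocfc->hwif.hw_msix_init      = bfa_hwcb_msix_init;
 *	iocfc->hwif.hw_msix_install   = bfa_hwcb_msix_install;
 *	iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
 *	iocfc->hwif.hw_msix_getvecs   = bfa_hwcb_msix_getvecs;
 *	iocfc->hwif.hw_isr_mode_set   = bfa_hwcb_isr_mode_set;
 */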