drivers/infiniband/hw/qib/qib_intr.c

/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/delay.h>

#include "qib.h"
#include "qib_common.h"

/**
 * qib_format_hwmsg - format a single hwerror message
 * @msg: message buffer
 * @msgl: length of message buffer
 * @hwmsg: message to add to message buffer
 */
static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
{
	strlcat(msg, "[", msgl);
	strlcat(msg, hwmsg, msgl);
	strlcat(msg, "]", msgl);
}

/**
 * qib_format_hwerrors - format hardware error messages for display
 * @hwerrs: hardware errors bit vector
 * @hwerrmsgs: hardware error descriptions
 * @nhwerrmsgs: number of hwerrmsgs
 * @msg: message buffer
 * @msgl: message buffer length
 */
void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
			 size_t nhwerrmsgs, char *msg, size_t msgl)
{
	int i;

	for (i = 0; i < nhwerrmsgs; i++)
		if (hwerrs & hwerrmsgs[i].mask)
			qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
}

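/*
 * Build an ib_event for this port and hand it to the verbs core, which
 * fans it out to all registered IB event handlers.
 */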
static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
{
	struct ib_event event;
	struct qib_devdata *dd = ppd->dd;

	event.device = &dd->verbs_dev.ibdev;
	event.element.port_num = ppd->port;
	event.event = ev;
	ib_dispatch_event(&event);
}

void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	u32 lstate;
	u8 ltstate;
	enum ib_event_type ev = 0;

	lstate = dd->f_iblink_state(ibcs); /* linkstate */
	ltstate = dd->f_ibphys_portstate(ibcs);

	/*
	 * If linkstate transitions into INIT from any of the various down
	 * states, or if it transitions from any of the up (INIT or better)
	 * states into any of the down states (except link recovery), then
	 * call the chip-specific code to take appropriate actions.
	 */
	if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
	    ltstate == IB_PHYSPORTSTATE_LINKUP) {
		/* transitioned to UP */
		if (dd->f_ib_updown(ppd, 1, ibcs))
			goto skip_ibchange; /* chip-code handled */
	} else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
		   QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
		if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
		    ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
		    dd->f_ib_updown(ppd, 0, ibcs))
			goto skip_ibchange; /* chip-code handled */
		qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
	}

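	/*
	 * Track the new state in the cached link flags and the status word,
	 * and pick the IB event (if any) to report below.
	 */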
	if (lstate != IB_PORT_DOWN) {
		/* lstate is INIT, ARMED, or ACTIVE */
		if (lstate != IB_PORT_ACTIVE) {
			*ppd->statusp &= ~QIB_STATUS_IB_READY;
			if (ppd->lflags & QIBL_LINKACTIVE)
				ev = IB_EVENT_PORT_ERR;
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			if (lstate == IB_PORT_ARMED) {
				ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
				ppd->lflags &= ~(QIBL_LINKINIT |
					QIBL_LINKDOWN | QIBL_LINKACTIVE);
			} else {
				ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
				ppd->lflags &= ~(QIBL_LINKARMED |
					QIBL_LINKDOWN | QIBL_LINKACTIVE);
			}
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			/* start a 75msec timer to clear symbol errors */
			mod_timer(&ppd->symerr_clear_timer,
				  msecs_to_jiffies(75));
		} else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
			/* active, but not active deferred */
			qib_hol_up(ppd); /* useful only for 6120 now */
			*ppd->statusp |=
				QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
			qib_clear_symerror_on_linkup((unsigned long)ppd);
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
			ppd->lflags &= ~(QIBL_LINKINIT |
				QIBL_LINKDOWN | QIBL_LINKARMED);
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
			if (dd->flags & QIB_HAS_SEND_DMA)
				qib_sdma_process_event(ppd,
					qib_sdma_event_e30_go_running);
			ev = IB_EVENT_PORT_ACTIVE;
			dd->f_setextled(ppd, 1);
		}
	} else { /* down */
		if (ppd->lflags & QIBL_LINKACTIVE)
			ev = IB_EVENT_PORT_ERR;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
		ppd->lflags &= ~(QIBL_LINKINIT |
			QIBL_LINKACTIVE | QIBL_LINKARMED);
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		*ppd->statusp &= ~QIB_STATUS_IB_READY;
	}

skip_ibchange:
	ppd->lastibcstat = ibcs;
	if (ev)
		signal_ib_event(ppd, ev);
	return;
}

void qib_clear_symerror_on_linkup(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

	if (ppd->lflags & QIBL_LINKACTIVE)
		return;

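	/*
	 * The link just came back up: record the chip's current symbol
	 * error count as the new zero baseline for counter reporting.
	 */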
	ppd->ibport_data.z_symbol_error_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
}

/*
 * Handle receive interrupts for user ctxts; this means a user
 * process was waiting for a packet to arrive, and didn't want
 * to poll.
 */
void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
{
	struct qib_ctxtdata *rcd;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
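	/*
	 * ctxtr is a bitmask of user contexts with pending receive
	 * interrupts; wake any process sleeping on each such context's
	 * wait queue.
	 */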
	for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
		if (!(ctxtr & (1ULL << i)))
			continue;
		rcd = dd->rcd[i];
		if (!rcd || !rcd->cnt)
			continue;

		if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
			wake_up_interruptible(&rcd->wait);
			dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
				      rcd->ctxt);
		} else if (test_and_clear_bit(QIB_CTXT_WAITING_URG,
					      &rcd->flag)) {
			rcd->urgent++;
			wake_up_interruptible(&rcd->wait);
		}
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
}

void qib_bad_intrstatus(struct qib_devdata *dd)
{
	static int allbits;

	/* separate routine, for better optimization of qib_intr() */

	/*
	 * We print the message and disable interrupts, in hope of
	 * having a better chance of debugging the problem.
	 */
	qib_dev_err(dd,
		"Read of chip interrupt status failed, disabling interrupts\n");
	if (allbits++) {
		/* disable interrupt delivery, something is very wrong */
		if (allbits == 2)
			dd->f_set_intr_state(dd, 0);
		if (allbits == 3) {
			qib_dev_err(dd, "2nd bad interrupt status, "
				    "unregistering interrupts\n");
			dd->flags |= QIB_BADINTR;
			dd->flags &= ~QIB_INITTED;
			dd->f_free_irq(dd);
		}
	}
}