/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>

#include "ipath_kernel.h"
#include "ips_common.h"
#include "ipath_layer.h"
/* These are all rcv-related errors which we want to count for stats */
#define E_SUM_PKTERRS \
	(INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
	 INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
	 INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | \
	 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RMINPKTLEN | \
	 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
	 INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)

/* These are all send-related errors which we want to count for stats */
#define E_SUM_ERRS \
	(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
	 INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
	 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNSUPVL | \
	 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
	 INFINIPATH_E_INVALIDADDR)

/*
 * These are errors that can occur when the link changes state while
 * a packet is being sent or received.  This doesn't cover things
 * like EBP or VCRC that can be the result of the link changing state
 * while a packet is in flight, in which case we receive a "known bad"
 * packet.
 */
#define E_SUM_LINK_PKTERRS \
	(INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
	 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
	 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RMINPKTLEN | \
	 INFINIPATH_E_RUNEXPCHAR)
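
/*
 * A sketch of the send-buffer-error layout assumed by
 * handle_e_sum_errs() below: kr_sendbuffererror is read as four
 * consecutive 64-bit registers into sbuf[], which together form a
 * bitmap over the (up to 4 * 64 = 256) PIO send buffers.  Buffer i is
 * flagged when bit (i % 64) of word (i / 64) is set, which is exactly
 * what test_bit(i, sbuf) checks; e.g. an error on buffer 70 shows up
 * as bit 6 of sbuf[1].  Only the first two words are meaningful on
 * chips with piobcnt <= 128, hence the "piobcnt > 128" guards.
 */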
static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
{
	unsigned long sbuf[4];
	u64 ignore_this_time = 0;
	u32 piobcnt;

	/* if possible that sendbuffererror could be valid */
	piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
	/* read these before writing errorclear */
	sbuf[0] = ipath_read_kreg64(
		dd, dd->ipath_kregs->kr_sendbuffererror);
	sbuf[1] = ipath_read_kreg64(
		dd, dd->ipath_kregs->kr_sendbuffererror + 1);
	if (piobcnt > 128) {
		sbuf[2] = ipath_read_kreg64(
			dd, dd->ipath_kregs->kr_sendbuffererror + 2);
		sbuf[3] = ipath_read_kreg64(
			dd, dd->ipath_kregs->kr_sendbuffererror + 3);
	}

	if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
		int i;

		ipath_cdbg(PKT, "SendbufErrs %lx %lx ", sbuf[0], sbuf[1]);
		if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128)
			printk("%lx %lx ", sbuf[2], sbuf[3]);
		for (i = 0; i < piobcnt; i++) {
			if (test_bit(i, sbuf)) {
				u32 __iomem *piobuf;

				if (i < dd->ipath_piobcnt2k)
					piobuf = (u32 __iomem *)
						(dd->ipath_pio2kbase +
						 i * dd->ipath_palign);
				else
					piobuf = (u32 __iomem *)
						(dd->ipath_pio4kbase +
						 (i - dd->ipath_piobcnt2k) *
						 dd->ipath_4kalign);

				ipath_cdbg(PKT,
					   "PIObuf[%u] @%p pbc is %x; ",
					   i, piobuf, readl(piobuf));

				ipath_disarm_piobufs(dd, i, 1);
			}
		}
		if (ipath_debug & __IPATH_PKTDBG)
			printk("\n");
	}
	if ((errs & E_SUM_LINK_PKTERRS) &&
	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		/*
		 * This can happen when the SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid.  We don't want to confuse people, so we just
		 * don't print them, except at debug level.
		 */
		ipath_dbg("Ignoring packet errors %llx, because link not "
			  "ACTIVE\n", (unsigned long long) errs);
		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
	}

	return ignore_this_time;
}
/* return the strings for the most common link states */
static char *ib_linkstate(u32 linkstate)
{
	char *ret;

	switch (linkstate) {
	case IPATH_IBSTATE_INIT:
		ret = "Init";
		break;
	case IPATH_IBSTATE_ARM:
		ret = "Arm";
		break;
	case IPATH_IBSTATE_ACTIVE:
		ret = "Active";
		break;
	default:
		ret = "Down";
	}

	return ret;
}

static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
				     ipath_err_t errs, int noprint)
{
	u64 val;
	u32 ltstate, lstate;

	/*
	 * even if diags are enabled, we want to notice LINKINIT, etc.
	 * We just don't want to change the LED state, or
	 * dd->ipath_kregs->kr_ibcctrl
	 */
	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
	lstate = val & IPATH_IBSTATE_MASK;

	/*
	 * This is confusing enough when it happens that we always want it
	 * on the console and in the logs.  If it was a requested state
	 * change, we'll have already cleared the flags, so we won't print
	 * this warning.
	 */
	if ((lstate != IPATH_IBSTATE_ARM && lstate != IPATH_IBSTATE_ACTIVE)
	    && (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
		dev_info(&dd->pcidev->dev, "Link state changed from %s to %s\n",
			 (dd->ipath_flags & IPATH_LINKARMED) ? "ARM" : "ACTIVE",
			 ib_linkstate(lstate));
		/*
		 * Flush all queued sends when the link goes to DOWN or INIT,
		 * to be sure that they don't block SMA and other MAD packets
		 */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 INFINIPATH_S_ABORT);
		ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
				     (unsigned)(dd->ipath_piobcnt2k +
				     dd->ipath_piobcnt4k) -
				     dd->ipath_lastport_piobuf);
	}
	else if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
		 lstate == IPATH_IBSTATE_ACTIVE) {
		/*
		 * only print at SMA if there is a change, debug if not
		 * (sometimes we want to know that, usually not).
		 */
		if (lstate == ((unsigned) dd->ipath_lastibcstat
			       & IPATH_IBSTATE_MASK)) {
			ipath_dbg("Status change intr but no change (%s)\n",
				  ib_linkstate(lstate));
		}
		else
			ipath_cdbg(SMA, "Unit %u link state %s, last "
				   "was %s\n", dd->ipath_unit,
				   ib_linkstate(lstate),
				   ib_linkstate((unsigned)
						dd->ipath_lastibcstat
						& IPATH_IBSTATE_MASK));
	}
	else {
		lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
		if (lstate == IPATH_IBSTATE_INIT ||
		    lstate == IPATH_IBSTATE_ARM ||
		    lstate == IPATH_IBSTATE_ACTIVE)
			ipath_cdbg(SMA, "Unit %u link state down"
				   " (state 0x%x), from %s\n",
				   dd->ipath_unit,
				   (u32) val & IPATH_IBSTATE_MASK,
				   ib_linkstate(lstate));
		else
			ipath_cdbg(VERBOSE, "Unit %u link state changed "
				   "to 0x%x from down (%x)\n",
				   dd->ipath_unit, (u32) val, lstate);
	}
	ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
		INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
	lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
		INFINIPATH_IBCS_LINKSTATE_MASK;

	if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
	    ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
		u32 last_ltstate;

		/*
		 * Ignore cycling back and forth from Polling.Active
		 * to Polling.Quiet while waiting for the other end of
		 * the link to come up.  We will cycle back and forth
		 * between them if no cable is plugged in, the other
		 * device is powered off or disabled, etc.
		 */
		last_ltstate = (dd->ipath_lastibcstat >>
				INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT)
			& INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
		if (last_ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE
		    || last_ltstate ==
		    INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
			if (dd->ipath_ibpollcnt > 40) {
				dd->ipath_flags |= IPATH_NOCABLE;
				*dd->ipath_statusp |=
					IPATH_STATUS_IB_NOCABLE;
			} else
				dd->ipath_ibpollcnt++;
			goto skip_ibchange;
		}
	}
	dd->ipath_ibpollcnt = 0;	/* some state other than 2 or 3 */
	ipath_stats.sps_iblink++;
	if (ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
		dd->ipath_flags |= IPATH_LINKDOWN;
		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
				     | IPATH_LINKACTIVE | IPATH_LINKARMED);
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
		if (!noprint) {
			if (((dd->ipath_lastibcstat >>
			      INFINIPATH_IBCS_LINKSTATE_SHIFT) &
			     INFINIPATH_IBCS_LINKSTATE_MASK)
			    == INFINIPATH_IBCS_L_STATE_ACTIVE)
				/* if from up to down be more vocal */
				ipath_cdbg(SMA,
					   "Unit %u link now down (%s)\n",
					   dd->ipath_unit,
					   ipath_ibcstatus_str[ltstate]);
			else
				ipath_cdbg(VERBOSE, "Unit %u link is "
					   "down (%s)\n", dd->ipath_unit,
					   ipath_ibcstatus_str[ltstate]);
		}

		dd->ipath_f_setextled(dd, lstate, ltstate);
	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ACTIVE) {
		dd->ipath_flags |= IPATH_LINKACTIVE;
		dd->ipath_flags &=
			~(IPATH_LINKUNK | IPATH_LINKINIT | IPATH_LINKDOWN |
			  IPATH_LINKARMED | IPATH_NOCABLE);
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
		*dd->ipath_statusp |=
			IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
		dd->ipath_f_setextled(dd, lstate, ltstate);

		__ipath_layer_intr(dd, IPATH_LAYER_INT_IF_UP);
	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {
		/*
		 * set INIT and DOWN.  Down is checked by most of the other
		 * code, but INIT is useful to know in a few places.
		 */
		dd->ipath_flags |= IPATH_LINKINIT | IPATH_LINKDOWN;
		dd->ipath_flags &=
			~(IPATH_LINKUNK | IPATH_LINKACTIVE | IPATH_LINKARMED
			  | IPATH_NOCABLE);
		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
					| IPATH_STATUS_IB_READY);
		dd->ipath_f_setextled(dd, lstate, ltstate);
	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ARM) {
		dd->ipath_flags |= IPATH_LINKARMED;
		dd->ipath_flags &=
			~(IPATH_LINKUNK | IPATH_LINKDOWN | IPATH_LINKINIT |
			  IPATH_LINKACTIVE | IPATH_NOCABLE);
		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
					| IPATH_STATUS_IB_READY);
		dd->ipath_f_setextled(dd, lstate, ltstate);
	} else {
		if (!noprint)
			ipath_dbg("IBstatuschange unit %u: %s (%x)\n",
				  dd->ipath_unit,
				  ipath_ibcstatus_str[ltstate], ltstate);
	}

skip_ibchange:
	dd->ipath_lastibcstat = val;
}
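
/*
 * Decode sketch for kr_ibcstatus as used above (assuming the usual
 * shift/mask constants from ipath_kernel.h): the low bits masked by
 * IPATH_IBSTATE_MASK give the overall IB state compared against
 * IPATH_IBSTATE_INIT/ARM/ACTIVE, while the link state and link
 * training state are separate fields extracted with
 * INFINIPATH_IBCS_LINKSTATE_SHIFT/_MASK and
 * INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT/_MASK; ltstate also indexes
 * ipath_ibcstatus_str[] for a printable name.  The "> 40" poll count
 * above means roughly 40 consecutive Polling.Active/Polling.Quiet
 * transitions are tolerated before IPATH_NOCABLE is set.
 */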
static void handle_supp_msgs(struct ipath_devdata *dd,
			     unsigned supp_msgs, char msg[512])
{
	/*
	 * Print the message unless it's ibc status change only, which
	 * happens so often we never want to count it.
	 */
	if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
		/* msg decays to a pointer here, so sizeof msg would only
		 * be the pointer size; pass the real buffer size */
		ipath_decode_err(msg, 512, dd->ipath_lasterror &
				 ~INFINIPATH_E_IBSTATUSCHANGED);
		if (dd->ipath_lasterror &
		    ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
			ipath_dev_err(dd, "Suppressed %u messages for "
				      "fast-repeating errors (%s) (%llx)\n",
				      supp_msgs, msg,
				      (unsigned long long)
				      dd->ipath_lasterror);
		else {
			/*
			 * rcvegrfull and rcvhdrqfull are "normal", for some
			 * types of processes (mostly benchmarks) that send
			 * huge numbers of messages, while not processing
			 * them.  So only complain about these at debug
			 * level.
			 */
			ipath_dbg("Suppressed %u messages for %s\n",
				  supp_msgs, msg);
		}
	}
}
static unsigned handle_frequent_errors(struct ipath_devdata *dd,
				       ipath_err_t errs, char msg[512],
				       int *noprint)
{
	unsigned long nc;
	static unsigned long nextmsg_time;
	static unsigned nmsgs, supp_msgs;

	/*
	 * Throttle back "fast" messages to no more than 10 per 5 seconds.
	 * This isn't perfect, but it's a reasonable heuristic.  If we get
	 * more than 10, give a 6x longer delay.
	 */
	nc = jiffies;
	if (nmsgs > 10) {
		if (time_before(nc, nextmsg_time)) {
			*noprint = 1;
			if (!supp_msgs++)
				nextmsg_time = nc + HZ * 3;
		}
		else if (supp_msgs) {
			handle_supp_msgs(dd, supp_msgs, msg);
			supp_msgs = 0;
			nmsgs = 0;
		}
	}
	else if (!nmsgs++ || time_after(nc, nextmsg_time))
		nextmsg_time = nc + HZ / 2;

	return supp_msgs;
}
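
/*
 * Worked example of the throttle above: the first 10 messages always
 * print, each pushing nextmsg_time to now + HZ/2, which approximates
 * the "10 per 5 seconds" in the comment.  From the 11th message on, a
 * message arriving before nextmsg_time is suppressed (*noprint = 1),
 * and the first suppressed message stretches the window to now +
 * HZ * 3 (the "6x longer delay": 3*HZ versus HZ/2).  When a message
 * finally arrives after the window expires, handle_supp_msgs()
 * reports how many were swallowed and both counters reset.
 */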
static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
{
	char msg[512];
	u64 ignore_this_time = 0;
	int i;
	int chkerrpkts = 0, noprint = 0;
	unsigned supp_msgs;

	supp_msgs = handle_frequent_errors(dd, errs, msg, &noprint);

	/*
	 * don't report errors that are masked (includes those always
	 * ignored)
	 */
	errs &= ~dd->ipath_maskederrs;

	/* do these first, they are most important */
	if (errs & INFINIPATH_E_HARDWARE) {
		/* reuse same msg buf */
		dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
	}

	if (!noprint && (errs & ~infinipath_e_bitsextant))
		ipath_dev_err(dd, "error interrupt with unknown errors "
			      "%llx set\n", (unsigned long long)
			      (errs & ~infinipath_e_bitsextant));

	if (errs & E_SUM_ERRS)
		ignore_this_time = handle_e_sum_errs(dd, errs);
	else if ((errs & E_SUM_LINK_PKTERRS) &&
		 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		/*
		 * This can happen when the SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid.  We don't want to confuse people, so we just
		 * don't print them, except at debug level.
		 */
		ipath_dbg("Ignoring packet errors %llx, because link not "
			  "ACTIVE\n", (unsigned long long) errs);
		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
	}

	if (supp_msgs == 250000) {
		/*
		 * It's not entirely reasonable to assume that the errors set
		 * in the last clear period are all responsible for the
		 * problem, but the alternative is to assume it's the only
		 * ones on this particular interrupt, which also isn't great
		 */
		dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
				 ~dd->ipath_maskederrs);
		ipath_decode_err(msg, sizeof msg,
				 (dd->ipath_maskederrs & ~dd->
				  ipath_ignorederrs));

		if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
		    ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
			ipath_dev_err(dd, "Disabling error(s) %llx because "
				      "occurring too frequently (%s)\n",
				      (unsigned long long)
				      (dd->ipath_maskederrs &
				       ~dd->ipath_ignorederrs), msg);
		else {
			/*
			 * rcvegrfull and rcvhdrqfull are "normal",
			 * for some types of processes (mostly benchmarks)
			 * that send huge numbers of messages, while not
			 * processing them.  So only complain about
			 * these at debug level.
			 */
			ipath_dbg("Disabling frequent queue full errors "
				  "(%s)\n", msg);
		}

		/*
		 * Re-enable the masked errors after around 3 minutes, in
		 * ipath_get_faststats().  If we have a series of fast
		 * repeating but different errors, the interval will keep
		 * stretching out, but that's OK, as that situation is
		 * pretty catastrophic anyway.
		 */
		dd->ipath_unmasktime = jiffies + HZ * 180;
	}

	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
	if (ignore_this_time)
		errs &= ~ignore_this_time;
	if (errs & ~dd->ipath_lasterror) {
		errs &= ~dd->ipath_lasterror;
		/* never suppress duplicate hwerrors or ibstatuschange */
		dd->ipath_lasterror |= errs &
			~(INFINIPATH_E_HARDWARE |
			  INFINIPATH_E_IBSTATUSCHANGED);
	}
	if (!errs)
		return 0;

	if (!noprint)
		/*
		 * the ones we mask off are handled specially below or above
		 */
		ipath_decode_err(msg, sizeof msg,
				 errs & ~(INFINIPATH_E_IBSTATUSCHANGED |
					  INFINIPATH_E_RRCVEGRFULL |
					  INFINIPATH_E_RRCVHDRFULL |
					  INFINIPATH_E_HARDWARE));
	else
		/* so we don't need if (!noprint) at strlcat's below */
		*msg = 0;

	if (errs & E_SUM_PKTERRS) {
		ipath_stats.sps_pkterrs++;
		chkerrpkts = 1;
	}
	if (errs & E_SUM_ERRS)
		ipath_stats.sps_errs++;

	if (errs & (INFINIPATH_E_RICRC | INFINIPATH_E_RVCRC)) {
		ipath_stats.sps_crcerrs++;
		chkerrpkts = 1;
	}

	/*
	 * We don't want to print these two as they happen, or we can make
	 * the situation even worse, because it takes so long to print
	 * messages to serial consoles.  Kernel ports get printed from
	 * fast_stats, no more than every 5 seconds, user ports get printed
	 * on close
	 */
	if (errs & INFINIPATH_E_RRCVHDRFULL) {
		int any;
		u32 hd, tl;

		ipath_stats.sps_hdrqfull++;
		for (any = i = 0; i < dd->ipath_cfgports; i++) {
			struct ipath_portdata *pd = dd->ipath_pd[i];
			if (i == 0) {
				hd = dd->ipath_port0head;
				tl = (u32) le64_to_cpu(
					*dd->ipath_hdrqtailptr);
			} else if (pd && pd->port_cnt &&
				   pd->port_rcvhdrtail_kvaddr) {
				/*
				 * don't report same point multiple times,
				 * except kernel
				 */
				tl = (u32) * pd->port_rcvhdrtail_kvaddr;
				if (tl == dd->ipath_lastrcvhdrqtails[i])
					continue;
				hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
						       i);
			} else
				continue;
			if (hd == (tl + 1) ||
			    (!hd && tl == dd->ipath_hdrqlast)) {
				if (i == 0)
					chkerrpkts = 1;
				dd->ipath_lastrcvhdrqtails[i] = tl;
				pd->port_hdrqfull++;
			}
		}
	}
	if (errs & INFINIPATH_E_RRCVEGRFULL) {
		/*
		 * since this is of less importance and not likely to
		 * happen without also getting hdrfull, only count
		 * occurrences; don't check each port (or even the kernel
		 * vs user)
		 */
		ipath_stats.sps_etidfull++;
		if (dd->ipath_port0head !=
		    (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
			chkerrpkts = 1;
	}

	/*
	 * do this before IBSTATUSCHANGED, in case both bits are set in a
	 * single interrupt; we want the STATUSCHANGE to "win", so that we
	 * update our internal copy of the state machine correctly
	 */
	if (errs & INFINIPATH_E_RIBLOSTLINK) {
		/*
		 * force through block below
		 */
		errs |= INFINIPATH_E_IBSTATUSCHANGED;
		ipath_stats.sps_iblink++;
		dd->ipath_flags |= IPATH_LINKDOWN;
		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
				     | IPATH_LINKARMED | IPATH_LINKACTIVE);
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
		if (!noprint) {
			u64 st = ipath_read_kreg64(
				dd, dd->ipath_kregs->kr_ibcstatus);

			ipath_dbg("Lost link, link now down (%s)\n",
				  ipath_ibcstatus_str[st & 0xf]);
		}
	}
	if (errs & INFINIPATH_E_IBSTATUSCHANGED)
		handle_e_ibstatuschanged(dd, errs, noprint);

	if (errs & INFINIPATH_E_RESET) {
		if (!noprint)
			ipath_dev_err(dd, "Got reset, requires re-init "
				      "(unload and reload driver)\n");
		dd->ipath_flags &= ~IPATH_INITTED;	/* needs re-init */
		/* mark as having had error */
		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
	}

	if (!noprint && *msg)
		ipath_dev_err(dd, "%s error\n", msg);
	if (dd->ipath_sma_state_wanted & dd->ipath_flags) {
		ipath_cdbg(VERBOSE, "sma wanted state %x, iflags now %x, "
			   "waking\n", dd->ipath_sma_state_wanted,
			   dd->ipath_flags);
		wake_up_interruptible(&ipath_sma_state_wait);
	}

	return chkerrpkts;
}
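
/*
 * Note on the return value above: handle_errors() returns nonzero
 * (chkerrpkts) when the errors may have left packets sitting in the
 * port 0 receive queue (packet errors, CRC errors, or a full header
 * queue); the caller, ipath_intr(), uses that to force a call to
 * ipath_kreceive() even if no receive-available bit is set in istat.
 */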
/* this is separate to allow for better optimization of ipath_intr() */

static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
{
	/*
	 * These sometimes happen during driver init and unload; we don't
	 * want to process any interrupts at that point.
	 */

	/* this is just a bandaid, not a fix, if something goes badly
	 * wrong */
	if (++*unexpectp > 100) {
		if (++*unexpectp > 105) {
			/*
			 * ok, we must be taking somebody else's interrupts,
			 * due to a messed up mptable and/or PIRQ table, so
			 * unregister the interrupt.  We've seen this during
			 * linuxbios development work, and it may happen in
			 * the future again.
			 */
			if (dd->pcidev && dd->pcidev->irq) {
				ipath_dev_err(dd, "Now %u unexpected "
					      "interrupts, unregistering "
					      "interrupt handler\n",
					      *unexpectp);
				ipath_dbg("free_irq of irq %x\n",
					  dd->pcidev->irq);
				free_irq(dd->pcidev->irq, dd);
			}
		}
		if (ipath_read_kreg32(dd, dd->ipath_kregs->kr_intmask)) {
			ipath_dev_err(dd, "%u unexpected interrupts, "
				      "disabling interrupts completely\n",
				      *unexpectp);
			/*
			 * disable all interrupts, something is very wrong
			 */
			ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
					 0ULL);
		}
	} else if (*unexpectp > 1)
		ipath_dbg("Interrupt when not ready, should not happen, "
			  "ignoring\n");
}
static void ipath_bad_regread(struct ipath_devdata *dd)
{
	static int allbits;

	/* separate routine, for better optimization of ipath_intr() */

	/*
	 * We print the message and disable interrupts, in hope of
	 * having a better chance of debugging the problem.
	 */
	ipath_dev_err(dd,
		      "Read of interrupt status failed (all bits set)\n");
	if (allbits++) {
		/* disable all interrupts, something is very wrong */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
		if (allbits == 2) {
			ipath_dev_err(dd, "Still bad interrupt status, "
				      "unregistering interrupt\n");
			free_irq(dd->pcidev->irq, dd);
		} else if (allbits > 2) {
			if ((allbits % 10000) == 0)
				printk(".");
		} else
			ipath_dev_err(dd, "Disabling interrupts, "
				      "multiple errors\n");
	}
}
static void handle_port_pioavail(struct ipath_devdata *dd)
{
	u32 i;

	/*
	 * start from port 1, since for now port 0 is never using
	 * wait_event for PIO
	 */
	for (i = 1; dd->ipath_portpiowait && i < dd->ipath_cfgports; i++) {
		struct ipath_portdata *pd = dd->ipath_pd[i];

		if (pd && pd->port_cnt &&
		    dd->ipath_portpiowait & (1U << i)) {
			clear_bit(i, &dd->ipath_portpiowait);
			if (test_bit(IPATH_PORT_WAITING_PIO,
				     &pd->port_flag)) {
				clear_bit(IPATH_PORT_WAITING_PIO,
					  &pd->port_flag);
				wake_up_interruptible(&pd->port_wait);
			}
		}
	}
}
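
/*
 * dd->ipath_portpiowait is used above as a bitmask with one bit per
 * port: bit i is set while port i is blocked waiting for PIO buffers.
 * Re-testing it in the loop condition lets the scan stop early once
 * every waiting port has been woken and its bit cleared.
 */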
static void handle_layer_pioavail(struct ipath_devdata *dd)
{
	int ret;

	ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
	if (ret > 0)
		goto set;

	ret = __ipath_verbs_piobufavail(dd);
	if (ret > 0)
		goto set;

	return;
set:
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);
}
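
/*
 * Re-arm protocol for the PIO-buffer-available interrupt, as used by
 * handle_layer_pioavail() above and ipath_intr() below: when the
 * interrupt fires, the handler clears IPATH_S_PIOINTBUFAVAIL in the
 * dd->ipath_sendctrl shadow and writes the shadow to kr_sendctrl to
 * quiesce the source; if a layered consumer still wants buffers, the
 * bit is set and the shadow written again, so the chip interrupts
 * once more when buffers free up.
 */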
/*
 * Handle receive interrupts for user ports; this means a user
 * process was waiting for a packet to arrive, and didn't want
 * to poll
 */
static void handle_urcv(struct ipath_devdata *dd, u32 istat)
{
	u64 portr;
	int i;
	int rcvdint = 0;

	portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
		 infinipath_i_rcvavail_mask)
		| ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
		   infinipath_i_rcvurg_mask);
	for (i = 1; i < dd->ipath_cfgports; i++) {
		struct ipath_portdata *pd = dd->ipath_pd[i];

		if (portr & (1 << i) && pd && pd->port_cnt &&
		    test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
			int rcbit;

			clear_bit(IPATH_PORT_WAITING_RCV,
				  &pd->port_flag);
			rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT;
			/* rcbit is already a bit number; shifting it again
			 * would clear the wrong bit */
			clear_bit(rcbit, &dd->ipath_rcvctrl);
			wake_up_interruptible(&pd->port_wait);
			rcvdint = 1;
		}
	}
	if (rcvdint) {
		/* only want to take one interrupt, so turn off the rcv
		 * interrupt for all the ports that we did the wakeup on
		 * (but never for kernel port)
		 */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl);
	}
}
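
/*
 * Bit arithmetic used in handle_urcv() above: port i's receive
 * interrupt enable is assumed to live at bit
 * (INFINIPATH_R_INTRAVAIL_SHIFT + i) of the kr_rcvctrl shadow, so
 * clear_bit(rcbit, &dd->ipath_rcvctrl) followed by the single
 * kr_rcvctrl write turns off further receive interrupts for every
 * port just woken (each waiter re-arms its own bit when it sleeps
 * again), while the kernel port's enable is never touched because the
 * loop starts at port 1.
 */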
irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
{
	struct ipath_devdata *dd = data;
	u32 istat, chk0rcv = 0;
	ipath_err_t estat = 0;
	irqreturn_t ret;
	u32 p0bits, oldhead;
	static unsigned unexpected = 0;
	static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
		 (1U<<INFINIPATH_I_RCVURG_SHIFT);

	ipath_stats.sps_ints++;

	if (!(dd->ipath_flags & IPATH_PRESENT)) {
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;
	}

	/*
	 * this needs to be flags&initted, not statusp, so we keep
	 * taking interrupts even after link goes down, etc.
	 * Also, we *must* clear the interrupt at some point, or we won't
	 * take it again, which can be real bad for errors, etc...
	 */

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		ipath_bad_intr(dd, &unexpected);
		ret = IRQ_NONE;
		goto bail;
	}

	/*
	 * We try to avoid reading the interrupt status register, since
	 * that's a PIO read, and stalls the processor for up to about
	 * ~0.25 usec.  The idea is that if we processed a port0 packet,
	 * we blindly clear the port 0 receive interrupt bits, and nothing
	 * else, then return.  If other interrupts are pending, the chip
	 * will re-interrupt us as soon as we write the intclear register.
	 * We then won't process any more kernel packets (if not the 2nd
	 * time, then the 3rd or 4th) and we'll then handle the other
	 * interrupts.  We clear the interrupts first so that we don't
	 * lose intr for later packets that arrive while we are processing.
	 */
	oldhead = dd->ipath_port0head;
	if (oldhead != (u32) le64_to_cpu(*dd->ipath_hdrqtailptr)) {
		if (dd->ipath_flags & IPATH_GPIO_INTR) {
			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
					 (u64) (1 << 2));
			p0bits = port0rbits | INFINIPATH_I_GPIO;
		}
		else
			p0bits = port0rbits;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, p0bits);
		ipath_kreceive(dd);
		if (oldhead != dd->ipath_port0head) {
			ipath_stats.sps_fastrcvint++;
			goto done;
		}
		istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
	}
	else
		istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);

	if (unlikely(!istat)) {
		ipath_stats.sps_nullintr++;
		ret = IRQ_NONE; /* not our interrupt, or already handled */
		goto bail;
	}
	if (unlikely(istat == -1)) {
		ipath_bad_regread(dd);
		/* don't know if it was our interrupt or not */
		ret = IRQ_NONE;
		goto bail;
	}

	if (unexpected)
		unexpected = 0;

	if (unlikely(istat & ~infinipath_i_bitsextant))
		ipath_dev_err(dd,
			      "interrupt with unknown interrupts %x set\n",
			      istat & (u32) ~ infinipath_i_bitsextant);
	else
		ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);

	if (unlikely(istat & INFINIPATH_I_ERROR)) {
		ipath_stats.sps_errints++;
		estat = ipath_read_kreg64(dd,
					  dd->ipath_kregs->kr_errorstatus);
		if (!estat)
			dev_info(&dd->pcidev->dev, "error interrupt (%x), "
				 "but no error bits set!\n", istat);
		else if (estat == -1LL)
			/*
			 * should we try clearing all, or hope next read
			 * works?
			 */
			ipath_dev_err(dd, "Read of error status failed "
				      "(all bits set); ignoring\n");
		else
			if (handle_errors(dd, estat))
				/* force calling ipath_kreceive() */
				chk0rcv = 1;
	}

	p0bits = port0rbits;
	if (istat & INFINIPATH_I_GPIO) {
		/*
		 * Packets are available in the port 0 rcv queue.
		 * Eventually this needs to be generalized to check
		 * IPATH_GPIO_INTR, and the specific GPIO bit, if
		 * GPIO interrupts are used for anything else.
		 */
		if (unlikely(!(dd->ipath_flags & IPATH_GPIO_INTR))) {
			u32 gpiostatus;

			gpiostatus = ipath_read_kreg32(
				dd, dd->ipath_kregs->kr_gpio_status);
			ipath_dbg("Unexpected GPIO interrupt bits %x\n",
				  gpiostatus);
			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
					 gpiostatus);
		}
		else {
			/* Clear GPIO status bit 2 */
			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
					 (u64) (1 << 2));
			p0bits |= INFINIPATH_I_GPIO;
			chk0rcv = 1;
		}
	}
	chk0rcv |= istat & p0bits;

	/*
	 * clear the ones we will deal with on this round
	 * We clear it early, mostly for receive interrupts, so we
	 * know the chip will have seen this by the time we process
	 * the queue, and will re-interrupt if necessary.  The processor
	 * itself won't take the interrupt again until we return.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);

	/*
	 * handle port0 receive before checking for pio buffers available,
	 * since receives can overflow; piobuf waiters can afford a few
	 * extra cycles, since they were waiting anyway, and users waiting
	 * for receive are at the bottom.
	 */
	if (chk0rcv) {
		ipath_kreceive(dd);
		istat &= ~port0rbits;
	}

	if (istat & ((infinipath_i_rcvavail_mask <<
		      INFINIPATH_I_RCVAVAIL_SHIFT)
		     | (infinipath_i_rcvurg_mask <<
			INFINIPATH_I_RCVURG_SHIFT)))
		handle_urcv(dd, istat);

	if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
		clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);

		if (dd->ipath_portpiowait)
			handle_port_pioavail(dd);

		handle_layer_pioavail(dd);
	}

done:
	ret = IRQ_HANDLED;

bail:
	return ret;
}
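
/*
 * Walk-through of the port 0 fast path in ipath_intr() (a sketch of
 * the intended sequence, per the comments above):
 *
 *   1. The hdrq tail pointer shows new port 0 packets, so the handler
 *      blindly clears only the port 0 receive bits (plus the GPIO bit
 *      when GPIO signalling is in use) in kr_intclear, without reading
 *      kr_intstatus at all.
 *   2. ipath_kreceive() drains the queue; if the head moved, the
 *      handler counts sps_fastrcvint and returns without the expensive
 *      status read.
 *   3. If the head did not move, the interrupt was for something else,
 *      so the handler falls back to reading kr_intstatus and the
 *      normal path; any sources still pending simply re-interrupt
 *      after the intclear write.
 */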