/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ipath_kernel.h"
36 struct infinipath_stats ipath_stats
;
39 * ipath_snap_cntr - snapshot a chip counter
40 * @dd: the infinipath device
41 * @creg: the counter to snapshot
43 * called from add_timer and user counter read calls, to deal with
44 * counters that wrap in "human time". The words sent and received, and
45 * the packets sent and received are all that we worry about. For now,
46 * at least, we don't worry about error counters, because if they wrap
47 * that quickly, we probably don't care. We may eventually just make this
48 * handle all the counters. word counters can wrap in about 20 seconds
49 * of full bandwidth traffic, packet counters in a few hours.
52 u64
ipath_snap_cntr(struct ipath_devdata
*dd
, ipath_creg creg
)
60 /* If fast increment counters are only 32 bits, snapshot them,
61 * and maintain them as 64bit values in the driver */
62 if (!(dd
->ipath_flags
& IPATH_32BITCOUNTERS
) &&
63 (creg
== dd
->ipath_cregs
->cr_wordsendcnt
||
64 creg
== dd
->ipath_cregs
->cr_wordrcvcnt
||
65 creg
== dd
->ipath_cregs
->cr_pktsendcnt
||
66 creg
== dd
->ipath_cregs
->cr_pktrcvcnt
)) {
67 val64
= ipath_read_creg(dd
, creg
);
68 val
= val64
== ~0ULL ? ~0U : 0;
70 } else /* val64 just to keep gcc quiet... */
71 val64
= val
= ipath_read_creg32(dd
, creg
);
73 * See if a second has passed. This is just a way to detect things
74 * that are quite broken. Normally this should take just a few
75 * cycles (the check is for long enough that we don't care if we get
76 * pre-empted.) An Opteron HT O read timeout is 4 seconds with
80 if (time_before(t0
+ HZ
, t1
) && val
== -1) {
81 ipath_dev_err(dd
, "Error! Read counter 0x%x timed out\n",
91 if (creg
== dd
->ipath_cregs
->cr_wordsendcnt
) {
92 if (val
!= dd
->ipath_lastsword
) {
93 dd
->ipath_sword
+= val
- dd
->ipath_lastsword
;
94 dd
->ipath_lastsword
= val
;
96 val64
= dd
->ipath_sword
;
97 } else if (creg
== dd
->ipath_cregs
->cr_wordrcvcnt
) {
98 if (val
!= dd
->ipath_lastrword
) {
99 dd
->ipath_rword
+= val
- dd
->ipath_lastrword
;
100 dd
->ipath_lastrword
= val
;
102 val64
= dd
->ipath_rword
;
103 } else if (creg
== dd
->ipath_cregs
->cr_pktsendcnt
) {
104 if (val
!= dd
->ipath_lastspkts
) {
105 dd
->ipath_spkts
+= val
- dd
->ipath_lastspkts
;
106 dd
->ipath_lastspkts
= val
;
108 val64
= dd
->ipath_spkts
;
109 } else if (creg
== dd
->ipath_cregs
->cr_pktrcvcnt
) {
110 if (val
!= dd
->ipath_lastrpkts
) {
111 dd
->ipath_rpkts
+= val
- dd
->ipath_lastrpkts
;
112 dd
->ipath_lastrpkts
= val
;
114 val64
= dd
->ipath_rpkts
;
125 * ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
126 * @dd: the infinipath device
128 * print the delta of egrfull/hdrqfull errors for kernel ports no more than
129 * every 5 seconds. User processes are printed at close, but kernel doesn't
130 * close, so... Separate routine so may call from other places someday, and
131 * so function name when printed by _IPATH_INFO is meaningfull
133 static void ipath_qcheck(struct ipath_devdata
*dd
)
135 static u64 last_tot_hdrqfull
;
136 struct ipath_portdata
*pd
= dd
->ipath_pd
[0];
141 if (pd
->port_hdrqfull
!= dd
->ipath_p0_hdrqfull
) {
142 blen
= snprintf(buf
, sizeof buf
, "port 0 hdrqfull %u",
144 dd
->ipath_p0_hdrqfull
);
145 dd
->ipath_p0_hdrqfull
= pd
->port_hdrqfull
;
147 if (ipath_stats
.sps_etidfull
!= dd
->ipath_last_tidfull
) {
148 blen
+= snprintf(buf
+ blen
, sizeof buf
- blen
,
152 (ipath_stats
.sps_etidfull
-
153 dd
->ipath_last_tidfull
));
154 dd
->ipath_last_tidfull
= ipath_stats
.sps_etidfull
;
158 * this is actually the number of hdrq full interrupts, not actual
159 * events, but at the moment that's mostly what I'm interested in.
160 * Actual count, etc. is in the counters, if needed. For production
161 * users this won't ordinarily be printed.
164 if ((ipath_debug
& (__IPATH_PKTDBG
| __IPATH_DBG
)) &&
165 ipath_stats
.sps_hdrqfull
!= last_tot_hdrqfull
) {
166 blen
+= snprintf(buf
+ blen
, sizeof buf
- blen
,
167 "%shdrqfull %llu (all ports)",
170 (ipath_stats
.sps_hdrqfull
-
172 last_tot_hdrqfull
= ipath_stats
.sps_hdrqfull
;
175 ipath_dbg("%s\n", buf
);
177 if (pd
->port_head
!= (u32
)
178 le64_to_cpu(*dd
->ipath_hdrqtailptr
)) {
179 if (dd
->ipath_lastport0rcv_cnt
==
180 ipath_stats
.sps_port0pkts
) {
181 ipath_cdbg(PKT
, "missing rcv interrupts? "
182 "port0 hd=%llx tl=%x; port0pkts %llx\n",
184 le64_to_cpu(*dd
->ipath_hdrqtailptr
),
187 ipath_stats
.sps_port0pkts
);
189 dd
->ipath_lastport0rcv_cnt
= ipath_stats
.sps_port0pkts
;
193 static void ipath_chk_errormask(struct ipath_devdata
*dd
)
197 unsigned long errormask
;
198 unsigned long hwerrs
;
200 if (!dd
->ipath_errormask
|| !(dd
->ipath_flags
& IPATH_INITTED
))
203 errormask
= ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_errormask
);
205 if (errormask
== dd
->ipath_errormask
)
209 hwerrs
= ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_hwerrstatus
);
210 ctrl
= ipath_read_kreg32(dd
, dd
->ipath_kregs
->kr_control
);
212 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_errormask
,
213 dd
->ipath_errormask
);
215 if ((hwerrs
& dd
->ipath_hwerrmask
) ||
216 (ctrl
& INFINIPATH_C_FREEZEMODE
)) {
217 /* force re-interrupt of pending events, just in case */
218 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_hwerrclear
, 0ULL);
219 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_errorclear
, 0ULL);
220 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_intclear
, 0ULL);
221 dev_info(&dd
->pcidev
->dev
,
222 "errormask fixed(%u) %lx -> %lx, ctrl %x hwerr %lx\n",
223 fixed
, errormask
, (unsigned long)dd
->ipath_errormask
,
226 ipath_dbg("errormask fixed(%u) %lx -> %lx, no freeze\n",
228 (unsigned long)dd
->ipath_errormask
);
233 * ipath_get_faststats - get word counters from chip before they overflow
234 * @opaque - contains a pointer to the infinipath device ipath_devdata
236 * called from add_timer
238 void ipath_get_faststats(unsigned long opaque
)
240 struct ipath_devdata
*dd
= (struct ipath_devdata
*) opaque
;
247 * don't access the chip while running diags, or memory diags can
250 if (!dd
->ipath_kregbase
|| !(dd
->ipath_flags
& IPATH_INITTED
) ||
252 /* but re-arm the timer, for diags case; won't hurt other */
256 * We now try to maintain a "active timer", based on traffic
257 * exceeding a threshold, so we need to check the word-counts
258 * even if they are 64-bit.
260 traffic_wds
= ipath_snap_cntr(dd
, dd
->ipath_cregs
->cr_wordsendcnt
) +
261 ipath_snap_cntr(dd
, dd
->ipath_cregs
->cr_wordrcvcnt
);
262 spin_lock_irqsave(&dd
->ipath_eep_st_lock
, flags
);
263 traffic_wds
-= dd
->ipath_traffic_wds
;
264 dd
->ipath_traffic_wds
+= traffic_wds
;
265 if (traffic_wds
>= IPATH_TRAFFIC_ACTIVE_THRESHOLD
)
266 atomic_add(5, &dd
->ipath_active_time
); /* S/B #define */
267 spin_unlock_irqrestore(&dd
->ipath_eep_st_lock
, flags
);
269 if (dd
->ipath_flags
& IPATH_32BITCOUNTERS
) {
270 ipath_snap_cntr(dd
, dd
->ipath_cregs
->cr_pktsendcnt
);
271 ipath_snap_cntr(dd
, dd
->ipath_cregs
->cr_pktrcvcnt
);
277 * deal with repeat error suppression. Doesn't really matter if
278 * last error was almost a full interval ago, or just a few usecs
279 * ago; still won't get more than 2 per interval. We may want
280 * longer intervals for this eventually, could do with mod, counter
281 * or separate timer. Also see code in ipath_handle_errors() and
282 * ipath_handle_hwerrors().
285 if (dd
->ipath_lasterror
)
286 dd
->ipath_lasterror
= 0;
287 if (dd
->ipath_lasthwerror
)
288 dd
->ipath_lasthwerror
= 0;
289 if (dd
->ipath_maskederrs
290 && time_after(jiffies
, dd
->ipath_unmasktime
)) {
293 iserr
= ipath_decode_err(ebuf
, sizeof ebuf
,
294 dd
->ipath_maskederrs
);
295 if (dd
->ipath_maskederrs
&
296 ~(INFINIPATH_E_RRCVEGRFULL
| INFINIPATH_E_RRCVHDRFULL
|
297 INFINIPATH_E_PKTERRS
))
298 ipath_dev_err(dd
, "Re-enabling masked errors "
302 * rcvegrfull and rcvhdrqfull are "normal", for some
303 * types of processes (mostly benchmarks) that send
304 * huge numbers of messages, while not processing
305 * them. So only complain about these at debug
309 ipath_dbg("Re-enabling queue full errors (%s)\n",
312 ipath_cdbg(ERRPKT
, "Re-enabling packet"
313 " problem interrupt (%s)\n", ebuf
);
316 /* re-enable masked errors */
317 dd
->ipath_errormask
|= dd
->ipath_maskederrs
;
318 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_errormask
,
319 dd
->ipath_errormask
);
320 dd
->ipath_maskederrs
= 0;
323 /* limit qfull messages to ~one per minute per port */
324 if ((++cnt
& 0x10)) {
325 for (i
= (int) dd
->ipath_cfgports
; --i
>= 0; ) {
326 struct ipath_portdata
*pd
= dd
->ipath_pd
[i
];
328 if (pd
&& pd
->port_lastrcvhdrqtail
!= -1)
329 pd
->port_lastrcvhdrqtail
= -1;
333 ipath_chk_errormask(dd
);
335 mod_timer(&dd
->ipath_stats_timer
, jiffies
+ HZ
* 5);