/*
 * drivers/infiniband/hw/ipath/ipath_stats.c
 * (provenance: gitweb extraction, blob f0271415cd5b19558f7fae74baaef2330a96490d)
 */
/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 #include "ipath_kernel.h"
36 struct infinipath_stats ipath_stats;
38 /**
39 * ipath_snap_cntr - snapshot a chip counter
40 * @dd: the infinipath device
41 * @creg: the counter to snapshot
43 * called from add_timer and user counter read calls, to deal with
44 * counters that wrap in "human time". The words sent and received, and
45 * the packets sent and received are all that we worry about. For now,
46 * at least, we don't worry about error counters, because if they wrap
47 * that quickly, we probably don't care. We may eventually just make this
48 * handle all the counters. word counters can wrap in about 20 seconds
49 * of full bandwidth traffic, packet counters in a few hours.
52 u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
54 u32 val, reg64 = 0;
55 u64 val64;
56 unsigned long t0, t1;
57 u64 ret;
59 t0 = jiffies;
60 /* If fast increment counters are only 32 bits, snapshot them,
61 * and maintain them as 64bit values in the driver */
62 if (!(dd->ipath_flags & IPATH_32BITCOUNTERS) &&
63 (creg == dd->ipath_cregs->cr_wordsendcnt ||
64 creg == dd->ipath_cregs->cr_wordrcvcnt ||
65 creg == dd->ipath_cregs->cr_pktsendcnt ||
66 creg == dd->ipath_cregs->cr_pktrcvcnt)) {
67 val64 = ipath_read_creg(dd, creg);
68 val = val64 == ~0ULL ? ~0U : 0;
69 reg64 = 1;
70 } else /* val64 just to keep gcc quiet... */
71 val64 = val = ipath_read_creg32(dd, creg);
73 * See if a second has passed. This is just a way to detect things
74 * that are quite broken. Normally this should take just a few
75 * cycles (the check is for long enough that we don't care if we get
76 * pre-empted.) An Opteron HT O read timeout is 4 seconds with
77 * normal NB values
79 t1 = jiffies;
80 if (time_before(t0 + HZ, t1) && val == -1) {
81 ipath_dev_err(dd, "Error! Read counter 0x%x timed out\n",
82 creg);
83 ret = 0ULL;
84 goto bail;
86 if (reg64) {
87 ret = val64;
88 goto bail;
91 if (creg == dd->ipath_cregs->cr_wordsendcnt) {
92 if (val != dd->ipath_lastsword) {
93 dd->ipath_sword += val - dd->ipath_lastsword;
94 dd->ipath_lastsword = val;
96 val64 = dd->ipath_sword;
97 } else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
98 if (val != dd->ipath_lastrword) {
99 dd->ipath_rword += val - dd->ipath_lastrword;
100 dd->ipath_lastrword = val;
102 val64 = dd->ipath_rword;
103 } else if (creg == dd->ipath_cregs->cr_pktsendcnt) {
104 if (val != dd->ipath_lastspkts) {
105 dd->ipath_spkts += val - dd->ipath_lastspkts;
106 dd->ipath_lastspkts = val;
108 val64 = dd->ipath_spkts;
109 } else if (creg == dd->ipath_cregs->cr_pktrcvcnt) {
110 if (val != dd->ipath_lastrpkts) {
111 dd->ipath_rpkts += val - dd->ipath_lastrpkts;
112 dd->ipath_lastrpkts = val;
114 val64 = dd->ipath_rpkts;
115 } else
116 val64 = (u64) val;
118 ret = val64;
120 bail:
121 return ret;
125 * ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
126 * @dd: the infinipath device
128 * print the delta of egrfull/hdrqfull errors for kernel ports no more than
129 * every 5 seconds. User processes are printed at close, but kernel doesn't
130 * close, so... Separate routine so may call from other places someday, and
131 * so function name when printed by _IPATH_INFO is meaningfull
133 static void ipath_qcheck(struct ipath_devdata *dd)
135 static u64 last_tot_hdrqfull;
136 size_t blen = 0;
137 char buf[128];
139 *buf = 0;
140 if (dd->ipath_pd[0]->port_hdrqfull != dd->ipath_p0_hdrqfull) {
141 blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
142 dd->ipath_pd[0]->port_hdrqfull -
143 dd->ipath_p0_hdrqfull);
144 dd->ipath_p0_hdrqfull = dd->ipath_pd[0]->port_hdrqfull;
146 if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
147 blen += snprintf(buf + blen, sizeof buf - blen,
148 "%srcvegrfull %llu",
149 blen ? ", " : "",
150 (unsigned long long)
151 (ipath_stats.sps_etidfull -
152 dd->ipath_last_tidfull));
153 dd->ipath_last_tidfull = ipath_stats.sps_etidfull;
157 * this is actually the number of hdrq full interrupts, not actual
158 * events, but at the moment that's mostly what I'm interested in.
159 * Actual count, etc. is in the counters, if needed. For production
160 * users this won't ordinarily be printed.
163 if ((ipath_debug & (__IPATH_PKTDBG | __IPATH_DBG)) &&
164 ipath_stats.sps_hdrqfull != last_tot_hdrqfull) {
165 blen += snprintf(buf + blen, sizeof buf - blen,
166 "%shdrqfull %llu (all ports)",
167 blen ? ", " : "",
168 (unsigned long long)
169 (ipath_stats.sps_hdrqfull -
170 last_tot_hdrqfull));
171 last_tot_hdrqfull = ipath_stats.sps_hdrqfull;
173 if (blen)
174 ipath_dbg("%s\n", buf);
176 if (dd->ipath_port0head != (u32)
177 le64_to_cpu(*dd->ipath_hdrqtailptr)) {
178 if (dd->ipath_lastport0rcv_cnt ==
179 ipath_stats.sps_port0pkts) {
180 ipath_cdbg(PKT, "missing rcv interrupts? "
181 "port0 hd=%llx tl=%x; port0pkts %llx\n",
182 (unsigned long long)
183 le64_to_cpu(*dd->ipath_hdrqtailptr),
184 dd->ipath_port0head,
185 (unsigned long long)
186 ipath_stats.sps_port0pkts);
188 dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
192 static void ipath_chk_errormask(struct ipath_devdata *dd)
194 static u32 fixed;
195 u32 ctrl;
196 unsigned long errormask;
197 unsigned long hwerrs;
199 if (!dd->ipath_errormask || !(dd->ipath_flags & IPATH_INITTED))
200 return;
202 errormask = ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
204 if (errormask == dd->ipath_errormask)
205 return;
206 fixed++;
208 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
209 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
211 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
212 dd->ipath_errormask);
214 if ((hwerrs & dd->ipath_hwerrmask) ||
215 (ctrl & INFINIPATH_C_FREEZEMODE)) {
216 /* force re-interrupt of pending events, just in case */
217 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
218 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, 0ULL);
219 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
220 dev_info(&dd->pcidev->dev,
221 "errormask fixed(%u) %lx -> %lx, ctrl %x hwerr %lx\n",
222 fixed, errormask, (unsigned long)dd->ipath_errormask,
223 ctrl, hwerrs);
224 } else
225 ipath_dbg("errormask fixed(%u) %lx -> %lx, no freeze\n",
226 fixed, errormask,
227 (unsigned long)dd->ipath_errormask);
232 * ipath_get_faststats - get word counters from chip before they overflow
233 * @opaque - contains a pointer to the infinipath device ipath_devdata
235 * called from add_timer
237 void ipath_get_faststats(unsigned long opaque)
239 struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
240 u32 val;
241 static unsigned cnt;
242 unsigned long flags;
243 u64 traffic_wds;
246 * don't access the chip while running diags, or memory diags can
247 * fail
249 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) ||
250 ipath_diag_inuse)
251 /* but re-arm the timer, for diags case; won't hurt other */
252 goto done;
255 * We now try to maintain a "active timer", based on traffic
256 * exceeding a threshold, so we need to check the word-counts
257 * even if they are 64-bit.
259 traffic_wds = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt) +
260 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
261 spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
262 traffic_wds -= dd->ipath_traffic_wds;
263 dd->ipath_traffic_wds += traffic_wds;
264 if (traffic_wds >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
265 atomic_add(5, &dd->ipath_active_time); /* S/B #define */
266 spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
268 if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
269 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
270 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
273 ipath_qcheck(dd);
276 * deal with repeat error suppression. Doesn't really matter if
277 * last error was almost a full interval ago, or just a few usecs
278 * ago; still won't get more than 2 per interval. We may want
279 * longer intervals for this eventually, could do with mod, counter
280 * or separate timer. Also see code in ipath_handle_errors() and
281 * ipath_handle_hwerrors().
284 if (dd->ipath_lasterror)
285 dd->ipath_lasterror = 0;
286 if (dd->ipath_lasthwerror)
287 dd->ipath_lasthwerror = 0;
288 if (dd->ipath_maskederrs
289 && time_after(jiffies, dd->ipath_unmasktime)) {
290 char ebuf[256];
291 int iserr;
292 iserr = ipath_decode_err(ebuf, sizeof ebuf,
293 dd->ipath_maskederrs);
294 if (dd->ipath_maskederrs &
295 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
296 INFINIPATH_E_PKTERRS ))
297 ipath_dev_err(dd, "Re-enabling masked errors "
298 "(%s)\n", ebuf);
299 else {
301 * rcvegrfull and rcvhdrqfull are "normal", for some
302 * types of processes (mostly benchmarks) that send
303 * huge numbers of messages, while not processing
304 * them. So only complain about these at debug
305 * level.
307 if (iserr)
308 ipath_dbg("Re-enabling queue full errors (%s)\n",
309 ebuf);
310 else
311 ipath_cdbg(ERRPKT, "Re-enabling packet"
312 " problem interrupt (%s)\n", ebuf);
315 /* re-enable masked errors */
316 dd->ipath_errormask |= dd->ipath_maskederrs;
317 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
318 dd->ipath_errormask);
319 dd->ipath_maskederrs = 0;
322 /* limit qfull messages to ~one per minute per port */
323 if ((++cnt & 0x10)) {
324 for (val = dd->ipath_cfgports - 1; ((int)val) >= 0;
325 val--) {
326 if (dd->ipath_lastegrheads[val] != -1)
327 dd->ipath_lastegrheads[val] = -1;
328 if (dd->ipath_lastrcvhdrqtails[val] != -1)
329 dd->ipath_lastrcvhdrqtails[val] = -1;
333 ipath_chk_errormask(dd);
334 done:
335 mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);