2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include "ipath_kernel.h"
/* Driver-wide statistics block, shared across all infinipath devices;
 * fields (sps_*) are updated from interrupt and timer paths elsewhere. */
36 struct infinipath_stats ipath_stats;
39 * ipath_snap_cntr - snapshot a chip counter
40 * @dd: the infinipath device
41 * @creg: the counter to snapshot
43 * called from add_timer and user counter read calls, to deal with
44 * counters that wrap in "human time". The words sent and received, and
45 * the packets sent and received are all that we worry about. For now,
46 * at least, we don't worry about error counters, because if they wrap
47 * that quickly, we probably don't care. We may eventually just make this
48 * handle all the counters. word counters can wrap in about 20 seconds
49 * of full bandwidth traffic, packet counters in a few hours.
52 u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
/* NOTE(review): this excerpt is missing lines here — the opening brace and
 * the declarations of val, val64, the t0/t1 timestamps and irq flags are
 * elided in this view; do not edit structurally without the full file. */
61 /* If fast increment counters are only 32 bits, snapshot them,
62 * and maintain them as 64bit values in the driver */
63 if (!(dd->ipath_flags & IPATH_32BITCOUNTERS) &&
64 (creg == dd->ipath_cregs->cr_wordsendcnt ||
65 creg == dd->ipath_cregs->cr_wordrcvcnt ||
66 creg == dd->ipath_cregs->cr_pktsendcnt ||
67 creg == dd->ipath_cregs->cr_pktrcvcnt)) {
/* 64-bit chip counter: read it whole ... */
68 val64 = ipath_read_creg(dd, creg);
/* ... and fold the all-ones "chip read failed" pattern into the 32-bit
 * all-ones value so the timeout check below (val == -1) still fires;
 * otherwise val carries no information on this branch. */
69 val = val64 == ~0ULL ? ~0U : 0;
71 } else /* val64 just to keep gcc quiet... */
72 val64 = val = ipath_read_creg32(dd, creg);
74 * See if a second has passed. This is just a way to detect things
75 * that are quite broken. Normally this should take just a few
76 * cycles (the check is for long enough that we don't care if we get
77 * pre-empted.) An Opteron HT O read timeout is 4 seconds with
/* NOTE(review): missing lines — end of the comment above and the code
 * taking the t0/t1 timestamps around the register read are elided. */
/* A register read returning all-ones after more than a jiffy-second of
 * elapsed time is treated as a hardware/bus read timeout. */
81 if (time_before(t0 + HZ, t1) && val == -1) {
82 ipath_dev_err(dd, "Error! Read counter 0x%x timed out\n",
/* NOTE(review): missing lines — remainder of the error path elided. */
/* For the four fast-wrapping counters, maintain 64-bit shadow totals in
 * the driver by accumulating the delta since the last snapshot. The
 * word counters additionally feed ipath_traffic_wds (under the EEPROM
 * stats lock) for the "active time" heuristic in ipath_get_faststats. */
92 if (creg == dd->ipath_cregs->cr_wordsendcnt) {
93 if (val != dd->ipath_lastsword) {
94 dd->ipath_sword += val - dd->ipath_lastsword;
95 spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
96 dd->ipath_traffic_wds += val - dd->ipath_lastsword;
97 spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
98 dd->ipath_lastsword = val;
100 val64 = dd->ipath_sword;
101 } else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
102 if (val != dd->ipath_lastrword) {
103 dd->ipath_rword += val - dd->ipath_lastrword;
104 spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
105 dd->ipath_traffic_wds += val - dd->ipath_lastrword;
106 spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
107 dd->ipath_lastrword = val;
109 val64 = dd->ipath_rword;
110 } else if (creg == dd->ipath_cregs->cr_pktsendcnt) {
111 if (val != dd->ipath_lastspkts) {
112 dd->ipath_spkts += val - dd->ipath_lastspkts;
113 dd->ipath_lastspkts = val;
115 val64 = dd->ipath_spkts;
116 } else if (creg == dd->ipath_cregs->cr_pktrcvcnt) {
117 if (val != dd->ipath_lastrpkts) {
118 dd->ipath_rpkts += val - dd->ipath_lastrpkts;
119 dd->ipath_lastrpkts = val;
121 val64 = dd->ipath_rpkts;
/* NOTE(review): missing lines — final else branch, closing braces and
 * the `return val64;` are elided in this view. */
132 * ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
133 * @dd: the infinipath device
135 * print the delta of egrfull/hdrqfull errors for kernel ports no more than
136 * every 5 seconds. User processes are printed at close, but kernel doesn't
137 * close, so... Separate routine so may call from other places someday, and
138 * so function name when printed by _IPATH_INFO is meaningful
140 static void ipath_qcheck(struct ipath_devdata *dd)
/* NOTE(review): missing lines — opening brace and the declarations of the
 * message buffer `buf` and offset `blen` are elided in this view. */
/* Persists across calls so only the delta of all-ports hdrqfull
 * interrupts is reported each time. */
142 static u64 last_tot_hdrqfull;
/* Accumulate a single log line describing whichever "queue full"
 * conditions changed since the previous check; each section appends to
 * buf via snprintf and updates its "last seen" shadow copy. */
147 if (dd->ipath_pd[0]->port_hdrqfull != dd->ipath_p0_hdrqfull) {
148 blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
149 dd->ipath_pd[0]->port_hdrqfull -
150 dd->ipath_p0_hdrqfull);
151 dd->ipath_p0_hdrqfull = dd->ipath_pd[0]->port_hdrqfull;
153 if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
154 blen += snprintf(buf + blen, sizeof buf - blen,
/* NOTE(review): missing lines — the format string (and separator
 * handling) for this snprintf is elided in this view. */
158 (ipath_stats.sps_etidfull -
159 dd->ipath_last_tidfull));
160 dd->ipath_last_tidfull = ipath_stats.sps_etidfull;
164 * this is actually the number of hdrq full interrupts, not actual
165 * events, but at the moment that's mostly what I'm interested in.
166 * Actual count, etc. is in the counters, if needed. For production
167 * users this won't ordinarily be printed.
/* All-ports hdrqfull delta is only reported when packet/general
 * debugging is enabled, per the comment above. */
170 if ((ipath_debug & (__IPATH_PKTDBG | __IPATH_DBG)) &&
171 ipath_stats.sps_hdrqfull != last_tot_hdrqfull) {
172 blen += snprintf(buf + blen, sizeof buf - blen,
173 "%shdrqfull %llu (all ports)",
/* NOTE(review): missing lines — the separator argument and part of the
 * delta expression for this snprintf are elided in this view. */
176 (ipath_stats.sps_hdrqfull -
178 last_tot_hdrqfull = ipath_stats.sps_hdrqfull;
/* Presumably emitted only when blen != 0 — the guarding condition is
 * elided in this view; confirm against the full source. */
181 ipath_dbg("%s\n", buf);
/* Port-0 receive queue is non-empty (head != hardware tail) but the
 * port-0 packet count hasn't moved since last check: likely a missed
 * receive interrupt, so log it at packet-debug level. */
183 if (dd->ipath_port0head != (u32)
184 le64_to_cpu(*dd->ipath_hdrqtailptr)) {
185 if (dd->ipath_lastport0rcv_cnt ==
186 ipath_stats.sps_port0pkts) {
187 ipath_cdbg(PKT, "missing rcv interrupts? "
188 "port0 hd=%llx tl=%x; port0pkts %llx\n",
/* NOTE(review): missing lines — the head argument of this debug print
 * is elided in this view. */
190 le64_to_cpu(*dd->ipath_hdrqtailptr),
193 ipath_stats.sps_port0pkts);
195 dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
/* NOTE(review): missing lines — closing braces are elided in this view. */
/* Detect and repair an errormask register that no longer matches the
 * driver's shadow copy (e.g. clobbered by a chip freeze/parity event),
 * rewriting it and forcing re-delivery of any pending interrupts. */
199 static void ipath_chk_errormask(struct ipath_devdata *dd)
/* NOTE(review): missing lines — opening brace and the declarations of
 * `fixed` and `ctrl` are elided in this view. */
203 unsigned long errormask;
204 unsigned long hwerrs;
/* Nothing to check before the shadow mask is set up and init completes. */
206 if (!dd->ipath_errormask || !(dd->ipath_flags & IPATH_INITTED))
/* NOTE(review): missing line — the early `return;` is elided. */
209 errormask = ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
/* Hardware still agrees with the shadow copy: nothing to repair. */
211 if (errormask == dd->ipath_errormask)
/* NOTE(review): missing lines — early return and the increment of the
 * `fixed` repair counter are elided in this view. */
215 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
216 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
/* Restore the intended mask from the driver's shadow copy. */
218 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
219 dd->ipath_errormask);
/* If a hardware error is pending or the chip is frozen, the original
 * interrupt may have been lost while the mask was wrong. */
221 if ((hwerrs & dd->ipath_hwerrmask) ||
222 (ctrl & INFINIPATH_C_FREEZEMODE)) {
223 /* force re-interrupt of pending events, just in case */
224 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
225 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, 0ULL);
226 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
227 dev_info(&dd->pcidev->dev,
228 "errormask fixed(%u) %lx -> %lx, ctrl %x hwerr %lx\n",
229 fixed, errormask, (unsigned long)dd->ipath_errormask,
/* NOTE(review): missing lines — remaining dev_info arguments and the
 * `} else` introducing the no-freeze branch are elided in this view. */
232 ipath_dbg("errormask fixed(%u) %lx -> %lx, no freeze\n",
/* NOTE(review): missing line — the `fixed, errormask,` arguments are
 * elided in this view. */
234 (unsigned long)dd->ipath_errormask);
/* NOTE(review): missing lines — closing braces are elided. */
239 * ipath_get_faststats - get word counters from chip before they overflow
240 * @opaque: a pointer to the infinipath device ipath_devdata
242 * called from add_timer
244 void ipath_get_faststats(unsigned long opaque)
/* NOTE(review): missing lines — opening brace and the declarations of
 * val, cnt, iserr, flags and the ebuf decode buffer are elided here. */
246 struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
252 * don't access the chip while running diags, or memory diags can
/* NOTE(review): missing lines — end of the comment above and part of
 * the guard condition (diag-mode flag test) are elided in this view. */
255 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) ||
257 /* but re-arm the timer, for diags case; won't hurt other */
/* NOTE(review): missing lines — the `goto`/re-arm path for the guard
 * above is elided in this view. */
261 * We now try to maintain a "active timer", based on traffic
262 * exceeding a threshold, so we need to check the word-counts
263 * even if they are 64-bit.
/* Snapshot both word counters; as a side effect ipath_snap_cntr
 * accumulates their deltas into dd->ipath_traffic_wds. */
265 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
266 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
/* Consume the traffic-word accumulator under the same lock
 * ipath_snap_cntr uses to fill it, crediting 5s of "active time"
 * (one timer period) when traffic exceeded the threshold. */
267 spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
268 if (dd->ipath_traffic_wds >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
269 atomic_add(5, &dd->ipath_active_time); /* S/B #define */
270 dd->ipath_traffic_wds = 0;
271 spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
/* Packet counters only wrap fast enough to matter when the chip keeps
 * them in 32 bits; otherwise skip the extra register reads. */
273 if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
274 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
275 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
/* NOTE(review): missing lines — closing brace and the ipath_qcheck(dd)
 * call are presumably elided in this view; confirm against full file. */
281 * deal with repeat error suppression. Doesn't really matter if
282 * last error was almost a full interval ago, or just a few usecs
283 * ago; still won't get more than 2 per interval. We may want
284 * longer intervals for this eventually, could do with mod, counter
285 * or separate timer. Also see code in ipath_handle_errors() and
286 * ipath_handle_hwerrors().
/* Clear last-error timestamps so the error handlers' rate limiting
 * resets once per timer period. */
289 if (dd->ipath_lasterror)
290 dd->ipath_lasterror = 0;
291 if (dd->ipath_lasthwerror)
292 dd->ipath_lasthwerror = 0;
/* Once the suppression window has expired, log what was masked and
 * turn the masked error interrupts back on. */
293 if (dd->ipath_maskederrs
294 && time_after(jiffies, dd->ipath_unmasktime)) {
297 iserr = ipath_decode_err(ebuf, sizeof ebuf,
298 dd->ipath_maskederrs);
299 if (dd->ipath_maskederrs &
300 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
301 INFINIPATH_E_PKTERRS ))
302 ipath_dev_err(dd, "Re-enabling masked errors "
/* NOTE(review): missing lines — the "(%s)" argument and `else`
 * introducing the branches below are elided in this view. */
306 * rcvegrfull and rcvhdrqfull are "normal", for some
307 * types of processes (mostly benchmarks) that send
308 * huge numbers of messages, while not processing
309 * them. So only complain about these at debug
/* NOTE(review): missing lines — end of comment and the iserr test
 * selecting between the two debug prints are elided in this view. */
313 ipath_dbg("Re-enabling queue full errors (%s)\n",
/* NOTE(review): missing lines — the ebuf argument and `else` are
 * elided in this view. */
316 ipath_cdbg(ERRPKT, "Re-enabling packet"
317 " problem interrupt (%s)\n", ebuf);
320 /* re-enable masked errors */
321 dd->ipath_errormask |= dd->ipath_maskederrs;
322 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
323 dd->ipath_errormask);
324 dd->ipath_maskederrs = 0;
327 /* limit qfull messages to ~one per minute per port */
/* NOTE(review): `(++cnt & 0x10)` is true for 16 of every 32 timer ticks,
 * which does not match the "~one per minute" comment above (timer fires
 * every 5s). Looks like it should be a modulo/equality test — cannot fix
 * here because the loop body below is incomplete in this view. */
328 if ((++cnt & 0x10)) {
/* Reset the per-port "last seen" queue positions so the qfull
 * detection elsewhere re-reports at most once per window. */
329 for (val = dd->ipath_cfgports - 1; ((int)val) >= 0;
/* NOTE(review): missing lines — loop decrement and opening brace are
 * elided in this view. */
331 if (dd->ipath_lastegrheads[val] != -1)
332 dd->ipath_lastegrheads[val] = -1;
333 if (dd->ipath_lastrcvhdrqtails[val] != -1)
334 dd->ipath_lastrcvhdrqtails[val] = -1;
/* Repair the errormask register if hardware disagrees with the shadow. */
338 ipath_chk_errormask(dd);
/* Re-arm: this function runs every 5 seconds. */
340 mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
/* NOTE(review): missing line — closing brace elided in this view. */