/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * This file contains all of the code that is specific to the
 * InfiniPath PCIe chip.
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include "ipath_kernel.h"
#include "ipath_registers.h"
static void ipath_setup_pe_setextled(struct ipath_devdata *, u64, u64);

/*
 * This file contains all the chip-specific register information and
 * access functions for the QLogic InfiniPath PCI-Express chip.
 *
 * This lists the InfiniPath registers, in the actual chip layout.
 * This structure should never be directly accessed.
 */
struct _infinipath_do_not_use_kernel_regs {
	unsigned long long Revision;
	unsigned long long Control;
	unsigned long long PageAlign;
	unsigned long long PortCnt;
	unsigned long long DebugPortSelect;
	unsigned long long Reserved0;
	unsigned long long SendRegBase;
	unsigned long long UserRegBase;
	unsigned long long CounterRegBase;
	unsigned long long Scratch;
	unsigned long long Reserved1;
	unsigned long long Reserved2;
	unsigned long long IntBlocked;
	unsigned long long IntMask;
	unsigned long long IntStatus;
	unsigned long long IntClear;
	unsigned long long ErrorMask;
	unsigned long long ErrorStatus;
	unsigned long long ErrorClear;
	unsigned long long HwErrMask;
	unsigned long long HwErrStatus;
	unsigned long long HwErrClear;
	unsigned long long HwDiagCtrl;
	unsigned long long MDIO;
	unsigned long long IBCStatus;
	unsigned long long IBCCtrl;
	unsigned long long ExtStatus;
	unsigned long long ExtCtrl;
	unsigned long long GPIOOut;
	unsigned long long GPIOMask;
	unsigned long long GPIOStatus;
	unsigned long long GPIOClear;
	unsigned long long RcvCtrl;
	unsigned long long RcvBTHQP;
	unsigned long long RcvHdrSize;
	unsigned long long RcvHdrCnt;
	unsigned long long RcvHdrEntSize;
	unsigned long long RcvTIDBase;
	unsigned long long RcvTIDCnt;
	unsigned long long RcvEgrBase;
	unsigned long long RcvEgrCnt;
	unsigned long long RcvBufBase;
	unsigned long long RcvBufSize;
	unsigned long long RxIntMemBase;
	unsigned long long RxIntMemSize;
	unsigned long long RcvPartitionKey;
	unsigned long long Reserved3;
	unsigned long long RcvPktLEDCnt;
	unsigned long long Reserved4[8];
	unsigned long long SendCtrl;
	unsigned long long SendPIOBufBase;
	unsigned long long SendPIOSize;
	unsigned long long SendPIOBufCnt;
	unsigned long long SendPIOAvailAddr;
	unsigned long long TxIntMemBase;
	unsigned long long TxIntMemSize;
	unsigned long long Reserved5;
	unsigned long long PCIeRBufTestReg0;
	unsigned long long PCIeRBufTestReg1;
	unsigned long long Reserved51[6];
	unsigned long long SendBufferError;
	unsigned long long SendBufferErrorCONT1;
	unsigned long long Reserved6SBE[6];
	unsigned long long RcvHdrAddr0;
	unsigned long long RcvHdrAddr1;
	unsigned long long RcvHdrAddr2;
	unsigned long long RcvHdrAddr3;
	unsigned long long RcvHdrAddr4;
	unsigned long long Reserved7RHA[11];
	unsigned long long RcvHdrTailAddr0;
	unsigned long long RcvHdrTailAddr1;
	unsigned long long RcvHdrTailAddr2;
	unsigned long long RcvHdrTailAddr3;
	unsigned long long RcvHdrTailAddr4;
	unsigned long long Reserved8RHTA[11];
	unsigned long long Reserved9SW[8];
	unsigned long long SerdesConfig0;
	unsigned long long SerdesConfig1;
	unsigned long long SerdesStatus;
	unsigned long long XGXSConfig;
	unsigned long long IBPLLCfg;
	unsigned long long Reserved10SW2[3];
	unsigned long long PCIEQ0SerdesConfig0;
	unsigned long long PCIEQ0SerdesConfig1;
	unsigned long long PCIEQ0SerdesStatus;
	unsigned long long Reserved11;
	unsigned long long PCIEQ1SerdesConfig0;
	unsigned long long PCIEQ1SerdesConfig1;
	unsigned long long PCIEQ1SerdesStatus;
	unsigned long long Reserved12;
};
struct _infinipath_do_not_use_counters {
	__u64 LBIntCnt;
	__u64 LBFlowStallCnt;
	__u64 Reserved1;
	__u64 TxUnsupVLErrCnt;
	__u64 TxDataPktCnt;
	__u64 TxFlowPktCnt;
	__u64 TxDwordCnt;
	__u64 TxLenErrCnt;
	__u64 TxMaxMinLenErrCnt;
	__u64 TxUnderrunCnt;
	__u64 TxFlowStallCnt;
	__u64 TxDroppedPktCnt;
	__u64 RxDroppedPktCnt;
	__u64 RxDataPktCnt;
	__u64 RxFlowPktCnt;
	__u64 RxDwordCnt;
	__u64 RxLenErrCnt;
	__u64 RxMaxMinLenErrCnt;
	__u64 RxICRCErrCnt;
	__u64 RxVCRCErrCnt;
	__u64 RxFlowCtrlErrCnt;
	__u64 RxBadFormatCnt;
	__u64 RxLinkProblemCnt;
	__u64 RxEBPCnt;
	__u64 RxLPCRCErrCnt;
	__u64 RxBufOvflCnt;
	__u64 RxTIDFullErrCnt;
	__u64 RxTIDValidErrCnt;
	__u64 RxPKeyMismatchCnt;
	__u64 RxP0HdrEgrOvflCnt;
	__u64 RxP1HdrEgrOvflCnt;
	__u64 RxP2HdrEgrOvflCnt;
	__u64 RxP3HdrEgrOvflCnt;
	__u64 RxP4HdrEgrOvflCnt;
	__u64 RxP5HdrEgrOvflCnt;
	__u64 RxP6HdrEgrOvflCnt;
	__u64 RxP7HdrEgrOvflCnt;
	__u64 RxP8HdrEgrOvflCnt;
	__u64 Reserved2;
	__u64 Reserved3;
	__u64 IBStatusChangeCnt;
	__u64 IBLinkErrRecoveryCnt;
	__u64 IBLinkDownedCnt;
	__u64 IBSymbolErrCnt;
};
#define IPATH_KREG_OFFSET(field) (offsetof( \
	struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
#define IPATH_CREG_OFFSET(field) (offsetof( \
	struct _infinipath_do_not_use_counters, field) / sizeof(u64))
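/*
 * Illustrative note: these macros convert a field name in the layout
 * structs above into an index in units of 64-bit registers, not bytes.
 * For example, IPATH_KREG_OFFSET(Revision) is 0 and
 * IPATH_KREG_OFFSET(Control) is 1, since Control is the second u64 in
 * struct _infinipath_do_not_use_kernel_regs.
 */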
static const struct ipath_kregs ipath_pe_kregs = {
	.kr_control = IPATH_KREG_OFFSET(Control),
	.kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
	.kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
	.kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
	.kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
	.kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
	.kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
	.kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
	.kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
	.kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
	.kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
	.kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
	.kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
	.kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
	.kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
	.kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
	.kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
	.kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
	.kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
	.kr_intclear = IPATH_KREG_OFFSET(IntClear),
	.kr_intmask = IPATH_KREG_OFFSET(IntMask),
	.kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
	.kr_mdio = IPATH_KREG_OFFSET(MDIO),
	.kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
	.kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
	.kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
	.kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
	.kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
	.kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
	.kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
	.kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
	.kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
	.kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
	.kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
	.kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
	.kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
	.kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
	.kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
	.kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
	.kr_revision = IPATH_KREG_OFFSET(Revision),
	.kr_scratch = IPATH_KREG_OFFSET(Scratch),
	.kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
	.kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
	.kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
	.kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
	.kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
	.kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
	.kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
	.kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
	.kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
	.kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
	.kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
	.kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
	.kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
	.kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
	.kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg),

	/*
	 * These should not be used directly via ipath_write_kreg64(),
	 * use them with ipath_write_kreg64_port(),
	 */
	.kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
	.kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),

	/* The rcvpktled register controls one of the debug port signals, so
	 * a packet activity LED can be connected to it. */
	.kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
	.kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
	.kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
	.kr_pcieq0serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0),
	.kr_pcieq0serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1),
	.kr_pcieq0serdesstatus = IPATH_KREG_OFFSET(PCIEQ0SerdesStatus),
	.kr_pcieq1serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0),
	.kr_pcieq1serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1),
	.kr_pcieq1serdesstatus = IPATH_KREG_OFFSET(PCIEQ1SerdesStatus)
};
static const struct ipath_cregs ipath_pe_cregs = {
	.cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
	.cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
	.cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
	.cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
	.cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
	.cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
	.cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
	.cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
	.cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
	.cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
	.cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
	.cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
	.cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
	.cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
	.cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
	.cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
	.cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
	.cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
	.cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
	.cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
	.cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
	.cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
	.cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
	.cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
	.cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
	.cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
	.cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
	.cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
	.cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
	.cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
	.cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
	.cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
	.cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
};
/* kr_intstatus, kr_intclear, kr_intmask bits */
#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)

/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK  0x000000000000003fULL
#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
#define INFINIPATH_HWE_PCIEPOISONEDTLP      0x0000000010000000ULL
#define INFINIPATH_HWE_PCIECPLTIMEOUT       0x0000000020000000ULL
#define INFINIPATH_HWE_PCIEBUSPARITYXTLH    0x0000000040000000ULL
#define INFINIPATH_HWE_PCIEBUSPARITYXADM    0x0000000080000000ULL
#define INFINIPATH_HWE_PCIEBUSPARITYRADM    0x0000000100000000ULL
#define INFINIPATH_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
#define INFINIPATH_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
#define INFINIPATH_HWE_PCIE1PLLFAILED       0x0400000000000000ULL
#define INFINIPATH_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
#define INFINIPATH_HWE_SERDESPLLFAILED      0x1000000000000000ULL

#define IBA6120_IBCS_LINKTRAININGSTATE_MASK 0xf
#define IBA6120_IBCS_LINKSTATE_SHIFT 4

/* kr_extstatus bits */
#define INFINIPATH_EXTS_FREQSEL 0x2
#define INFINIPATH_EXTS_SERDESSEL 0x4
#define INFINIPATH_EXTS_MEMBIST_ENDTEST     0x0000000000004000
#define INFINIPATH_EXTS_MEMBIST_FOUND       0x0000000000008000

#define _IPATH_GPIO_SDA_NUM 1
#define _IPATH_GPIO_SCL_NUM 0

#define IPATH_GPIO_SDA (1ULL << \
	(_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
#define IPATH_GPIO_SCL (1ULL << \
	(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))

#define INFINIPATH_R_INTRAVAIL_SHIFT 16
#define INFINIPATH_R_TAILUPD_SHIFT 31
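/*
 * Note (illustrative): the two IPATH_GPIO_* macros above turn a GPIO pin
 * number into its bit in the ExtCtrl GPIO output-enable field, e.g. with
 * _IPATH_GPIO_SDA_NUM == 1 the SDA bit is
 * 1ULL << (1 + INFINIPATH_EXTC_GPIOOE_SHIFT).  The same pin pair is later
 * handed to the generic code via dd->ipath_gpio_sda/scl in
 * ipath_init_pe_variables() for the I2C-attached serial flash.
 */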
/* 6120 specific hardware errors... */
static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
	INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
	INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
	/*
	 * In practice, it's unlikely that we'll see PCIe PLL, or bus
	 * parity or memory parity error failures, because most likely we
	 * won't be able to talk to the core of the chip.  Nonetheless, we
	 * might see them, if they are in parts of the PCIe core that aren't
	 * essential.
	 */
	INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
	INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
	INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
	INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
	INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
	INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
	INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
};

#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
			 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
			<< INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
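/*
 * Note: TXE_PIO_PARITY collects just the two TXE memory parity sources
 * related to the PIO buffers (the buffer itself and its PBC), already
 * shifted into position in the hardware error status word; it is the set
 * of hwerrors that ipath_pe_txe_recover() below treats as recoverable.
 */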
static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
			       u32, unsigned long);

/*
 * On platforms using this chip, and not having ordered WC stores, we
 * can get TXE parity errors due to speculative reads to the PIO buffers,
 * and this, due to a chip bug can result in (many) false parity error
 * reports.  So it's a debug print on those, and an info print on systems
 * where the speculative reads don't occur.
 */
static void ipath_pe_txe_recover(struct ipath_devdata *dd)
{
	if (ipath_unordered_wc())
		ipath_dbg("Recovering from TXE PIO parity error\n");
	else {
		++ipath_stats.sps_txeparity;
		dev_info(&dd->pcidev->dev,
			 "Recovering from TXE PIO parity error\n");
	}
}
/**
 * ipath_pe_handle_hwerrors - display hardware errors.
 * @dd: the infinipath device
 * @msg: the output buffer
 * @msgl: the size of the output buffer
 *
 * Use same msg buffer as regular errors to avoid excessive stack
 * use.  Most hardware errors are catastrophic, but for right now,
 * we'll print them and continue.  We reuse the same message buffer as
 * ipath_handle_errors() to avoid excessive stack usage.
 */
static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
				     size_t msgl)
{
	u64 hwerrs;
	u32 bits, ctrl;
	int isfatal = 0;
	char bitsmsg[64];
	int log_idx;

	hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
	if (!hwerrs) {
		/*
		 * better than printing confusing messages
		 * This seems to be related to clearing the crc error, or
		 * the pll error during init.
		 */
		ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
		return;
	} else if (hwerrs == ~0ULL) {
		ipath_dev_err(dd, "Read of hardware error status failed "
			      "(all bits set); ignoring\n");
		return;
	}
	ipath_stats.sps_hwerrs++;

	/* Always clear the error status register, except MEMBISTFAIL,
	 * regardless of whether we continue or stop using the chip.
	 * We want that set so we know it failed, even across driver reload.
	 * We'll still ignore it in the hwerrmask.  We do this partly for
	 * diagnostics, but also for support */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
			 hwerrs & ~INFINIPATH_HWE_MEMBISTFAILED);

	hwerrs &= dd->ipath_hwerrmask;

	/* We log some errors to EEPROM, check if we have any of those. */
	for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
		if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
			ipath_inc_eeprom_err(dd, log_idx, 1);

	/*
	 * make sure we get this much out, unless told to be quiet,
	 * or it's occurred within the last 5 seconds
	 */
	if ((hwerrs & ~(dd->ipath_lasthwerror |
			((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
			  INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
			 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
	    (ipath_debug & __IPATH_VERBDBG))
		dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
			 "(cleared)\n", (unsigned long long) hwerrs);
	dd->ipath_lasthwerror |= hwerrs;

	if (hwerrs & ~dd->ipath_hwe_bitsextant)
		ipath_dev_err(dd, "hwerror interrupt with unknown errors "
			      "%llx set\n", (unsigned long long)
			      (hwerrs & ~dd->ipath_hwe_bitsextant));

	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
	if (ctrl & INFINIPATH_C_FREEZEMODE) {
		/*
		 * parity errors in send memory are recoverable,
		 * just cancel the send (if indicated in sendbuffererror),
		 * count the occurrence, unfreeze (if no other handled
		 * hardware error bits are set), and continue. They can
		 * occur if a processor speculative read is done to the PIO
		 * buffer while we are sending a packet, for example.
		 */
		if (hwerrs & TXE_PIO_PARITY) {
			ipath_pe_txe_recover(dd);
			hwerrs &= ~TXE_PIO_PARITY;
		}
		if (!hwerrs) {
			static u32 freeze_cnt;

			freeze_cnt++;
			ipath_dbg("Clearing freezemode on ignored or recovered "
				  "hardware error (%u)\n", freeze_cnt);
			ipath_clear_freeze(dd);
		}
	}

	*msg = '\0';

	if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
		strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
			msgl);
		/* ignore from now on, so disable until driver reloaded */
		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
				 dd->ipath_hwerrmask);
	}

	ipath_format_hwerrors(hwerrs,
			      ipath_6120_hwerror_msgs,
			      sizeof(ipath_6120_hwerror_msgs)/
			      sizeof(ipath_6120_hwerror_msgs[0]),
			      msg, msgl);

	if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
		      << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
		bits = (u32) ((hwerrs >>
			       INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
			      INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
		snprintf(bitsmsg, sizeof bitsmsg,
			 "[PCIe Mem Parity Errs %x] ", bits);
		strlcat(msg, bitsmsg, msgl);
	}

#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
			 INFINIPATH_HWE_COREPLL_RFSLIP)

	if (hwerrs & _IPATH_PLL_FAIL) {
		snprintf(bitsmsg, sizeof bitsmsg,
			 "[PLL failed (%llx), InfiniPath hardware unusable]",
			 (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
		strlcat(msg, bitsmsg, msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
				 dd->ipath_hwerrmask);
	}

	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
		/*
		 * If it occurs, it is left masked since the external
		 * interface is unused
		 */
		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
				 dd->ipath_hwerrmask);
	}

	ipath_dev_err(dd, "%s hardware error\n", msg);
	if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) {
		/*
		 * for /sys status file ; if no trailing } is copied, we'll
		 * know it was truncated.
		 */
		snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
			 "{%s}", msg);
	}
}
/**
 * ipath_pe_boardname - fill in the board name
 * @dd: the infinipath device
 * @name: the output buffer
 * @namelen: the size of the output buffer
 *
 * info is based on the board revision register
 */
static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
			      size_t namelen)
{
	char *n = NULL;
	u8 boardrev = dd->ipath_boardrev;
	int ret;

	switch (boardrev) {
	case 0:
		n = "InfiniPath_Emulation";
		break;
	case 1:
		n = "InfiniPath_QLE7140-Bringup";
		break;
	case 2:
		n = "InfiniPath_QLE7140";
		break;
	case 3:
		n = "InfiniPath_QMI7140";
		break;
	case 4:
		n = "InfiniPath_QEM7140";
		break;
	case 5:
		n = "InfiniPath_QMH7140";
		break;
	case 6:
		n = "InfiniPath_QLE7142";
		break;
	default:
		ipath_dev_err(dd,
			      "Don't yet know about board with ID %u\n",
			      boardrev);
		snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
			 boardrev);
		break;
	}
	if (n)
		snprintf(name, namelen, "%s", n);

	if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev > 2) {
		ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n",
			      dd->ipath_majrev, dd->ipath_minrev);
		ret = 1;
	} else {
		ret = 0;
		if (dd->ipath_minrev >= 2)
			dd->ipath_f_put_tid = ipath_pe_put_tid_2;
	}

	/*
	 * set here, not in ipath_init_*_funcs because we have to do
	 * it after we can read chip registers.
	 */
	dd->ipath_ureg_align =
		ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);

	return ret;
}
/**
 * ipath_pe_init_hwerrors - enable hardware errors
 * @dd: the infinipath device
 *
 * now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those errors bits as they occur,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask
 */
static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
{
	u64 extsval, val;

	extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);

	if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
		ipath_dev_err(dd, "MemBIST did not complete!\n");
	if (extsval & INFINIPATH_EXTS_MEMBIST_FOUND)
		ipath_dbg("MemBIST corrected\n");

	val = ~0ULL;	/* barring bugs, all hwerrors become interrupts, */

	if (!dd->ipath_boardrev)	/* no PLL for Emulator */
		val &= ~INFINIPATH_HWE_SERDESPLLFAILED;

	if (dd->ipath_minrev < 2) {
		/* workaround bug 9460 in internal interface bus parity
		 * checking. Fixed (HW bug 9490) in Rev2.
		 */
		val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM;
	}
	dd->ipath_hwerrmask = val;
}
/**
 * ipath_pe_bringup_serdes - bring up the serdes
 * @dd: the infinipath device
 */
static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
{
	u64 val, config1, prev_val;
	int ret = 0;

	ipath_dbg("Trying to bringup serdes\n");

	if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
	    INFINIPATH_HWE_SERDESPLLFAILED) {
		ipath_dbg("At start, serdes PLL failed bit set "
			  "in hwerrstatus, clearing and continuing\n");
		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
				 INFINIPATH_HWE_SERDESPLLFAILED);
	}

	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
	config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);

	ipath_cdbg(VERBOSE, "SerDes status config0=%llx config1=%llx, "
		   "xgxsconfig %llx\n", (unsigned long long) val,
		   (unsigned long long) config1, (unsigned long long)
		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));

	/*
	 * Force reset on, also set rxdetect enable.  Must do before reading
	 * serdesstatus at least for simulation, or some of the bits in
	 * serdes status will come back as undefined and cause simulation
	 * failures
	 */
	val |= INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RXDETECT_EN
		| INFINIPATH_SERDC0_L1PWR_DN;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
	/* be sure chip saw it */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	udelay(5);		/* need pll reset set at least for a bit */
	/*
	 * after PLL is reset, set the per-lane Resets and TxIdle and
	 * clear the PLL reset and rxdetect (to get falling edge).
	 * Leave L1PWR bits set (permanently)
	 */
	val &= ~(INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_RESET_PLL
		 | INFINIPATH_SERDC0_L1PWR_DN);
	val |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE;
	ipath_cdbg(VERBOSE, "Clearing pll reset and setting lane resets "
		   "and txidle (%llx)\n", (unsigned long long) val);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
	/* be sure chip saw it */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	/* need PLL reset clear for at least 11 usec before lane
	 * resets cleared; give it a few more to be sure */
	udelay(15);
	val &= ~(INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE);

	ipath_cdbg(VERBOSE, "Clearing lane resets and txidle "
		   "(writing %llx)\n", (unsigned long long) val);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
	/* be sure chip saw it */
	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);

	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
	prev_val = val;
	if (val & INFINIPATH_XGXS_RESET)
		val &= ~INFINIPATH_XGXS_RESET;
	if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
	     INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv) {
		/* need to compensate for Tx inversion in partner */
		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
			 INFINIPATH_XGXS_RX_POL_SHIFT);
		val |= dd->ipath_rx_pol_inv <<
			INFINIPATH_XGXS_RX_POL_SHIFT;
	}
	if (val != prev_val)
		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);

	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);

	/* clear current and de-emphasis bits */
	config1 &= ~0x0ffffffff00ULL;
	/* set current to 20ma */
	config1 |= 0x00000000000ULL;
	/* set de-emphasis to -5.68dB */
	config1 |= 0x0cccc000000ULL;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);

	ipath_cdbg(VERBOSE, "done: SerDes status config0=%llx "
		   "config1=%llx, sstatus=%llx xgxs=%llx\n",
		   (unsigned long long) val, (unsigned long long) config1,
		   (unsigned long long)
		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
		   (unsigned long long)
		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));

	return ret;
}
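/*
 * Summary of the bring-up sequence above: assert the PLL reset (together
 * with rxdetect enable and L1 power-down), let it settle, then swap to the
 * per-lane resets plus TxIdle while releasing the PLL reset, and finally
 * release the lane resets and TxIdle; each step is pushed to the chip with
 * a scratch-register read before the following delay.
 */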
/**
 * ipath_pe_quiet_serdes - set serdes to txidle
 * @dd: the infinipath device
 * Called when driver is being unloaded
 */
static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
{
	u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);

	val |= INFINIPATH_SERDC0_TXIDLE;
	ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
		  (unsigned long long) val);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
}
static int ipath_pe_intconfig(struct ipath_devdata *dd)
{
	u64 chiprev;

	/*
	 * If the chip supports added error indication via GPIO pins,
	 * enable interrupts on those bits so the interrupt routine
	 * can count the events. Also set flag so interrupt routine
	 * can know they are expected.
	 */
	chiprev = dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT;
	if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) {
		/* Rev2+ reports extra errors via internal GPIO pins */
		dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
		dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 dd->ipath_gpio_mask);
	}
	return 0;
}
/**
 * ipath_setup_pe_setextled - set the state of the two external LEDs
 * @dd: the infinipath device
 * @lst: the L state
 * @ltst: the LT state
 *
 * These LEDs indicate the physical and logical state of IB link.
 * For this chip (at least with recommended board pinouts), LED1
 * is Yellow (logical state) and LED2 is Green (physical state).
 *
 * Note:  We try to match the Mellanox HCA LED behavior as best
 * we can.  Green indicates physical link state is OK (something is
 * plugged in, and we can train).
 * Amber indicates the link is logically up (ACTIVE).
 * Mellanox further blinks the amber LED to indicate data packet
 * activity, but we have no hardware support for that, so it would
 * require waking up every 10-20 msecs and checking the counters
 * on the chip, and then turning the LED off if appropriate.  That's
 * visible overhead, so not something we will do.
 */
static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
				     u64 ltst)
{
	u64 extctl;
	unsigned long flags = 0;

	/* the diags use the LED to indicate diag info, so we leave
	 * the external LED alone when the diags are running */
	if (ipath_diag_inuse)
		return;

	/* Allow override of LED display for, e.g. Locating system in rack */
	if (dd->ipath_led_override) {
		ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
			? INFINIPATH_IBCS_LT_STATE_LINKUP
			: INFINIPATH_IBCS_LT_STATE_DISABLED;
		lst = (dd->ipath_led_override & IPATH_LED_LOG)
			? INFINIPATH_IBCS_L_STATE_ACTIVE
			: INFINIPATH_IBCS_L_STATE_DOWN;
	}

	spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
	extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
				       INFINIPATH_EXTC_LED2PRIPORT_ON);

	if (ltst & INFINIPATH_IBCS_LT_STATE_LINKUP)
		extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
	if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
		extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
	dd->ipath_extctrl = extctl;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
	spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
}
/**
 * ipath_setup_pe_cleanup - clean up any per-chip chip-specific stuff
 * @dd: the infinipath device
 *
 * This is called during driver unload.
 * We do the pci_disable_msi here, not in generic code, because it
 * isn't used for the HT chips. If we do end up needing pci_enable_msi
 * at some point in the future for HT, we'll move the call back
 * into the main init_one code.
 */
static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
{
	dd->ipath_msi_lo = 0;	/* just in case unload fails */
	pci_disable_msi(dd->pcidev);
}
/**
 * ipath_setup_pe_config - setup PCIe config related stuff
 * @dd: the infinipath device
 * @pdev: the PCI device
 *
 * The pci_enable_msi() call will fail on systems with MSI quirks
 * such as those with AMD8131, even if the device of interest is not
 * attached to that device, (in the 2.6.13 - 2.6.15 kernels, at least, fixed
 * in 2.6.16).
 * All that can be done is to edit the kernel source to remove the quirk
 * check until that is fixed.
 * We do not need to call enable_msi() for our HyperTransport chip,
 * even though it uses MSI, and we want to avoid the quirk warning, so
 * we call enable_msi only for PCIe.  If we do end up needing
 * pci_enable_msi at some point in the future for HT, we'll move the
 * call back into the main init_one code.
 * We save the msi lo and hi values, so we can restore them after
 * chip reset (the kernel PCI infrastructure doesn't yet handle that
 * correctly).
 */
static int ipath_setup_pe_config(struct ipath_devdata *dd,
				 struct pci_dev *pdev)
{
	int pos, ret;

	dd->ipath_msi_lo = 0;	/* used as a flag during reset processing */
	ret = pci_enable_msi(dd->pcidev);
	if (ret)
		ipath_dev_err(dd, "pci_enable_msi failed: %d, "
			      "interrupts may not work\n", ret);
	/* continue even if it fails, we may still be OK... */
	dd->ipath_irq = pdev->irq;

	if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
		u16 control;
		pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
				      &dd->ipath_msi_lo);
		pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
				      &dd->ipath_msi_hi);
		pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
				     &control);
		/* now save the data (vector) info */
		pci_read_config_word(dd->pcidev,
				     pos + ((control & PCI_MSI_FLAGS_64BIT)
					    ? 12 : 8),
				     &dd->ipath_msi_data);
		ipath_cdbg(VERBOSE, "Read msi data 0x%x from config offset "
			   "0x%x, control=0x%x\n", dd->ipath_msi_data,
			   pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
			   control);
		/* we save the cachelinesize also, although it doesn't
		 * really matter */
		pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
				     &dd->ipath_pci_cacheline);
	} else
		ipath_dev_err(dd, "Can't find MSI capability, "
			      "can't save MSI settings for reset\n");
	if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP))) {
		u16 linkstat;
		pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
				     &linkstat);
		linkstat >>= 4;
		linkstat &= 0x1f;
		if (linkstat != 8)
			ipath_dev_err(dd, "PCIe width %u, "
				      "performance reduced\n", linkstat);
	} else
		ipath_dev_err(dd, "Can't find PCI Express "
			      "capability!\n");

	dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
	dd->ipath_link_speed_supported = IPATH_IB_SDR;
	dd->ipath_link_width_enabled = IB_WIDTH_4X;
	dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
	/* these can't change for this chip, so set once */
	dd->ipath_link_width_active = dd->ipath_link_width_enabled;
	dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;

	return 0;
}
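/*
 * Note: the MSI address/data and cacheline size captured above are the
 * values that ipath_reinit_msi() writes back into config space after a
 * chip reset, since (per the comment above) the generic PCI code does not
 * yet restore them for us.
 */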
static void ipath_init_pe_variables(struct ipath_devdata *dd)
{
	/*
	 * setup the register offsets, since they are different for each
	 * chip
	 */
	dd->ipath_kregs = &ipath_pe_kregs;
	dd->ipath_cregs = &ipath_pe_cregs;

	/*
	 * bits for selecting i2c direction and values,
	 * used for I2C serial flash
	 */
	dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
	dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
	dd->ipath_gpio_sda = IPATH_GPIO_SDA;
	dd->ipath_gpio_scl = IPATH_GPIO_SCL;

	/*
	 * Fill in data for field-values that change in newer chips.
	 * We dynamically specify only the mask for LINKTRAININGSTATE
	 * and only the shift for LINKSTATE, as they are the only ones
	 * that change.  Also precalculate the 3 link states of interest
	 * and the combined mask.
	 */
	dd->ibcs_ls_shift = IBA6120_IBCS_LINKSTATE_SHIFT;
	dd->ibcs_lts_mask = IBA6120_IBCS_LINKTRAININGSTATE_MASK;
	dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
		dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
	dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
		(INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
	dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
		(INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
	dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
		INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
		(INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);

	/*
	 * Fill in data for ibcc field-values that change in newer chips.
	 * We dynamically specify only the mask for LINKINITCMD
	 * and only the shift for LINKCMD and MAXPKTLEN, as they are
	 * the only ones that change.
	 */
	dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
	dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
	dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;

	/* Fill in shifts for RcvCtrl. */
	dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
	dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
	dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
	dd->ipath_r_portcfg_shift = 0; /* Not on IBA6120 */

	/* variables for sanity checking interrupt and errors */
	dd->ipath_hwe_bitsextant =
		(INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
		 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
		(INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
		 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
		(INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
		 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
		INFINIPATH_HWE_PCIE1PLLFAILED |
		INFINIPATH_HWE_PCIE0PLLFAILED |
		INFINIPATH_HWE_PCIEPOISONEDTLP |
		INFINIPATH_HWE_PCIECPLTIMEOUT |
		INFINIPATH_HWE_PCIEBUSPARITYXTLH |
		INFINIPATH_HWE_PCIEBUSPARITYXADM |
		INFINIPATH_HWE_PCIEBUSPARITYRADM |
		INFINIPATH_HWE_MEMBISTFAILED |
		INFINIPATH_HWE_COREPLL_FBSLIP |
		INFINIPATH_HWE_COREPLL_RFSLIP |
		INFINIPATH_HWE_SERDESPLLFAILED |
		INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
		INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
	dd->ipath_i_bitsextant =
		(INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
		(INFINIPATH_I_RCVAVAIL_MASK <<
		 INFINIPATH_I_RCVAVAIL_SHIFT) |
		INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
		INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
	dd->ipath_e_bitsextant =
		INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
		INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
		INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
		INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
		INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
		INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
		INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
		INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
		INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
		INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
		INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
		INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
		INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
		INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
		INFINIPATH_E_HARDWARE;

	dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
	dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
	dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
	dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;

	/*
	 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
	 * 2 is Some Misc, 3 is reserved for future.
	 */
	dd->ipath_eep_st_masks[0].hwerrs_to_log =
		INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
		INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;

	/* Ignore errors in PIO/PBC on systems with unordered write-combining */
	if (ipath_unordered_wc())
		dd->ipath_eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;

	dd->ipath_eep_st_masks[1].hwerrs_to_log =
		INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
		INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;

	dd->ipath_eep_st_masks[2].errs_to_log =
		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;

	dd->delay_mult = 2; /* SDR, 4X, can't change */
}
/* setup the MSI stuff again after a reset.  I'd like to just call
 * pci_enable_msi() and request_irq() again, but when I do that,
 * the MSI enable bit doesn't get set in the command word, and
 * we switch to a different interrupt vector, which is confusing,
 * so I instead just do it all inline.  Perhaps somehow can tie this
 * into the PCIe hotplug support at some point
 * Note, because I'm doing it all here, I don't call pci_disable_msi()
 * or free_irq() at the start of ipath_setup_pe_reset().
 */
static int ipath_reinit_msi(struct ipath_devdata *dd)
{
	int pos;
	u16 control;
	int ret;

	if (!dd->ipath_msi_lo) {
		dev_info(&dd->pcidev->dev, "Can't restore MSI config, "
			 "initial setup failed?\n");
		ret = 0;
		goto bail;
	}

	if (!(pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
		ipath_dev_err(dd, "Can't find MSI capability, "
			      "can't restore MSI settings\n");
		ret = 0;
		goto bail;
	}
	ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
		   dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
			       dd->ipath_msi_lo);
	ipath_cdbg(VERBOSE, "Writing msi_hi 0x%x to config offset 0x%x\n",
		   dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
			       dd->ipath_msi_hi);
	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE)) {
		ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
			   "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
			   control, control | PCI_MSI_FLAGS_ENABLE);
		control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
				      control);
	}
	/* now rewrite the data (vector) info */
	pci_write_config_word(dd->pcidev, pos +
			      ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
			      dd->ipath_msi_data);
	/* we restore the cachelinesize also, although it doesn't really
	 * matter */
	pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
			      dd->ipath_pci_cacheline);
	/* and now set the pci master bit again */
	pci_set_master(dd->pcidev);
	ret = 1;

bail:
	return ret;
}
/* This routine sleeps, so it can only be called from user context, not
 * from interrupt context.  If we need interrupt context, we can split
 * it into two routines.
 */
static int ipath_setup_pe_reset(struct ipath_devdata *dd)
{
	u64 val;
	int i;
	int ret;

	/* Use ERROR so it shows up in logs, etc. */
	ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
	/* keep chip from being accessed in a few places */
	dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
	val = dd->ipath_control | INFINIPATH_C_RESET;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);

	for (i = 1; i <= 5; i++) {
		int r;
		/* allow MBIST, etc. to complete; longer on each retry.
		 * We sometimes get machine checks from bus timeout if no
		 * response, so for now, make it *really* long.
		 */
		msleep(1000 + (1 + i) * 2000);
		if ((r =
		     pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
					    dd->ipath_pcibar0)))
			ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n",
				      r);
		if ((r =
		     pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
					    dd->ipath_pcibar1)))
			ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
				      r);
		/* now re-enable memory access */
		if ((r = pci_enable_device(dd->pcidev)))
			ipath_dev_err(dd, "pci_enable_device failed after "
				      "reset: %d\n", r);
		/* whether it worked or not, mark as present, again */
		dd->ipath_flags |= IPATH_PRESENT;
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
		if (val == dd->ipath_revision) {
			ipath_cdbg(VERBOSE, "Got matching revision "
				   "register %llx on try %d\n",
				   (unsigned long long) val, i);
			ret = ipath_reinit_msi(dd);
			goto bail;
		}
		/* Probably getting -1 back */
		ipath_dbg("Didn't get expected revision register, "
			  "got %llx, try %d\n", (unsigned long long) val,
			  i);
	}
	ret = 0; /* failed */

bail:
	return ret;
}
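/*
 * Note: the chip reset wipes its PCI config state, which is why the retry
 * loop above rewrites BAR0/BAR1 from the saved dd->ipath_pcibar0/1,
 * re-enables the device, and then calls ipath_reinit_msi() once the
 * revision register reads back correctly.
 */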
/**
 * ipath_pe_put_tid - write a TID in chip
 * @dd: the infinipath device
 * @tidptr: pointer to the expected TID (in chip) to update
 * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
 *
 * This exists as a separate routine to allow for special locking etc.
 * It's used for both the full cleanup on exit, as well as the normal
 * setup and teardown.
 */
static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
			     u32 type, unsigned long pa)
{
	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
	unsigned long flags = 0; /* keep gcc quiet */

	if (pa != dd->ipath_tidinvalid) {
		if (pa & ((1U << 11) - 1)) {
			dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
				 "not 4KB aligned!\n", pa);
			return;
		}
		pa >>= 11;
		/* paranoia check */
		if (pa & (7U << 29))
			ipath_dev_err(dd,
				      "BUG: Physical page address 0x%lx "
				      "has bits set in 31-29\n", pa);

		if (type == RCVHQ_RCV_TYPE_EAGER)
			pa |= dd->ipath_tidtemplate;
		else /* for now, always full 4KB page */
			pa |= 2 << 29;
	}

	/*
	 * Workaround chip bug 9437 by writing the scratch register
	 * before and after the TID, and with an io write barrier.
	 * We use a spinlock around the writes, so they can't intermix
	 * with other TID (eager or expected) writes (the chip bug
	 * is triggered by back to back TID writes). Unfortunately, this
	 * call can be done from interrupt level for the port 0 eager TIDs,
	 * so we have to use irqsave locks.
	 */
	spin_lock_irqsave(&dd->ipath_tid_lock, flags);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
	if (dd->ipath_kregbase)
		writel(pa, tidp32);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
	spin_unlock_irqrestore(&dd->ipath_tid_lock, flags);
}
/**
 * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
 * @dd: the infinipath device
 * @tidptr: pointer to the expected TID (in chip) to update
 * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
 *
 * This exists as a separate routine to allow for selection of the
 * appropriate "flavor". The static calls in cleanup just use the
 * revision-agnostic form, as they are not performance critical.
 */
static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
			       u32 type, unsigned long pa)
{
	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;

	if (pa != dd->ipath_tidinvalid) {
		if (pa & ((1U << 11) - 1)) {
			dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
				 "not 2KB aligned!\n", pa);
			return;
		}
		pa >>= 11;
		/* paranoia check */
		if (pa & (7U << 29))
			ipath_dev_err(dd,
				      "BUG: Physical page address 0x%lx "
				      "has bits set in 31-29\n", pa);

		if (type == RCVHQ_RCV_TYPE_EAGER)
			pa |= dd->ipath_tidtemplate;
		else /* for now, always full 4KB page */
			pa |= 2 << 29;
	}
	if (dd->ipath_kregbase)
		writel(pa, tidp32);
}
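/*
 * Note: unlike ipath_pe_put_tid() above, this variant takes no tid_lock
 * and does no scratch-register writes; it is installed by
 * ipath_pe_boardname() only for chips with minrev >= 2, where the
 * back-to-back TID write workaround is evidently no longer required.
 */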
/**
 * ipath_pe_clear_tids - clear all TID entries for a port, expected and eager
 * @dd: the infinipath device
 * @port: the port
 *
 * clear all TID entries for a port, expected and eager.
 * Used from ipath_close().  On this chip, TIDs are only 32 bits,
 * not 64, but they are still on 64 bit boundaries, so tidbase
 * is declared as u64 * for the pointer math, even though we write 32 bits
 */
static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
{
	u64 __iomem *tidbase;
	unsigned long tidinv;
	int i;

	if (!dd->ipath_kregbase)
		return;

	ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);

	tidinv = dd->ipath_tidinvalid;
	tidbase = (u64 __iomem *)
		((char __iomem *)(dd->ipath_kregbase) +
		 dd->ipath_rcvtidbase +
		 port * dd->ipath_rcvtidcnt * sizeof(*tidbase));

	for (i = 0; i < dd->ipath_rcvtidcnt; i++)
		dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
				    tidinv);

	tidbase = (u64 __iomem *)
		((char __iomem *)(dd->ipath_kregbase) +
		 dd->ipath_rcvegrbase +
		 port * dd->ipath_rcvegrcnt * sizeof(*tidbase));

	for (i = 0; i < dd->ipath_rcvegrcnt; i++)
		dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
				    tidinv);
}
/**
 * ipath_pe_tidtemplate - setup constants for TID updates
 * @dd: the infinipath device
 *
 * We setup stuff that we use a lot, to avoid calculating each time
 */
static void ipath_pe_tidtemplate(struct ipath_devdata *dd)
{
	u32 egrsize = dd->ipath_rcvegrbufsize;

	/* For now, we always allocate 4KB buffers (at init) so we can
	 * receive max size packets.  We may want a module parameter to
	 * specify 2KB or 4KB and/or make it per port instead of per device
	 * for those who want to reduce memory footprint.  Note that the
	 * ipath_rcvhdrentsize size must be large enough to hold the largest
	 * IB header (currently 96 bytes) that we expect to handle (plus of
	 * course the 2 dwords of RHF).
	 */
	if (egrsize == 2048)
		dd->ipath_tidtemplate = 1U << 29;
	else if (egrsize == 4096)
		dd->ipath_tidtemplate = 2U << 29;
	else {
		egrsize = 4096;
		dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
			 "%u, using %u\n", dd->ipath_rcvegrbufsize,
			 egrsize);
		dd->ipath_tidtemplate = 2U << 29;
	}
	dd->ipath_tidinvalid = 0;
}
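/*
 * Note: the TID "template" above encodes the eager buffer size in bits
 * 30:29 of the 32-bit TID word (1 for 2KB, 2 for 4KB).  ipath_pe_put_tid()
 * and ipath_pe_put_tid_2() OR it into the shifted physical address for
 * eager entries, and use the 4KB encoding for expected entries.
 */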
static int ipath_pe_early_init(struct ipath_devdata *dd)
{
	dd->ipath_flags |= IPATH_4BYTE_TID;
	if (ipath_unordered_wc())
		dd->ipath_flags |= IPATH_PIO_FLUSH_WC;

	/*
	 * For openfabrics, we need to be able to handle an IB header of
	 * 24 dwords.  HT chip has arbitrary sized receive buffers, so we
	 * made them the same size as the PIO buffers.  This chip does not
	 * handle arbitrary size buffers, so we need the header large enough
	 * to handle largest IB header, but still have room for a 2KB MTU
	 * standard IB packet.
	 */
	dd->ipath_rcvhdrentsize = 24;
	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
	dd->ipath_rhf_offset = 0;
	dd->ipath_egrtidbase = (u64 __iomem *)
		((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase);

	/*
	 * To truly support a 4KB MTU (for usermode), we need to
	 * bump this to a larger value.  For now, we use them for
	 * the kernel only.
	 */
	dd->ipath_rcvegrbufsize = 2048;
	/*
	 * the min() check here is currently a nop, but it may not always
	 * be, depending on just how we do ipath_rcvegrbufsize
	 */
	dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
				 dd->ipath_rcvegrbufsize +
				 (dd->ipath_rcvhdrentsize << 2));
	dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;

	/*
	 * We can request a receive interrupt for 1 or
	 * more packets from current offset.  For now, we set this
	 * up for a single packet.
	 */
	dd->ipath_rhdrhead_intr_off = 1ULL<<32;

	ipath_get_eeprom_info(dd);

	return 0;
}

int __attribute__((weak)) ipath_unordered_wc(void)
{
	return 0;
}
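/*
 * Note: ipath_unordered_wc() above is intentionally a weak symbol so an
 * architecture-specific implementation elsewhere in the driver can
 * override this default at link time; several paths in this file (PIO
 * parity recovery, EEPROM error masking, base-info flags) key off it.
 */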
/**
 * ipath_init_pe_get_base_info - set chip-specific flags for user code
 * @pd: the infinipath port
 * @kbase: ipath_base_info pointer
 *
 * We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithms.
 */
static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
{
	struct ipath_base_info *kinfo = kbase;
	struct ipath_devdata *dd;

	if (ipath_unordered_wc()) {
		kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER;
		ipath_cdbg(PROC, "Intel processor, forcing WC order\n");
	}
	else
		ipath_cdbg(PROC, "Not Intel processor, WC ordered\n");

	if (pd == NULL)
		goto done;

	dd = pd->port_dd;

done:
	kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE |
		IPATH_RUNTIME_FORCE_PIOAVAIL | IPATH_RUNTIME_PIO_REGSWAPPED;
	return 0;
}

static void ipath_pe_free_irq(struct ipath_devdata *dd)
{
	free_irq(dd->ipath_irq, dd);
	dd->ipath_irq = 0;
}
static struct ipath_message_header *
ipath_pe_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
{
	return (struct ipath_message_header *)
		&rhf_addr[sizeof(u64) / sizeof(u32)];
}

static void ipath_pe_config_ports(struct ipath_devdata *dd, ushort cfgports)
{
	dd->ipath_portcnt =
		ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
	dd->ipath_p0_rcvegrcnt =
		ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
}
static void ipath_pe_read_counters(struct ipath_devdata *dd,
				   struct infinipath_counters *cntrs)
{
	cntrs->LBIntCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
	cntrs->LBFlowStallCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
	cntrs->TxSDmaDescCnt = 0;
	cntrs->TxUnsupVLErrCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
	cntrs->TxDataPktCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
	cntrs->TxFlowPktCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
	cntrs->TxDwordCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
	cntrs->TxLenErrCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
	cntrs->TxMaxMinLenErrCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
	cntrs->TxUnderrunCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
	cntrs->TxFlowStallCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
	cntrs->TxDroppedPktCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
	cntrs->RxDroppedPktCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
	cntrs->RxDataPktCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
	cntrs->RxFlowPktCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
	cntrs->RxDwordCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
	cntrs->RxLenErrCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
	cntrs->RxMaxMinLenErrCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
	cntrs->RxICRCErrCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
	cntrs->RxVCRCErrCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
	cntrs->RxFlowCtrlErrCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
	cntrs->RxBadFormatCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
	cntrs->RxLinkProblemCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
	cntrs->RxEBPCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
	cntrs->RxLPCRCErrCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
	cntrs->RxBufOvflCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
	cntrs->RxTIDFullErrCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
	cntrs->RxTIDValidErrCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
	cntrs->RxPKeyMismatchCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
	cntrs->RxP0HdrEgrOvflCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
	cntrs->RxP1HdrEgrOvflCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
	cntrs->RxP2HdrEgrOvflCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
	cntrs->RxP3HdrEgrOvflCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
	cntrs->RxP4HdrEgrOvflCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
	cntrs->RxP5HdrEgrOvflCnt = 0;
	cntrs->RxP6HdrEgrOvflCnt = 0;
	cntrs->RxP7HdrEgrOvflCnt = 0;
	cntrs->RxP8HdrEgrOvflCnt = 0;
	cntrs->RxP9HdrEgrOvflCnt = 0;
	cntrs->RxP10HdrEgrOvflCnt = 0;
	cntrs->RxP11HdrEgrOvflCnt = 0;
	cntrs->RxP12HdrEgrOvflCnt = 0;
	cntrs->RxP13HdrEgrOvflCnt = 0;
	cntrs->RxP14HdrEgrOvflCnt = 0;
	cntrs->RxP15HdrEgrOvflCnt = 0;
	cntrs->RxP16HdrEgrOvflCnt = 0;
	cntrs->IBStatusChangeCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
	cntrs->IBLinkErrRecoveryCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
	cntrs->IBLinkDownedCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
	cntrs->IBSymbolErrCnt =
		ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
	cntrs->RxVL15DroppedPktCnt = 0;
	cntrs->RxOtherLocalPhyErrCnt = 0;
	cntrs->PcieRetryBufDiagQwordCnt = 0;
	cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
	cntrs->LocalLinkIntegrityErrCnt = dd->ipath_lli_errs;
	cntrs->RxVlErrCnt = 0;
	cntrs->RxDlidFltrCnt = 0;
}
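/*
 * Note: the IBA6120 uses at most five receive ports, so the per-port
 * overflow counters for ports 5 and up, along with the SDMA, VL15 and
 * DLID-filter counters this chip does not provide, are simply reported
 * as zero above; ExcessBufferOvflCnt and LocalLinkIntegrityErrCnt come
 * from software counts kept in the devdata instead of chip registers.
 */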
/* no interrupt fallback for these chips */
static int ipath_pe_nointr_fallback(struct ipath_devdata *dd)
{
	return 0;
}

/*
 * reset the XGXS (between serdes and IBC).  Slightly less intrusive
 * than resetting the IBC or external link state, and useful in some
 * cases to cause some retraining.  To do this right, we reset IBC
 * as well.
 */
static void ipath_pe_xgxs_reset(struct ipath_devdata *dd)
{
	u64 val, prev_val;

	prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
	val = prev_val | INFINIPATH_XGXS_RESET;
	prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
			 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
			 dd->ipath_control);
}
*dd
, int which
)
1602 case IPATH_IB_CFG_LWID
:
1603 ret
= dd
->ipath_link_width_active
;
1605 case IPATH_IB_CFG_SPD
:
1606 ret
= dd
->ipath_link_speed_active
;
1608 case IPATH_IB_CFG_LWID_ENB
:
1609 ret
= dd
->ipath_link_width_enabled
;
1611 case IPATH_IB_CFG_SPD_ENB
:
1612 ret
= dd
->ipath_link_speed_enabled
;
1622 /* we assume range checking is already done, if needed */
1623 static int ipath_pe_set_ib_cfg(struct ipath_devdata
*dd
, int which
, u32 val
)
1627 if (which
== IPATH_IB_CFG_LWID_ENB
)
1628 dd
->ipath_link_width_enabled
= val
;
1629 else if (which
== IPATH_IB_CFG_SPD_ENB
)
1630 dd
->ipath_link_speed_enabled
= val
;
1636 static void ipath_pe_config_jint(struct ipath_devdata
*dd
, u16 a
, u16 b
)
1641 static int ipath_pe_ib_updown(struct ipath_devdata
*dd
, int ibup
, u64 ibcs
)
1643 ipath_setup_pe_setextled(dd
, ipath_ib_linkstate(dd
, ibcs
),
1644 ipath_ib_linktrstate(dd
, ibcs
));
/**
 * ipath_init_iba6120_funcs - set up the chip-specific function pointers
 * @dd: the infinipath device
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
{
	dd->ipath_f_intrsetup = ipath_pe_intconfig;
	dd->ipath_f_bus = ipath_setup_pe_config;
	dd->ipath_f_reset = ipath_setup_pe_reset;
	dd->ipath_f_get_boardname = ipath_pe_boardname;
	dd->ipath_f_init_hwerrors = ipath_pe_init_hwerrors;
	dd->ipath_f_early_init = ipath_pe_early_init;
	dd->ipath_f_handle_hwerrors = ipath_pe_handle_hwerrors;
	dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes;
	dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
	dd->ipath_f_clear_tids = ipath_pe_clear_tids;
	/*
	 * _f_put_tid may get changed after we read the chip revision,
	 * but we start with the safe version for all revs
	 */
	dd->ipath_f_put_tid = ipath_pe_put_tid;
	dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
	dd->ipath_f_setextled = ipath_setup_pe_setextled;
	dd->ipath_f_get_base_info = ipath_pe_get_base_info;
	dd->ipath_f_free_irq = ipath_pe_free_irq;
	dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
	dd->ipath_f_intr_fallback = ipath_pe_nointr_fallback;
	dd->ipath_f_xgxs_reset = ipath_pe_xgxs_reset;
	dd->ipath_f_get_msgheader = ipath_pe_get_msgheader;
	dd->ipath_f_config_ports = ipath_pe_config_ports;
	dd->ipath_f_read_counters = ipath_pe_read_counters;
	dd->ipath_f_get_ib_cfg = ipath_pe_get_ib_cfg;
	dd->ipath_f_set_ib_cfg = ipath_pe_set_ib_cfg;
	dd->ipath_f_config_jint = ipath_pe_config_jint;
	dd->ipath_f_ib_updown = ipath_pe_ib_updown;

	/* initialize chip-specific variables */
	ipath_init_pe_variables(dd);
}