/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include "qib.h"

/*
 * QSFP support for ib_qib driver, using "Two Wire Serial Interface" driver
 * in qib_twsi.c
 */
#define QSFP_MAX_RETRY 4
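
/*
 * Some modules fail the first TWSI transaction after being selected, so
 * the reads below are retried up to QSFP_MAX_RETRY times before giving up.
 */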
static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
{
        struct qib_devdata *dd = ppd->dd;
        u32 out, mask;
        int ret, cnt, pass = 0;
        int stuck = 0;
        u8 *buff = bp;

        ret = mutex_lock_interruptible(&dd->eep_lock);
        if (ret)
                goto no_unlock;

        if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
                ret = -ENXIO;
                goto bail;
        }
        /*
         * We presume, if we are called at all, that this board has
         * QSFP. This is on the same i2c chain as the legacy parts,
         * but only responds if the module is selected via GPIO pins.
         * Further, there are very long setup and hold requirements
         * on MODSEL.
         */
        mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
        out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
        if (ppd->hw_pidx) {
                mask <<= QSFP_GPIO_PORT2_SHIFT;
                out <<= QSFP_GPIO_PORT2_SHIFT;
        }

        dd->f_gpio_mod(dd, out, mask, mask);
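
        /*
         * MOD_SEL_N is active low; leaving it clear in "out" while RST_N
         * and LP_MODE stay high is what actually selects the module.
         */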
        /*
         * Module could take up to 2 Msec to respond to MOD_SEL, and there
         * is no way to tell if it is ready, so we must wait.
         */
        msleep(2);

        /* Make sure TWSI bus is in sane state. */
        ret = qib_twsi_reset(dd);
        if (ret) {
                qib_dev_porterr(dd, ppd->port,
                                "QSFP interface Reset for read failed\n");
                ret = -EIO;
                stuck = 1;
                goto deselect;
        }

        /* All QSFP modules are at A0 */

        cnt = 0;
        while (cnt < len) {
                unsigned in_page;
                int wlen = len - cnt;

                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
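                /*
                 * wlen is clamped above so a single TWSI block transfer
                 * never crosses a QSFP_PAGESIZE boundary.
                 */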
                ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
                /* Some QSFP's fail first try. Retry as experiment */
                if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY)
                        continue;
                if (ret) {
                        /* qib_twsi_blk_rd() 1 for error, else 0 */
                        ret = -EIO;
                        goto deselect;
                }
                addr += wlen;
                cnt += wlen;
        }
        ret = cnt;

deselect:
        /*
         * Module could take up to 10 uSec after transfer before
         * ready to respond to MOD_SEL negation, and there is no way
         * to tell if it is ready, so we must wait.
         */
        udelay(10);
        /* set QSFP MODSEL, RST. LP all high */
        dd->f_gpio_mod(dd, mask, mask, mask);

        /*
         * Module could take up to 2 Msec to respond to MOD_SEL
         * going away, and there is no way to tell if it is ready,
         * so we must wait.
         */
        msleep(2);

        if (stuck)
                qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");

        if (pass >= QSFP_MAX_RETRY && ret)
                qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
        else if (pass)
                qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);

bail:
        mutex_unlock(&dd->eep_lock);

no_unlock:
        return ret;
}
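
/*
 * qsfp_read() returns the number of bytes read on success or a negative
 * errno. A typical single-byte read, as done in qib_refresh_qsfp_cache()
 * below, looks like:
 *
 *      u8 id;
 *      ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &id, 1);
 *      if (ret < 0)
 *              ...
 */
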
/*
 * We do not ordinarily write the QSFP, but this is needed to select
 * the page on non-flat QSFPs, and possibly later unusual cases
 */
static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
                          int len)
{
        struct qib_devdata *dd = ppd->dd;
        u32 out, mask;
        int ret, cnt;
        u8 *buff = bp;

        ret = mutex_lock_interruptible(&dd->eep_lock);
        if (ret)
                goto no_unlock;

        if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
                ret = -ENXIO;
                goto bail;
        }
        /*
         * We presume, if we are called at all, that this board has
         * QSFP. This is on the same i2c chain as the legacy parts,
         * but only responds if the module is selected via GPIO pins.
         * Further, there are very long setup and hold requirements
         * on MODSEL.
         */
        mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
        out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
        if (ppd->hw_pidx) {
                mask <<= QSFP_GPIO_PORT2_SHIFT;
                out <<= QSFP_GPIO_PORT2_SHIFT;
        }
        dd->f_gpio_mod(dd, out, mask, mask);
        /*
         * Module could take up to 2 Msec to respond to MOD_SEL,
         * and there is no way to tell if it is ready, so we must wait.
         */
        msleep(2);

        /* Make sure TWSI bus is in sane state. */
        ret = qib_twsi_reset(dd);
        if (ret) {
                qib_dev_porterr(dd, ppd->port,
                                "QSFP interface Reset for write failed\n");
                ret = -EIO;
                goto deselect;
        }

        /* All QSFP modules are at A0 */

        cnt = 0;
        while (cnt < len) {
                unsigned in_page;
                int wlen = len - cnt;

                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
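                /* Same page-boundary clamp as in qsfp_read() above. */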
                ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen);
                if (ret) {
                        /* qib_twsi_blk_wr() 1 for error, else 0 */
                        ret = -EIO;
                        goto deselect;
                }
                addr += wlen;
                cnt += wlen;
        }
        ret = cnt;

deselect:
        /*
         * Module could take up to 10 uSec after transfer before
         * ready to respond to MOD_SEL negation, and there is no way
         * to tell if it is ready, so we must wait.
         */
        udelay(10);
        /* set QSFP MODSEL, RST, LP high */
        dd->f_gpio_mod(dd, mask, mask, mask);
        /*
         * Module could take up to 2 Msec to respond to MOD_SEL
         * going away, and there is no way to tell if it is ready,
         * so we must wait.
         */
        msleep(2);

bail:
        mutex_unlock(&dd->eep_lock);

no_unlock:
        return ret;
}
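
/*
 * The only caller of qib_qsfp_write() in this file is the page-select in
 * qib_refresh_qsfp_cache() below, which writes byte 127 (the page-select
 * byte on paged modules):
 *
 *      u8 poke = 0;
 *      ret = qib_qsfp_write(ppd, 127, &poke, 1);
 */
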
/*
 * For validation, we want to check the checksums, even of the
 * fields we do not otherwise use. This function reads the bytes from
 * <first> to <next-1> and returns the 8lsbs of the sum, or <0 for errors
 */
static int qsfp_cks(struct qib_pportdata *ppd, int first, int next)
{
        int ret;
        u16 cks;
        u8 bval;

        cks = 0;
        while (first < next) {
                ret = qsfp_read(ppd, first, &bval, 1);
                if (ret < 0)
                        goto bail;
                cks += bval;
                ++first;
        }
        ret = cks & 0xff;
bail:
        return ret;
}
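
/*
 * The module stores each checksum byte as the low 8 bits of the sum over
 * its region, so qib_refresh_qsfp_cache() below compares its running sum
 * (masked with 0xff) against cp->cks1 and cp->cks2 as read from the module.
 */
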
int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
{
        int ret;
        int idx;
        u16 cks;
        u8 peek[4];

        /* ensure sane contents on invalid reads, for cable swaps */
        memset(cp, 0, sizeof(*cp));

        if (!qib_qsfp_mod_present(ppd)) {
                ret = -ENODEV;
                goto bail;
        }

        ret = qsfp_read(ppd, 0, peek, 3);
        if (ret < 0)
                goto bail;
        if ((peek[0] & 0xFE) != 0x0C)
                qib_dev_porterr(ppd->dd, ppd->port,
                                "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);

        if ((peek[2] & 2) == 0) {
                /*
                 * If cable is paged, rather than "flat memory", we need to
                 * set the page to zero, even if it already appears to be zero.
                 */
                u8 poke = 0;

                ret = qib_qsfp_write(ppd, 127, &poke, 1);
                udelay(50);
                if (ret != 1) {
                        qib_dev_porterr(ppd->dd, ppd->port,
                                        "Failed QSFP Page set\n");
                        goto bail;
                }
        }
        ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1);
        if (ret < 0)
                goto bail;
        if ((cp->id & 0xFE) != 0x0C)
                qib_dev_porterr(ppd->dd, ppd->port,
                                "QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id);
        cks = cp->id;

        ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1);
        if (ret < 0)
                goto bail;
        cks += cp->pwr;

        ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS);
        if (ret < 0)
                goto bail;
        cks += ret;

        ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1);
        if (ret < 0)
                goto bail;
        cks += cp->len;

        ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1);
        if (ret < 0)
                goto bail;
        cks += cp->tech;

        ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN);
        if (ret < 0)
                goto bail;
        for (idx = 0; idx < QSFP_VEND_LEN; ++idx)
                cks += cp->vendor[idx];

        ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1);
        if (ret < 0)
                goto bail;
        cks += cp->xt_xcv;
        ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN);
        if (ret < 0)
                goto bail;
        for (idx = 0; idx < QSFP_VOUI_LEN; ++idx)
                cks += cp->oui[idx];

        ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN);
        if (ret < 0)
                goto bail;
        for (idx = 0; idx < QSFP_PN_LEN; ++idx)
                cks += cp->partnum[idx];

        ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN);
        if (ret < 0)
                goto bail;
        for (idx = 0; idx < QSFP_REV_LEN; ++idx)
                cks += cp->rev[idx];

        ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN);
        if (ret < 0)
                goto bail;
        for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx)
                cks += cp->atten[idx];

        ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS);
        if (ret < 0)
                goto bail;
        cks += ret;
        cks &= 0xff;
        ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1);
        if (ret < 0)
                goto bail;
        if (cks != cp->cks1)
                qib_dev_porterr(ppd->dd, ppd->port,
                                "QSFP cks1 is %02X, computed %02X\n", cp->cks1,
                                cks);

        /* Second checksum covers 192 to (serial, date, lot) */
        ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS);
        if (ret < 0)
                goto bail;
        cks = ret;
        ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN);
        if (ret < 0)
                goto bail;
        for (idx = 0; idx < QSFP_SN_LEN; ++idx)
                cks += cp->serial[idx];

        ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN);
        if (ret < 0)
                goto bail;
        for (idx = 0; idx < QSFP_DATE_LEN; ++idx)
                cks += cp->date[idx];

        ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN);
        if (ret < 0)
                goto bail;
        for (idx = 0; idx < QSFP_LOT_LEN; ++idx)
                cks += cp->lot[idx];

        ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS);
        if (ret < 0)
                goto bail;
        cks += ret;
        cks &= 0xff;
        ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1);
        if (ret < 0)
                goto bail;
        if (cks != cp->cks2)
                qib_dev_porterr(ppd->dd, ppd->port,
                                "QSFP cks2 is %02X, computed %02X\n", cp->cks2,
                                cks);
        return 0;

bail:
        cp->id = 0;
        return ret;
}
const char * const qib_qsfp_devtech[16] = {
        "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
        "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
        "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
        "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
};

#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
#define QSFP_DEFAULT_HDR_CNT 224

static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
int qib_qsfp_mod_present(struct qib_pportdata *ppd)
{
        u32 mask;
        int ret;

        mask = QSFP_GPIO_MOD_PRS_N <<
                (ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT);
        ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
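        /*
         * MOD_PRS_N is active low: the GPIO bit reads 0 when a module is
         * plugged in, so the negation below yields nonzero for "present".
         */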
        return !((ret & mask) >>
                 ((ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT) + 3));
}

/*
 * Initialize structures that control access to QSFP. Called once per port
 * on cards that support QSFP.
 */
void qib_qsfp_init(struct qib_qsfp_data *qd,
                   void (*fevent)(struct work_struct *))
{
        u32 mask, highs;
        struct qib_devdata *dd = qd->ppd->dd;

        /* Initialize work struct for later QSFP events */
        INIT_WORK(&qd->work, fevent);

        /*
         * Later, we may want more validation. For now, just set up pins and
         * blip reset. If module is present, call qib_refresh_qsfp_cache(),
         * to do further init.
         */
        mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
        highs = mask - QSFP_GPIO_MOD_RST_N;
        if (qd->ppd->hw_pidx) {
                mask <<= QSFP_GPIO_PORT2_SHIFT;
                highs <<= QSFP_GPIO_PORT2_SHIFT;
        }
        dd->f_gpio_mod(dd, highs, mask, mask);
        udelay(20); /* Generous RST dwell */
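        /*
         * "highs" is mask minus the RST bit, so the write above drives
         * MOD_RST_N low while MOD_SEL_N and LP_MODE stay high; reset is
         * released again by the all-high write below.
         */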
        dd->f_gpio_mod(dd, mask, mask, mask);
}

void qib_qsfp_deinit(struct qib_qsfp_data *qd)
{
        /*
         * There is nothing to do here for now. Our work is scheduled
         * with queue_work(), and flush_workqueue() from remove_one
         * will block until all work set up with queue_work()
         * completes.
         */
}

int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
{
        struct qib_qsfp_cache cd;
        u8 bin_buff[QSFP_DUMP_CHUNK];
        char lenstr[6];
        int sofar = 0;
        int bidx = 0;
        int ret;

        ret = qib_refresh_qsfp_cache(ppd, &cd);
        if (ret < 0)
                goto bail;

        lenstr[0] = ' ';
        lenstr[1] = '\0';
        if (QSFP_IS_CU(cd.tech))
                sprintf(lenstr, "%dM ", cd.len);

        sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes +
                           (QSFP_PWR(cd.pwr) * 4));

        sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr,
                           qib_qsfp_devtech[cd.tech >> 4]);

        sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
                           QSFP_VEND_LEN, cd.vendor);
        sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
                           QSFP_OUI(cd.oui));
        sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
                           QSFP_PN_LEN, cd.partnum);
        sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
                           QSFP_REV_LEN, cd.rev);
        if (QSFP_IS_CU(cd.tech))
                sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n",
                                   QSFP_ATTEN_SDR(cd.atten),
                                   QSFP_ATTEN_DDR(cd.atten));
        sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
                           QSFP_SN_LEN, cd.serial);
        sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
                           QSFP_DATE_LEN, cd.date);
        sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
                           QSFP_LOT_LEN, cd.lot);
        while (bidx < QSFP_DEFAULT_HDR_CNT) {
                int iidx;

                ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
                if (ret < 0)
                        goto bail;
                for (iidx = 0; iidx < ret; ++iidx) {
                        sofar += scnprintf(buf + sofar, len - sofar, " %02X",
                                           bin_buff[iidx]);
                }
                sofar += scnprintf(buf + sofar, len - sofar, "\n");
                bidx += QSFP_DUMP_CHUNK;
        }
        ret = sofar;
bail:
        return ret;
}
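
/*
 * Usage sketch (hypothetical caller, not part of this file): the dump is
 * plain text sized for a page-style buffer, e.g.
 *
 *      char *page = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *      int n = page ? qib_qsfp_dump(ppd, page, PAGE_SIZE) : -ENOMEM;
 *
 * On success the return value is the number of characters written; on
 * failure it is the negative errno from the cache refresh or raw read.
 */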