drivers/infiniband/hw/hfi1/qsfp.c
/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include "hfi.h"
/* for the given bus number, return the CSR for reading an i2c line */
static inline u32 i2c_in_csr(u32 bus_num)
{
        return bus_num ? ASIC_QSFP2_IN : ASIC_QSFP1_IN;
}

/* for the given bus number, return the CSR for writing an i2c line */
static inline u32 i2c_oe_csr(u32 bus_num)
{
        return bus_num ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
}
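
/*
 * Bit-bang callbacks used by the i2c-algo-bit layer (registered through
 * i2c_bit_add_bus() below).  SDA and SCL are driven open-drain via the
 * QSFP output-enable CSR and sampled via the QSFP input CSR.
 */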
static void hfi1_setsda(void *data, int state)
{
        struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
        struct hfi1_devdata *dd = bus->controlling_dd;
        u64 reg;
        u32 target_oe;

        target_oe = i2c_oe_csr(bus->num);
        reg = read_csr(dd, target_oe);
        /*
         * The OE bit value is inverted and connected to the pin.  When
         * OE is 0 the pin is left to be pulled up, when the OE is 1
         * the pin is driven low.  This matches the "open drain" or "open
         * collector" convention.
         */
        if (state)
                reg &= ~QSFP_HFI0_I2CDAT;
        else
                reg |= QSFP_HFI0_I2CDAT;
        write_csr(dd, target_oe, reg);
        /* do a read to force the write into the chip */
        (void)read_csr(dd, target_oe);
}
static void hfi1_setscl(void *data, int state)
{
        struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
        struct hfi1_devdata *dd = bus->controlling_dd;
        u64 reg;
        u32 target_oe;

        target_oe = i2c_oe_csr(bus->num);
        reg = read_csr(dd, target_oe);
        /*
         * The OE bit value is inverted and connected to the pin.  When
         * OE is 0 the pin is left to be pulled up, when the OE is 1
         * the pin is driven low.  This matches the "open drain" or "open
         * collector" convention.
         */
        if (state)
                reg &= ~QSFP_HFI0_I2CCLK;
        else
                reg |= QSFP_HFI0_I2CCLK;
        write_csr(dd, target_oe, reg);
        /* do a read to force the write into the chip */
        (void)read_csr(dd, target_oe);
}
static int hfi1_getsda(void *data)
{
        struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
        u64 reg;
        u32 target_in;

        hfi1_setsda(data, 1);   /* clear OE so we do not pull line down */
        udelay(2);              /* 1us pull up + 250ns hold */

        target_in = i2c_in_csr(bus->num);
        reg = read_csr(bus->controlling_dd, target_in);
        return !!(reg & QSFP_HFI0_I2CDAT);
}
static int hfi1_getscl(void *data)
{
        struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
        u64 reg;
        u32 target_in;

        hfi1_setscl(data, 1);   /* clear OE so we do not pull line down */
        udelay(2);              /* 1us pull up + 250ns hold */

        target_in = i2c_in_csr(bus->num);
        reg = read_csr(bus->controlling_dd, target_in);
        return !!(reg & QSFP_HFI0_I2CCLK);
}
/*
 * Allocate and initialize the given i2c bus number.
 * Returns NULL on failure.
 */
static struct hfi1_i2c_bus *init_i2c_bus(struct hfi1_devdata *dd,
                                         struct hfi1_asic_data *ad, int num)
{
        struct hfi1_i2c_bus *bus;
        int ret;

        bus = kzalloc(sizeof(*bus), GFP_KERNEL);
        if (!bus)
                return NULL;

        bus->controlling_dd = dd;
        bus->num = num; /* our bus number */

        bus->algo.setsda = hfi1_setsda;
        bus->algo.setscl = hfi1_setscl;
        bus->algo.getsda = hfi1_getsda;
        bus->algo.getscl = hfi1_getscl;
        bus->algo.udelay = 5;
        bus->algo.timeout = usecs_to_jiffies(100000);
        bus->algo.data = bus;

        bus->adapter.owner = THIS_MODULE;
        bus->adapter.algo_data = &bus->algo;
        bus->adapter.dev.parent = &dd->pcidev->dev;
        snprintf(bus->adapter.name, sizeof(bus->adapter.name),
                 "hfi1_i2c%d", num);

        ret = i2c_bit_add_bus(&bus->adapter);
        if (ret) {
                dd_dev_info(dd, "%s: unable to add i2c bus %d, err %d\n",
                            __func__, num, ret);
                kfree(bus);
                return NULL;
        }

        return bus;
}
/*
 * Initialize i2c buses.
 * Return 0 on success, -errno on error.
 */
int set_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
{
        ad->i2c_bus0 = init_i2c_bus(dd, ad, 0);
        ad->i2c_bus1 = init_i2c_bus(dd, ad, 1);
        if (!ad->i2c_bus0 || !ad->i2c_bus1)
                return -ENOMEM;
        return 0;
}
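
/* Remove the i2c adapter for a single bus (if any) and free it. */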
static void clean_i2c_bus(struct hfi1_i2c_bus *bus)
{
        if (bus) {
                i2c_del_adapter(&bus->adapter);
                kfree(bus);
        }
}
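
/* Tear down both ASIC i2c buses set up by set_up_i2c(). */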
void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
{
        if (!ad)
                return;
        clean_i2c_bus(ad->i2c_bus0);
        ad->i2c_bus0 = NULL;
        clean_i2c_bus(ad->i2c_bus1);
        ad->i2c_bus1 = NULL;
}
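
/*
 * Write @len bytes to @slave_addr on the given bus.  @offset_size selects
 * how many register offset bytes (0, 1, or 2) are sent before the data.
 * Return 0 on success, -errno on error.
 */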
static int i2c_bus_write(struct hfi1_devdata *dd, struct hfi1_i2c_bus *i2c,
                         u8 slave_addr, int offset, int offset_size,
                         u8 *data, u16 len)
{
        int ret;
        int num_msgs;
        u8 offset_bytes[2];
        struct i2c_msg msgs[2];

        switch (offset_size) {
        case 0:
                num_msgs = 1;
                msgs[0].addr = slave_addr;
                msgs[0].flags = 0;
                msgs[0].len = len;
                msgs[0].buf = data;
                break;
        case 2:
                offset_bytes[1] = (offset >> 8) & 0xff;
                /* fall through */
        case 1:
                num_msgs = 2;
                offset_bytes[0] = offset & 0xff;

                msgs[0].addr = slave_addr;
                msgs[0].flags = 0;
                msgs[0].len = offset_size;
                msgs[0].buf = offset_bytes;

                msgs[1].addr = slave_addr;
                msgs[1].flags = I2C_M_NOSTART;
                msgs[1].len = len;
                msgs[1].buf = data;
                break;
        default:
                return -EINVAL;
        }

        i2c->controlling_dd = dd;
        ret = i2c_transfer(&i2c->adapter, msgs, num_msgs);
        if (ret != num_msgs) {
                dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; write failed, ret %d\n",
                           __func__, i2c->num, slave_addr, offset, len, ret);
                return ret < 0 ? ret : -EIO;
        }
        return 0;
}
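
/*
 * Read @len bytes from @slave_addr on the given bus.  @offset_size selects
 * how many register offset bytes (0, 1, or 2) are written before the read.
 * Return 0 on success, -errno on error.
 */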
static int i2c_bus_read(struct hfi1_devdata *dd, struct hfi1_i2c_bus *bus,
                        u8 slave_addr, int offset, int offset_size,
                        u8 *data, u16 len)
{
        int ret;
        int num_msgs;
        u8 offset_bytes[2];
        struct i2c_msg msgs[2];

        switch (offset_size) {
        case 0:
                num_msgs = 1;
                msgs[0].addr = slave_addr;
                msgs[0].flags = I2C_M_RD;
                msgs[0].len = len;
                msgs[0].buf = data;
                break;
        case 2:
                offset_bytes[1] = (offset >> 8) & 0xff;
                /* fall through */
        case 1:
                num_msgs = 2;
                offset_bytes[0] = offset & 0xff;

                msgs[0].addr = slave_addr;
                msgs[0].flags = 0;
                msgs[0].len = offset_size;
                msgs[0].buf = offset_bytes;

                msgs[1].addr = slave_addr;
                msgs[1].flags = I2C_M_RD;
                msgs[1].len = len;
                msgs[1].buf = data;
                break;
        default:
                return -EINVAL;
        }

        bus->controlling_dd = dd;
        ret = i2c_transfer(&bus->adapter, msgs, num_msgs);
        if (ret != num_msgs) {
                dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; read failed, ret %d\n",
                           __func__, bus->num, slave_addr, offset, len, ret);
                return ret < 0 ? ret : -EIO;
        }
        return 0;
}
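
/*
 * In the raw i2c routines below, @i2c_addr packs the 8-bit i2c bus address
 * in its low byte (converted to a 7-bit address) and the register offset
 * width in bits 9:8 (0, 1, or 2 bytes).
 */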
/*
 * Raw i2c write.  No set-up or lock checking.
 *
 * Return 0 on success, -errno on error.
 */
static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
                       int offset, void *bp, int len)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct hfi1_i2c_bus *bus;
        u8 slave_addr;
        int offset_size;

        bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0;
        slave_addr = (i2c_addr & 0xff) >> 1; /* convert to 7-bit addr */
        offset_size = (i2c_addr >> 8) & 0x3;
        return i2c_bus_write(dd, bus, slave_addr, offset, offset_size, bp, len);
}
/*
 * Caller must hold the i2c chain resource.
 *
 * Return number of bytes written, or -errno.
 */
int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
              void *bp, int len)
{
        int ret;

        if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
                return -EACCES;

        ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len);
        if (ret)
                return ret;

        return len;
}
/*
 * Raw i2c read.  No set-up or lock checking.
 *
 * Return 0 on success, -errno on error.
 */
static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
                      int offset, void *bp, int len)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct hfi1_i2c_bus *bus;
        u8 slave_addr;
        int offset_size;

        bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0;
        slave_addr = (i2c_addr & 0xff) >> 1; /* convert to 7-bit addr */
        offset_size = (i2c_addr >> 8) & 0x3;
        return i2c_bus_read(dd, bus, slave_addr, offset, offset_size, bp, len);
}
/*
 * Caller must hold the i2c chain resource.
 *
 * Return number of bytes read, or -errno.
 */
int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
             void *bp, int len)
{
        int ret;

        if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
                return -EACCES;

        ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len);
        if (ret)
                return ret;

        return len;
}
/*
 * Write page n, offset m of QSFP memory as defined by SFF 8636
 * by writing @addr = ((256 * n) + m)
 *
 * Caller must hold the i2c chain resource.
 *
 * Return number of bytes written or -errno.
 */
int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
               int len)
{
        int count = 0;
        int offset;
        int nwrite;
        int ret = 0;
        u8 page;

        if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
                return -EACCES;

        while (count < len) {
                /*
                 * Set the qsfp page based on a zero-based address
                 * and a page size of QSFP_PAGESIZE bytes.
                 */
                page = (u8)(addr / QSFP_PAGESIZE);

                ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
                                  QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
                /* QSFPs require a 5-10msec delay after write operations */
                mdelay(5);
                if (ret) {
                        hfi1_dev_porterr(ppd->dd, ppd->port,
                                         "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
                                         target, ret);
                        break;
                }

                offset = addr % QSFP_PAGESIZE;
                nwrite = len - count;
                /* truncate write to boundary if crossing boundary */
                if (((addr % QSFP_RW_BOUNDARY) + nwrite) > QSFP_RW_BOUNDARY)
                        nwrite = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY);

                ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
                                  offset, bp + count, nwrite);
                /* QSFPs require a 5-10msec delay after write operations */
                mdelay(5);
                if (ret) /* stop on error */
                        break;

                count += nwrite;
                addr += nwrite;
        }

        if (ret < 0)
                return ret;
        return count;
}
/*
 * Perform a stand-alone single QSFP write.  Acquire the resource, do the
 * write, then release the resource.
 */
int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
                   int len)
{
        struct hfi1_devdata *dd = ppd->dd;
        u32 resource = qsfp_resource(dd);
        int ret;

        ret = acquire_chip_resource(dd, resource, QSFP_WAIT);
        if (ret)
                return ret;
        ret = qsfp_write(ppd, target, addr, bp, len);
        release_chip_resource(dd, resource);

        return ret;
}
/*
 * Access page n, offset m of QSFP memory as defined by SFF 8636
 * by reading @addr = ((256 * n) + m)
 *
 * Caller must hold the i2c chain resource.
 *
 * Return the number of bytes read or -errno.
 */
int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
              int len)
{
        int count = 0;
        int offset;
        int nread;
        int ret = 0;
        u8 page;

        if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
                return -EACCES;

        while (count < len) {
                /*
                 * Set the qsfp page based on a zero-based address
                 * and a page size of QSFP_PAGESIZE bytes.
                 */
                page = (u8)(addr / QSFP_PAGESIZE);
                ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
                                  QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
                /* QSFPs require a 5-10msec delay after write operations */
                mdelay(5);
                if (ret) {
                        hfi1_dev_porterr(ppd->dd, ppd->port,
                                         "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
                                         target, ret);
                        break;
                }

                offset = addr % QSFP_PAGESIZE;
                nread = len - count;
                /* truncate read to boundary if crossing boundary */
                if (((addr % QSFP_RW_BOUNDARY) + nread) > QSFP_RW_BOUNDARY)
                        nread = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY);

                ret = __i2c_read(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
                                 offset, bp + count, nread);
                if (ret) /* stop on error */
                        break;

                count += nread;
                addr += nread;
        }

        if (ret < 0)
                return ret;
        return count;
}
/*
 * Perform a stand-alone single QSFP read.  Acquire the resource, do the
 * read, then release the resource.
 */
int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
                  int len)
{
        struct hfi1_devdata *dd = ppd->dd;
        u32 resource = qsfp_resource(dd);
        int ret;

        ret = acquire_chip_resource(dd, resource, QSFP_WAIT);
        if (ret)
                return ret;
        ret = qsfp_read(ppd, target, addr, bp, len);
        release_chip_resource(dd, resource);

        return ret;
}
/*
 * This function caches the QSFP memory range in 128 byte chunks.
 * As an example, the next byte after address 255 is byte 128 from
 * upper page 01H (if existing) rather than byte 0 from lower page 00H.
 * Access page n, offset m of QSFP memory as defined by SFF 8636
 * in the cache by reading byte ((128 * n) + m)
 *
 * The calls to qsfp_{read,write} in this function correctly handle the
 * address map difference between this mapping and the mapping implemented
 * by those functions.
 *
 * The caller must be holding the QSFP i2c chain resource.
 */
int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
{
        u32 target = ppd->dd->hfi1_id;
        int ret;
        unsigned long flags;
        u8 *cache = &cp->cache[0];

        /* ensure sane contents on invalid reads, for cable swaps */
        memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128));
        spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
        ppd->qsfp_info.cache_valid = 0;
        spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);

        if (!qsfp_mod_present(ppd)) {
                ret = -ENODEV;
                goto bail;
        }

        ret = qsfp_read(ppd, target, 0, cache, QSFP_PAGESIZE);
        if (ret != QSFP_PAGESIZE) {
                dd_dev_info(ppd->dd,
                            "%s: Page 0 read failed, expected %d, got %d\n",
                            __func__, QSFP_PAGESIZE, ret);
                goto bail;
        }

        /* Is paging enabled? */
        if (!(cache[2] & 4)) {
                /* Paging enabled, page 03 required */
                if ((cache[195] & 0xC0) == 0xC0) {
                        /* all */
                        ret = qsfp_read(ppd, target, 384, cache + 256, 128);
                        if (ret <= 0 || ret != 128) {
                                dd_dev_info(ppd->dd, "%s failed\n", __func__);
                                goto bail;
                        }
                        ret = qsfp_read(ppd, target, 640, cache + 384, 128);
                        if (ret <= 0 || ret != 128) {
                                dd_dev_info(ppd->dd, "%s failed\n", __func__);
                                goto bail;
                        }
                        ret = qsfp_read(ppd, target, 896, cache + 512, 128);
                        if (ret <= 0 || ret != 128) {
                                dd_dev_info(ppd->dd, "%s failed\n", __func__);
                                goto bail;
                        }
                } else if ((cache[195] & 0x80) == 0x80) {
                        /* only page 2 and 3 */
                        ret = qsfp_read(ppd, target, 640, cache + 384, 128);
                        if (ret <= 0 || ret != 128) {
                                dd_dev_info(ppd->dd, "%s failed\n", __func__);
                                goto bail;
                        }
                        ret = qsfp_read(ppd, target, 896, cache + 512, 128);
                        if (ret <= 0 || ret != 128) {
                                dd_dev_info(ppd->dd, "%s failed\n", __func__);
                                goto bail;
                        }
                } else if ((cache[195] & 0x40) == 0x40) {
                        /* only page 1 and 3 */
                        ret = qsfp_read(ppd, target, 384, cache + 256, 128);
                        if (ret <= 0 || ret != 128) {
                                dd_dev_info(ppd->dd, "%s failed\n", __func__);
                                goto bail;
                        }
                        ret = qsfp_read(ppd, target, 896, cache + 512, 128);
                        if (ret <= 0 || ret != 128) {
                                dd_dev_info(ppd->dd, "%s failed\n", __func__);
                                goto bail;
                        }
                } else {
                        /* only page 3 */
                        ret = qsfp_read(ppd, target, 896, cache + 512, 128);
                        if (ret <= 0 || ret != 128) {
                                dd_dev_info(ppd->dd, "%s failed\n", __func__);
                                goto bail;
                        }
                }
        }

        spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
        ppd->qsfp_info.cache_valid = 1;
        ppd->qsfp_info.cache_refresh_required = 0;
        spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);

        return 0;

bail:
        memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128));
        return ret;
}
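
/*
 * QSFP device technology strings, indexed by the upper nibble of the
 * SFF 8636 device technology byte.
 */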
const char * const hfi1_qsfp_devtech[16] = {
        "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
        "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
        "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
        "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
};

#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
#define QSFP_DEFAULT_HDR_CNT 224

#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
#define QSFP_HIGH_PWR(pbyte) ((pbyte) & 3)
/* For use with QSFP_HIGH_PWR macro */
#define QSFP_HIGH_PWR_UNUSED 0 /* Bits [1:0] = 00 implies low power module */
/*
 * Takes the power class byte [Page 00 Byte 129] in SFF 8636
 * Returns power class as integer (1 through 7, per SFF 8636 rev 2.4)
 */
int get_qsfp_power_class(u8 power_byte)
{
        if (QSFP_HIGH_PWR(power_byte) == QSFP_HIGH_PWR_UNUSED)
                /* power classes count from 1, their bit encodings from 0 */
                return (QSFP_PWR(power_byte) + 1);
        /*
         * 00 in the high power classes stands for unused; to balance the
         * off-by-1 offset above, we add 4 here to account for the
         * difference between the low and high power groups.
         */
        return (QSFP_HIGH_PWR(power_byte) + 4);
}
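
/*
 * Return 1 if a QSFP module is present on this port's chain, 0 otherwise.
 * The module-present signal (MODPRST_N) is active low.
 */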
int qsfp_mod_present(struct hfi1_pportdata *ppd)
{
        struct hfi1_devdata *dd = ppd->dd;
        u64 reg;

        reg = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
        return !(reg & QSFP_HFI0_MODPRST_N);
}
/*
 * This function maps QSFP memory addresses in 128 byte chunks in the
 * following fashion per the CableInfo SMA query definition in the IBA 1.3
 * spec/OPA Gen 1 spec:
 * For addr 000-127, lower page 00h
 * For addr 128-255, upper page 00h
 * For addr 256-383, upper page 01h
 * For addr 384-511, upper page 02h
 * For addr 512-639, upper page 03h
 *
 * For addresses beyond this range, the corresponding bytes of the data
 * buffer are returned set to 0.
 * For optional upper pages that are not valid, the corresponding range of
 * bytes in the data buffer is likewise returned set to 0.
 */
int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
                   u8 *data)
{
        struct hfi1_pportdata *ppd;
        u32 excess_len = len;
        int ret = 0, offset = 0;

        if (port_num > dd->num_pports || port_num < 1) {
                dd_dev_info(dd, "%s: Invalid port number %d\n",
                            __func__, port_num);
                ret = -EINVAL;
                goto set_zeroes;
        }

        ppd = dd->pport + (port_num - 1);
        if (!qsfp_mod_present(ppd)) {
                ret = -ENODEV;
                goto set_zeroes;
        }

        if (!ppd->qsfp_info.cache_valid) {
                ret = -EINVAL;
                goto set_zeroes;
        }

        if (addr >= (QSFP_MAX_NUM_PAGES * 128)) {
                ret = -ERANGE;
                goto set_zeroes;
        }

        if ((addr + len) > (QSFP_MAX_NUM_PAGES * 128)) {
                excess_len = (addr + len) - (QSFP_MAX_NUM_PAGES * 128);
                memcpy(data, &ppd->qsfp_info.cache[addr], (len - excess_len));
                data += (len - excess_len);
                goto set_zeroes;
        }

        memcpy(data, &ppd->qsfp_info.cache[addr], len);

        if (addr <= QSFP_MONITOR_VAL_END &&
            (addr + len) >= QSFP_MONITOR_VAL_START) {
                /* Overlap with the dynamic channel monitor range */
                if (addr < QSFP_MONITOR_VAL_START) {
                        if (addr + len <= QSFP_MONITOR_VAL_END)
                                len = addr + len - QSFP_MONITOR_VAL_START;
                        else
                                len = QSFP_MONITOR_RANGE;
                        offset = QSFP_MONITOR_VAL_START - addr;
                        addr = QSFP_MONITOR_VAL_START;
                } else if (addr == QSFP_MONITOR_VAL_START) {
                        offset = 0;
                        if (addr + len > QSFP_MONITOR_VAL_END)
                                len = QSFP_MONITOR_RANGE;
                } else {
                        offset = 0;
                        if (addr + len > QSFP_MONITOR_VAL_END)
                                len = QSFP_MONITOR_VAL_END - addr + 1;
                }
                /* Refresh the values of the dynamic monitors from the cable */
                ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len);
                if (ret != len) {
                        ret = -EAGAIN;
                        goto set_zeroes;
                }
        }

        return 0;

set_zeroes:
        memset(data, 0, excess_len);
        return ret;
}
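
/*
 * Power class strings, indexed by the value returned from
 * get_qsfp_power_class(); index 0 is a placeholder.
 */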
static const char *pwr_codes[8] = {
        "N/AW", "1.5W", "2.0W", "2.5W", "3.5W", "4.0W", "4.5W", "5.0W"
};
int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
{
        u8 *cache = &ppd->qsfp_info.cache[0];
        u8 bin_buff[QSFP_DUMP_CHUNK];
        char lenstr[6];
        int sofar;
        int bidx = 0;
        u8 *atten = &cache[QSFP_ATTEN_OFFS];
        u8 *vendor_oui = &cache[QSFP_VOUI_OFFS];
        u8 power_byte = 0;

        sofar = 0;
        lenstr[0] = ' ';
        lenstr[1] = '\0';

        if (ppd->qsfp_info.cache_valid) {
                if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
                        snprintf(lenstr, sizeof(lenstr), "%dM ",
                                 cache[QSFP_MOD_LEN_OFFS]);

                power_byte = cache[QSFP_MOD_PWR_OFFS];
                sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
                                   pwr_codes[get_qsfp_power_class(power_byte)]);

                sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n",
                                   lenstr,
                                   hfi1_qsfp_devtech[(cache[QSFP_MOD_TECH_OFFS]) >> 4]);

                sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
                                   QSFP_VEND_LEN, &cache[QSFP_VEND_OFFS]);

                sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
                                   QSFP_OUI(vendor_oui));

                sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
                                   QSFP_PN_LEN, &cache[QSFP_PN_OFFS]);

                sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
                                   QSFP_REV_LEN, &cache[QSFP_REV_OFFS]);

                if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
                        sofar += scnprintf(buf + sofar, len - sofar,
                                           "Atten:%d, %d\n",
                                           QSFP_ATTEN_SDR(atten),
                                           QSFP_ATTEN_DDR(atten));

                sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
                                   QSFP_SN_LEN, &cache[QSFP_SN_OFFS]);

                sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
                                   QSFP_DATE_LEN, &cache[QSFP_DATE_OFFS]);

                sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
                                   QSFP_LOT_LEN, &cache[QSFP_LOT_OFFS]);

                while (bidx < QSFP_DEFAULT_HDR_CNT) {
                        int iidx;

                        memcpy(bin_buff, &cache[bidx], QSFP_DUMP_CHUNK);
                        for (iidx = 0; iidx < QSFP_DUMP_CHUNK; ++iidx) {
                                sofar += scnprintf(buf + sofar, len - sofar,
                                                   " %02X", bin_buff[iidx]);
                        }
                        sofar += scnprintf(buf + sofar, len - sofar, "\n");
                        bidx += QSFP_DUMP_CHUNK;
                }
        }
        return sofar;
}