/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;

/* FCoE Adapter types & its description */
static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"},
	{"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"},
	{"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"}
};

static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);

int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}

/*
 *	csio_hw_wait_op_done_val - wait until an operation is completed
 *	@hw: the HW module
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
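
/*
 * Illustrative usage (mirroring the serial-flash helpers further down in
 * this file): poll SF_OP_A until SF_BUSY_F deasserts, checking up to
 * SF_ATTEMPTS times with a 10 usec delay between checks:
 *
 *	ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0,
 *				       SF_ATTEMPTS, 10, NULL);
 */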

/*
 *	csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@hw: the HW module
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
	val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA_A);
}

void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	/* Flush */
	csio_rd_reg32(hw, reg);
}
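
/*
 * Note: the trailing read-back in csio_set_reg_field() flushes the posted
 * write so the new field value takes effect before the caller proceeds,
 * e.g. (as used by csio_hw_fw_restart() below):
 *
 *	csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);
 */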

static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3
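
/*
 * Note (illustrative): addresses passed to csio_hw_seeprom_read() below are
 * "virtual" byte offsets into the EEPROM and must be 32-bit aligned; the
 * VPD parsing code reads the area a word at a time starting at VPD_BASE
 * (0x400), falling back to offset 0 on early cards.
 */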

/*
 *	csio_hw_seeprom_read - read a serial EEPROM location
 *	@hw: hw to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's
 *	PCI VPD capability.  Note that this function must be called with a
 *	virtual address.
 */
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
	uint16_t val = 0;
	int attempts = EEPROM_MAX_RD_POLL;
	uint32_t base = hw->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

	do {
		udelay(10);
		pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
		return -EINVAL;
	}

	pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*(__le32 *)data);

	return 0;
}

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 *	csio_hw_get_vpd_keyword_val - Locates an information field keyword in
 *				      the VPD
 *	@v: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *
 *	Returns the value of the information field keyword or
 *	-EINVAL otherwise.
 */
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int32_t i;
	int32_t offset, len;
	const uint8_t *buf = &v->id_tag;
	const uint8_t *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -EINVAL;

	for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -EINVAL;
}
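
/*
 * Each VPD information field is a 3-byte header (2-byte keyword plus a
 * 1-byte length) followed by the data, which is why the scan above advances
 * by VPD_INFO_FLD_HDR_SIZE + buf[i+2].  Illustrative lookup, as done via
 * FIND_VPD_KW() in csio_hw_get_vpd_params() below:
 *
 *	sn = csio_hw_get_vpd_keyword_val(v, "SN");
 */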

static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);

	if (*pos)
		return 0;

	return -1;
}

/*
 *	csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 *	@hw: HW module
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
	int i, ret, ec, sn, addr;
	uint8_t *vpd, csum;
	const struct t4_vpd_hdr *v;
	/* To get around compilation warning from strstrip */
	char *s;

	if (csio_is_valid_vpd(hw))
		return 0;

	ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
				  &hw->params.pci.vpd_cap_addr);
	if (ret)
		return -EINVAL;

	vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
	if (vpd == NULL)
		return -ENOMEM;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
		if (ret) {
			kfree(vpd);
			return -EINVAL;
		}
	}

	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);

	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}

/*
 *	csio_hw_sf1_read - read data from the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to read (1-4)
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
		      BYTECNT_V(byte_cnt - 1), SF_OP_A);
	ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA_A);

	return ret;
}
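
/*
 * The @cont flag chains successive SF operations: csio_hw_read_flash()
 * below first issues the SF_RD_DATA_FAST command with cont set, then runs
 * chained 4-byte reads, dropping cont only on the final word so the
 * interface releases the flash.
 */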

/*
 *	csio_hw_sf1_write - write data to the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to write (1-4)
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, val, SF_DATA_A);
	csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
		      OP_V(1) | SF_LOCK_V(lock), SF_OP_A);

	return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
					10, NULL);
}

/*
 *	csio_hw_flash_wait_op - wait for a flash operation to complete
 *	@hw: the HW module
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
	int ret;
	uint32_t status;

	while (1) {
		ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
		if (ret != 0)
			return ret;

		ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
		if (ret != 0)
			return ret;

		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/*
 *	csio_hw_read_flash - read words from serial flash
 *	@hw: the HW module
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
		   uint32_t *data, int32_t byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) htonl(*data);
	}

	return 0;
}
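
/*
 * Reading with @byte_oriented set keeps the data in flash (big-endian)
 * byte order; csio_hw_write_flash() below relies on this when it reads the
 * page back and memcmp()s it against the bytes it just wrote.
 */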

/*
 *	csio_hw_write_flash - write up to a page of data to the serial flash
 *	@hw: the HW module
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}

	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
	return ret;
}

/*
 *	csio_hw_flash_erase_sectors - erase a range of flash sectors
 *	@hw: the HW module
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
	int ret = 0;

	while (start <= end) {

		ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
		if (ret != 0)
			goto out;

		ret = csio_hw_sf1_write(hw, 4, 0, 1,
					SF_ERASE_SECTOR | (start << 8));
		if (ret != 0)
			goto out;

		ret = csio_hw_flash_wait_op(hw, 14, 500);
		if (ret != 0)
			goto out;

		start++;
	}
out:
	if (ret)
		csio_err(hw, "erase of flash sector %d failed, error %d\n",
			 start, ret);
	csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
	return 0;
}
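
/*
 * Worked timing (illustrative): each sector erase above polls the status
 * register up to 14 times with a 500 ms delay, so a single sector may take
 * up to ~7 seconds before -EAGAIN is returned.
 */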

static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_G(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_G(hw->fwrev));
}

/*
 * csio_hw_get_fw_version - read the firmware version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}

/*
 * csio_hw_get_tp_version - read the TP microcode version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
			offsetof(struct fw_hdr, tp_microcode_ver), 1,
			vers, 0);
}

/*
 * csio_hw_fw_dload - download firmware.
 * @hw: HW module.
 * @fw_data: firmware image to write.
 * @size: image size.
 *
 * Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FLASH_FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FLASH_FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
					  FLASH_FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FLASH_FW_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = csio_hw_write_flash(hw,
				  FLASH_FW_START +
					offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}

static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	int ret;
	uint32_t info = 0;

	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
	csio_wr_reg32(hw, 0, SF_OP_A);    /* unlock SF */
	if (ret != 0)
		return ret;

	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		hw->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		hw->params.sf_nsec = 64;
	else
		return -EINVAL;
	hw->params.sf_size = 1 << info;
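
	/*
	 * Worked example (illustrative): a part reporting info = 0x18 yields
	 * sf_size = 1 << 0x18 = 16 MB arranged as 64 sectors, i.e. a 256 KB
	 * sector size as computed in csio_hw_fw_dload() via
	 * sf_size / sf_nsec.
	 */
	return 0;
}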

/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/

static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;
	int src_pf;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		src_pf = SOURCEPF_G(reg);
	else
		src_pf = T6_SOURCEPF_G(reg);

	if ((cnt == 0) && (((int32_t)(src_pf) < 0) ||
			   (src_pf >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	hw->pfn = src_pf;

	return 0;
}

/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb *mbp;
	int rv = 0;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, CSIO_MASTER_MAY, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly.  (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time).  In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_MASK so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (pcie_fw & PCIE_FW_ERR_F) {
				*state = CSIO_DEV_STATE_ERR;
				rv = -ETIMEDOUT;
			} else if (pcie_fw & PCIE_FW_INIT_F)
				*state = CSIO_DEV_STATE_INIT;

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				mpfn = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
		    "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
		    hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}

/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 *
 */
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_do_reset - Perform the device reset.
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If fw_rst is set, issues FW reset mbox cmd otherwise
 * does PIO reset.
 * Performs reset of the function.
 */
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	if (!fw_rst) {
		/* PIO reset */
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		return 0;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
		      PIORSTMODE_F | PIORST_F, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of RESET command failed.\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
	uint16_t caps;

	caps = ntohs(rsp->fcoecaps);

	if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
		csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
		return -EINVAL;
	}

	if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
		csio_err(hw, "No FCoE Control Offload capability\n");
		return -EINVAL;
	}

	return 0;
}

/*
 *	csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	PCIE_FW_MASTER_MASK).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware.
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct csio_mb *mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
				   PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}
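
/*
 * csio_hw_fw_halt() is paired with csio_hw_fw_restart() below;
 * csio_hw_fw_upgrade() shows the intended sequence: halt the uP, download
 * the new image, then restart (with a full chip RESET for older firmware
 * that doesn't understand PCIE_FW.HALT).
 */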

/*
 *	csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 *	@hw: the HW module
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by csio_hw_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	1. If we're dealing with newer firmware we'll simply want to take
 *	   the chip's microprocessor out of RESET.  This will cause the
 *	   firmware to start up from its start vector.  And then we'll loop
 *	   until the firmware indicates it's started again (PCIE_FW.HALT
 *	   reset to 0) or we timeout.
 *
 *	2. If we're dealing with older firmware then we'll need to RESET
 *	   the chip since older firmware won't recognize the PCIE_FW.HALT
 *	   flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		msleep(2000);
	} else {
		int ms;

		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 *	csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}
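
/*
 * Illustrative caller (see csio_hw_prep_fw() further below): upgrade using
 * our own PF's mailbox and without forcing past an uncooperative firmware:
 *
 *	ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data, fw_size, 0);
 */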

/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports = hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv)
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}

static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
{
	enum cc_fec cc_fec = 0;

	if (fw_fec & FW_PORT_CAP32_FEC_RS)
		cc_fec |= FEC_RS;
	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
		cc_fec |= FEC_BASER_RS;

	return cc_fec;
}

static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
{
	fw_port_cap32_t fw_pause = 0;

	if (cc_pause & PAUSE_RX)
		fw_pause |= FW_PORT_CAP32_FC_RX;
	if (cc_pause & PAUSE_TX)
		fw_pause |= FW_PORT_CAP32_FC_TX;

	return fw_pause;
}

static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
{
	fw_port_cap32_t fw_fec = 0;

	if (cc_fec & FEC_RS)
		fw_fec |= FW_PORT_CAP32_FEC_RS;
	if (cc_fec & FEC_BASER_RS)
		fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;

	return fw_fec;
}

/**
 *	fwcap_to_fwspeed - return highest speed in Port Capabilities
 *	@acaps: advertised Port Capabilities
 *
 *	Get the highest speed for the port from the advertised Port
 *	Capabilities.
 */
fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
{
	#define TEST_SPEED_RETURN(__caps_speed) \
		do { \
			if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
				return FW_PORT_CAP32_SPEED_##__caps_speed; \
		} while (0)

	TEST_SPEED_RETURN(400G);
	TEST_SPEED_RETURN(200G);
	TEST_SPEED_RETURN(100G);
	TEST_SPEED_RETURN(50G);
	TEST_SPEED_RETURN(40G);
	TEST_SPEED_RETURN(25G);
	TEST_SPEED_RETURN(10G);
	TEST_SPEED_RETURN(1G);
	TEST_SPEED_RETURN(100M);

	#undef TEST_SPEED_RETURN

	return 0;
}
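
/*
 * The TEST_SPEED_RETURN() checks run from highest to lowest speed, so the
 * first advertised bit wins; e.g. an @acaps advertising both 25G and 10G
 * yields FW_PORT_CAP32_SPEED_25G.
 */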

/**
 *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
 *	@caps16: a 16-bit Port Capabilities value
 *
 *	Returns the equivalent 32-bit Port Capabilities value.
 */
fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
{
	fw_port_cap32_t caps32 = 0;

	#define CAP16_TO_CAP32(__cap) \
		do { \
			if (caps16 & FW_PORT_CAP_##__cap) \
				caps32 |= FW_PORT_CAP32_##__cap; \
		} while (0)

	CAP16_TO_CAP32(SPEED_100M);
	CAP16_TO_CAP32(SPEED_1G);
	CAP16_TO_CAP32(SPEED_25G);
	CAP16_TO_CAP32(SPEED_10G);
	CAP16_TO_CAP32(SPEED_40G);
	CAP16_TO_CAP32(SPEED_100G);
	CAP16_TO_CAP32(FC_RX);
	CAP16_TO_CAP32(FC_TX);
	CAP16_TO_CAP32(ANEG);
	CAP16_TO_CAP32(MDIAUTO);
	CAP16_TO_CAP32(MDISTRAIGHT);
	CAP16_TO_CAP32(FEC_RS);
	CAP16_TO_CAP32(FEC_BASER_RS);
	CAP16_TO_CAP32(802_3_PAUSE);
	CAP16_TO_CAP32(802_3_ASM_DIR);

	#undef CAP16_TO_CAP32

	return caps32;
}

/**
 *	lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
 *	@lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
 *
 *	Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
 *	32-bit Port Capabilities value.
 */
fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
{
	fw_port_cap32_t linkattr = 0;

	/* The format of the Link Status in the old
	 * 16-bit Port Information message isn't the same as the
	 * 16-bit Port Capabilities bitfield used everywhere else.
	 */
	if (lstatus & FW_PORT_CMD_RXPAUSE_F)
		linkattr |= FW_PORT_CAP32_FC_RX;
	if (lstatus & FW_PORT_CMD_TXPAUSE_F)
		linkattr |= FW_PORT_CAP32_FC_TX;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
		linkattr |= FW_PORT_CAP32_SPEED_100M;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
		linkattr |= FW_PORT_CAP32_SPEED_1G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
		linkattr |= FW_PORT_CAP32_SPEED_10G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
		linkattr |= FW_PORT_CAP32_SPEED_25G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
		linkattr |= FW_PORT_CAP32_SPEED_40G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
		linkattr |= FW_PORT_CAP32_SPEED_100G;

	return linkattr;
}

/**
 *	csio_init_link_config - initialize a link's SW state
 *	@lc: pointer to structure holding the link state
 *	@pcaps: link Port Capabilities
 *	@acaps: link current Advertised Port Capabilities
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/flow-control/autonegotiation settings.
 */
static void csio_init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
				  fw_port_cap32_t acaps)
{
	lc->pcaps = pcaps;
	lc->def_acaps = acaps;
	lc->lpacaps = 0;
	lc->speed_caps = 0;
	lc->speed = 0;
	lc->requested_fc = PAUSE_RX | PAUSE_TX;
	lc->fc = lc->requested_fc;

	/*
	 * For Forward Error Control, we default to whatever the Firmware
	 * tells us the Link is currently advertising.
	 */
	lc->requested_fec = FEC_AUTO;
	lc->fec = fwcap_to_cc_fec(lc->def_acaps);

	/* If the Port is capable of Auto-Negotiation, initialize it as
	 * "enabled" and copy over all of the Physical Port Capabilities
	 * to the Advertised Port Capabilities.  Otherwise mark it as
	 * Auto-Negotiate disabled and select the highest supported speed
	 * for the link.  Note parallel structure in t4_link_l1cfg_core()
	 * and t4_handle_get_port_info().
	 */
	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
		lc->acaps = lc->pcaps & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->acaps = 0;
		lc->autoneg = AUTONEG_DISABLE;
		lc->speed_caps = fwcap_to_fwspeed(acaps);
	}
}

static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps,
			    uint32_t *rcaps)
{
	unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO);
	fw_port_cap32_t fw_fc, cc_fec, fw_fec, lrcap;

	lc->link_ok = 0;

	/*
	 * Convert driver coding of Pause Frame Flow Control settings into the
	 * Firmware's API.
	 */
	fw_fc = cc_to_fwcap_pause(lc->requested_fc);

	/*
	 * Convert Common Code Forward Error Control settings into the
	 * Firmware's API.  If the current Requested FEC has "Automatic"
	 * (IEEE 802.3) specified, then we use whatever the Firmware
	 * sent us as part of its IEEE 802.3-based interpretation of
	 * the Transceiver Module EPROM FEC parameters.  Otherwise we
	 * use whatever is in the current Requested FEC settings.
	 */
	if (lc->requested_fec & FEC_AUTO)
		cc_fec = fwcap_to_cc_fec(lc->def_acaps);
	else
		cc_fec = lc->requested_fec;
	fw_fec = cc_to_fwcap_fec(cc_fec);

	/* Figure out what our Requested Port Capabilities are going to be.
	 * Note parallel structure in t4_handle_get_port_info() and
	 * init_link_config().
	 */
	if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
		lrcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		lrcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else {
		lrcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
	}

	*rcaps = lrcap;
}

/*
 * csio_enable_ports - Bring up all available ports.
 * @hw: HW module.
 *
 */
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	u16 fw_caps = FW_CAPS_UNKNOWN;
	enum fw_retval retval;
	uint8_t portid;
	fw_port_cap32_t pcaps, acaps, rcaps;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		if (fw_caps == FW_CAPS_UNKNOWN) {
			u32 param, val;

			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
			val = 1;

			csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,
				       hw->pfn, 0, 1, &param, &val, false,
				       NULL);

			if (csio_mb_issue(hw, mbp)) {
				csio_err(hw, "failed to issue FW_PARAMS_CMD(r) port:%d\n",
					 portid);
				mempool_free(mbp, hw->mb_mempool);
				return -EINVAL;
			}

			csio_mb_process_read_params_rsp(hw, mbp, &retval, 1,
							&val);
			if (retval != FW_SUCCESS) {
				csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n",
					 portid, retval);
				mempool_free(mbp, hw->mb_mempool);
				return -EINVAL;
			}

			fw_caps = val;
		}

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, fw_caps, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval, fw_caps,
					      &pcaps, &acaps);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_init_link_config(&hw->pport[i].link_cfg, pcaps, acaps);

		csio_link_l1cfg(&hw->pport[i].link_cfg, fw_caps, &rcaps);

		/* Write back PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     true, rcaps, fw_caps, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

	} /* For all ports */

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource information */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	*param = _param[0];

	return 0;
}

static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;
	const char *fw_cfg_file;

	if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
		fw_cfg_file = FW_CFG_NAME_T5;
	else
		fw_cfg_file = FW_CFG_NAME_T6;

	if (request_firmware(&cf, fw_cfg_file, dev) < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 fw_cfg_file, ret);
		return -ENOENT;
	}

	if (cf->size%4 != 0)
		value_to_add = 4 - (cf->size % 4);

	cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n", fw_cfg_file);
		snprintf(path, 64, "%s%s", "/lib/firmware/", fw_cfg_file);
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}

/*
 * HW initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration -- either using the configuration
 * file stored in flash on the adapter or using a filesystem-local file
 * if available.
 *
 * If we don't have configuration file support in the firmware, then we'll
 * have to set things up the old fashioned way with hard-coded register
 * writes and firmware commands ...
 */

/*
 * Attempt to initialize the HW via a Firmware Configuration File.
 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	struct csio_mb *mbp = NULL;
	struct fw_caps_config_cmd *caps_cmd;
	unsigned int mtype, maddr;
	int rv = -EINVAL;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	char path[64];
	char *config_name = NULL;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file in the host,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the HW flash ...
	 */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv != 0) {
		/*
		 * config file was not found. Use default
		 * config file from flash.
		 */
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
	} else {
		config_name = path;
		mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the
	 * firmware.  A very few early versions of the firmware didn't
	 * have one embedded but we can ignore those.
	 */
	if (rv == ENOENT) {
		CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
		caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
					      FW_CMD_REQUEST_F |
					      FW_CMD_READ_F);
		caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

		if (csio_mb_issue(hw, mbp)) {
			rv = -EINVAL;
			goto bye;
		}

		rv = csio_mb_fw_retval(mbp);
		config_name = "Firmware Default";
	}
	if (rv != FW_SUCCESS)
		goto bye;

	finiver = ntohl(caps_cmd->finiver);
	finicsum = ntohl(caps_cmd->finicsum);
	cfcsum = ntohl(caps_cmd->cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	if (rv != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
		goto bye;
	}

	if (finicsum != cfcsum) {
		csio_warn(hw,
		      "Config File checksum mismatch: csum=%#x, computed=%#x\n",
		      finicsum, cfcsum);
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto bye;

	mempool_free(mbp, hw->mb_mempool);
	mbp = NULL;

	/*
	 * Note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw, "Successfully configured using Firmware "
		  "Configuration File %s, version %#x, computed checksum %#x\n",
		  config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...
	 */
bye:
	if (mbp)
		mempool_free(mbp, hw->mb_mempool);
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_warn(hw, "Configuration file error %d\n", rv);
	return rv;
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
				     int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
		 "installing firmware %u.%u.%u.%u on card.\n",
		 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
		 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
		 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
		 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));

	return 1;
}

static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T5,
		.fs_name = FW_CFG_NAME_T5,
		.fw_mod_name = FW_FNAME_T5,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW_CFG_NAME_T6,
		.fw_mod_name = FW_FNAME_T6,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}

static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
			   const u8 *fw_data, unsigned int fw_size,
			   struct fw_hdr *card_fw, enum csio_dev_state state,
			   int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = csio_hw_read_flash(hw, FLASH_FW_START,
				 sizeof(*card_fw) / sizeof(uint32_t),
				 (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		csio_err(hw,
			 "Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT &&
		   csio_should_install_fs_fw(hw, card_fw_usable,
					     be32_to_cpu(fs_fw->fw_ver),
					     be32_to_cpu(card_fw->fw_ver))) {
		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
					 fw_size, 0);
		if (ret != 0) {
			csio_err(hw,
				 "failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		csio_err(hw, "Cannot find a usable firmware: "
			 "driver compiled with %d.%d.%d.%d, "
			 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = -EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	hw->fwrev = be32_to_cpu(card_fw->fw_ver);
	hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}
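
/*
 * Note (illustrative): the 32-bit fw_ver packs the version as four bytes,
 * major.minor.micro.build, which the FW_HDR_FW_VER_*_G() macros used in
 * the version messages above extract.
 */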
/*
 * Returns -EINVAL if attempts to flash the firmware failed,
 * -ENOMEM if memory allocation failed, else returns 0 on success;
 * if flashing was not attempted because the card already had the
 * latest firmware, -ECANCELED is returned.
 */
static int
csio_hw_flash_fw(struct csio_hw *hw, int *reset)
{
	int ret = -ECANCELED;
	const struct firmware *fw;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	const u8 *fw_data = NULL;
	unsigned int fw_size = 0;
	const char *fw_bin_file;

	/* This is the firmware whose headers the driver was compiled
	 * against.
	 */
	fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
	if (fw_info == NULL) {
		csio_err(hw,
			 "unable to get firmware info for chip %d.\n",
			 CHELSIO_CHIP_VERSION(hw->chip_id));
		return -EINVAL;
	}

	if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
		fw_bin_file = FW_FNAME_T5;
	else
		fw_bin_file = FW_FNAME_T6;

	if (request_firmware(&fw, fw_bin_file, dev) < 0) {
		csio_err(hw, "could not find firmware image %s, err: %d\n",
			 fw_bin_file, ret);
	} else {
		fw_data = fw->data;
		fw_size = fw->size;
	}

	/* allocate memory to read the header of the firmware on the
	 * card
	 */
	card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
	if (!card_fw) {
		release_firmware(fw);
		return -ENOMEM;
	}

	/* upgrade FW logic */
	ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
			      hw->fw_state, reset);

	/* Cleaning up */
	release_firmware(fw);
	kfree(card_fw);
	return ret;
}
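
/*
 * Note: release_firmware() accepts a NULL pointer, so the cleanup above
 * is safe even when request_firmware() failed and @fw was never
 * populated.
 */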
static int csio_hw_check_fwver(struct csio_hw *hw)
{
	if (csio_is_t6(hw->pdev->device & CSIO_HW_CHIP_MASK) &&
	    (hw->fwrev < CSIO_MIN_T6_FW)) {
		csio_hw_print_fw_version(hw, "T6 unsupported fw");
		return -1;
	}

	return 0;
}
/*
 * csio_hw_configure - Configure HW
 * @hw - HW module
 *
 */
static void
csio_hw_configure(struct csio_hw *hw)
{
	int reset = 1;
	int rv;
	u32 param[1];

	rv = csio_hw_dev_ready(hw);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* HW version */
	hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);

	/* Needed for FW download */
	rv = csio_hw_get_flash_params(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Set PCIe completion timeout to 4 seconds */
	if (pci_is_pcie(hw->pdev))
		pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
				PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);

	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);

	rv = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (rv != 0)
		goto out;

	csio_hw_print_fw_version(hw, "Firmware revision");

	rv = csio_do_hello(hw, &hw->fw_state);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Read vpd */
	rv = csio_hw_get_vpd_params(hw, &hw->vpd);
	if (rv != 0)
		goto out;

	csio_hw_get_fw_version(hw, &hw->fwrev);
	csio_hw_get_tp_version(hw, &hw->tp_vers);
	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {

		/* Do firmware update */
		spin_unlock_irq(&hw->lock);
		rv = csio_hw_flash_fw(hw, &reset);
		spin_lock_irq(&hw->lock);

		if (rv != 0)
			goto out;

		rv = csio_hw_check_fwver(hw);
		if (rv < 0)
			goto out;

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		rv = csio_hw_check_fwconfig(hw, param);
		if (rv != 0) {
			csio_info(hw, "Firmware doesn't support "
				  "Firmware Configuration files\n");
			goto out;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		rv = csio_hw_use_fwconfig(hw, reset, param);
		if (rv == -ENOENT) {
			csio_info(hw, "Could not initialize "
				  "adapter, error %d\n", rv);
			goto out;
		}
		if (rv != 0) {
			csio_info(hw, "Could not initialize "
				  "adapter, error %d\n", rv);
			goto out;
		}
	} else {
		rv = csio_hw_check_fwver(hw);
		if (rv < 0)
			goto out;

		if (hw->fw_state == CSIO_DEV_STATE_INIT) {

			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

			/* device parameters */
			rv = csio_get_device_params(hw);
			if (rv != 0)
				goto out;

			/* Get device capabilities */
			rv = csio_config_device_caps(hw);
			if (rv != 0)
				goto out;

			/* Configure SGE */
			csio_wr_sge_init(hw);

			/* Post event to notify completion of configuration */
			csio_post_event(&hw->sm, CSIO_HWE_INIT);
		}
	} /* if not master */

out:
	return;
}
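
/*
 * Configuration flow in brief: only the master PF (with firmware not
 * yet initialized) flashes/upgrades firmware and pushes a Configuration
 * File; a non-master PF, or an already-initialized device, just
 * validates the firmware version and, when the firmware is already in
 * the INIT state, picks up its parameters and capabilities before
 * posting CSIO_HWE_INIT.
 */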
/*
 * csio_hw_initialize - Initialize HW
 * @hw - HW module
 *
 */
static void
csio_hw_initialize(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv;
	int i;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp)
			goto out;

		csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
			goto free_and_out;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
				 retval);
			goto free_and_out;
		}

		mempool_free(mbp, hw->mb_mempool);
	}

	rv = csio_get_fcoe_resinfo(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
		goto out;
	}

	spin_unlock_irq(&hw->lock);
	rv = csio_config_queues(hw);
	spin_lock_irq(&hw->lock);

	if (rv != 0) {
		csio_err(hw, "Config of queues failed!: %d\n", rv);
		goto out;
	}

	for (i = 0; i < hw->num_pports; i++)
		hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_enable_ports(hw);
		if (rv != 0) {
			csio_err(hw, "Failed to enable ports: %d\n", rv);
			goto out;
		}
	}

	csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
	return;

free_and_out:
	mempool_free(mbp, hw->mb_mempool);
out:
	return;
}
#define PF_INTR_MASK (PFSW_F | PFCIM_F)

/*
 * csio_hw_intr_enable - Enable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Enable interrupts in HW registers.
 */
void
csio_hw_intr_enable(struct csio_hw *hw)
{
	uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
	u32 pf = 0;
	uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);

	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
	else
		pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));

	/*
	 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
	 * by FW, so do nothing for INTX.
	 */
	if (hw->intr_mode == CSIO_IM_MSIX)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
				   AIVEC_V(AIVEC_M), vec);
	else if (hw->intr_mode == CSIO_IM_MSI)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
				   AIVEC_V(AIVEC_M), 0);

	csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));

	/* Turn on MB interrupts - this will internally flush PIO as well */
	csio_mb_intr_enable(hw);

	/* These are common registers - only a master can modify them */
	if (csio_is_hw_master(hw)) {
		/*
		 * Disable the Serial FLASH interrupt, if enabled!
		 */
		pl &= ~SF_F;
		csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);

		csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
			      EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
			      ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
			      ERR_DATA_CPL_ON_HIGH_QID1_F |
			      ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
			      ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
			      ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
			      ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
			      SGE_INT_ENABLE3_A);
		csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
	}

	hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
}
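
/*
 * Note the split above: the MYPF_REG() writes (PCIE_PF_CFG_A,
 * PL_PF_INT_ENABLE_A) touch only this function's register block, while
 * PL_INT_ENABLE_A, SGE_INT_ENABLE3_A and PL_INT_MAP0_A are adapter-wide
 * registers and are therefore written only when this PF is the master.
 */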
/*
 * csio_hw_intr_disable - Disable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Turn off Mailbox and PCI_PF_CFG interrupts.
 */
void
csio_hw_intr_disable(struct csio_hw *hw)
{
	u32 pf = 0;

	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
	else
		pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));

	if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
		return;

	hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;

	csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
	if (csio_is_hw_master(hw))
		csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);

	/* Turn off MB interrupts */
	csio_mb_intr_disable(hw);
}
void
csio_hw_fatal_err(struct csio_hw *hw)
{
	csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	csio_hw_intr_disable(hw);

	/* Do not reset HW, we may need FW state for debugging */
	csio_fatal(hw, "HW Fatal error encountered!\n");
}
/*****************************************************************************/
/* START: HW SM                                                              */
/*****************************************************************************/
/*
 * csio_hws_uninit - Uninit state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_CFG:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_configuring - Configuring state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT:
		csio_set_state(&hw->sm, csio_hws_initializing);
		csio_hw_initialize(hw);
		break;

	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);
		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_initializing - Initializing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);

		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);

		/* Enable interrupts */
		csio_hw_intr_enable(hw);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_ready - Ready state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
{
	/* Remember the event */
	hw->evtflag = evt;

	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
	case CSIO_HWE_FW_DLOAD:
	case CSIO_HWE_SUSPEND:
	case CSIO_HWE_PCI_REMOVE:
	case CSIO_HWE_PCIERR_DETECTED:
		csio_set_state(&hw->sm, csio_hws_quiescing);
		/* cleanup all outstanding cmds */
		if (evt == CSIO_HWE_HBA_RESET ||
		    evt == CSIO_HWE_PCIERR_DETECTED)
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
		else
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);

		csio_hw_intr_disable(hw);
		csio_hw_mbm_cleanup(hw);
		csio_evtq_stop(hw);
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
		csio_evtq_flush(hw);
		csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
		csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_quiescing - Quiescing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_QUIESCED:
		switch (hw->evtflag) {
		case CSIO_HWE_FW_DLOAD:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Download firmware */
			fallthrough;

		case CSIO_HWE_HBA_RESET:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Start reset of the HBA */
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
			csio_wr_destroy_queues(hw, false);
			csio_do_reset(hw, false);
			csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
			break;

		case CSIO_HWE_PCI_REMOVE:
			csio_set_state(&hw->sm, csio_hws_removing);
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
			csio_wr_destroy_queues(hw, true);
			/* Now send the bye command */
			csio_do_bye(hw);
			break;

		case CSIO_HWE_SUSPEND:
			csio_set_state(&hw->sm, csio_hws_quiesced);
			break;

		case CSIO_HWE_PCIERR_DETECTED:
			csio_set_state(&hw->sm, csio_hws_pcierr);
			csio_wr_destroy_queues(hw, false);
			break;

		default:
			CSIO_INC_STATS(hw, n_evt_unexp);
			break;
		}
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_quiesced - Quiesced state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_RESUME:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_resetting - HW Resetting state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET_DONE:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_removing - PCI Hotplug removing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
		if (!csio_is_hw_master(hw))
			break;
		/*
		 * The BYE should have already been issued, so we can't
		 * use the mailbox interface. Hence we use the PL_RST
		 * register directly.
		 */
		csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		break;

	/* Should never receive any new events */
	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_pcierr - PCI Error state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_PCIERR_SLOT_RESET:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
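
/*
 * HW state machine at a glance (events in capitals):
 *
 *   uninit --CFG--> configuring --INIT--> initializing --INIT_DONE--> ready
 *      ^                |                     |
 *      +-----FATAL------+---------FATAL-------+
 *
 *   ready --{HBA_RESET,FW_DLOAD,SUSPEND,PCI_REMOVE,PCIERR_DETECTED}-->
 *   quiescing --QUIESCED--> resetting | quiesced | removing | pcierr
 *
 *   resetting --HBA_RESET_DONE----> configuring
 *   quiesced  --RESUME------------> configuring
 *   pcierr    --PCIERR_SLOT_RESET-> configuring
 */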
/*****************************************************************************/
/* END: HW SM                                                                */
/*****************************************************************************/
/*
 *	csio_handle_intr_status - table driven interrupt handler
 *	@hw: HW instance
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred. The actions include
 *	optionally emitting a warning or alert message. The table is terminated
 *	by an entry specifying mask 0. Returns the number of fatal interrupt
 *	conditions.
 */
int
csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
			const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = csio_rd_reg32(hw, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			csio_fatal(hw, "Fatal %s (0x%x)\n",
				   acts->msg, status & acts->mask);
		} else if (acts->msg)
			csio_info(hw, "%s (0x%x)\n",
				  acts->msg, status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		csio_wr_reg32(hw, status, reg);
	return fatal;
}
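
/*
 * Each intr_info table below is a list of { mask, msg, stat_idx, fatal }
 * entries terminated by a zero mask. The third member is -1 in every
 * table in this file; it is a statistics index that this handler does
 * not consult.
 */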
/*
 * TP interrupt handler.
 */
static void csio_tp_intr_handler(struct csio_hw *hw)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * SGE interrupt handler.
 */
static void csio_sge_intr_handler(struct csio_hw *hw)
{
	uint64_t v;

	static struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE_F,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC_F,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO_F,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO_F,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
		{ 0, NULL, 0, 0 }
	};

	v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
	    ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
	if (v) {
		csio_fatal(hw, "SGE parity error (%#llx)\n",
			   (unsigned long long)v);
		csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
			      SGE_INT_CAUSE1_A);
		csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
	}

	/* CAUSE3 is processed (and cleared) once; the result is folded
	 * into the same fatality check as the parity causes above.
	 */
	v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);

	if (v != 0)
		csio_hw_fatal_err(hw);
}
#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)

/*
 * CIM interrupt handler.
 */
static void csio_cim_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cim_intr_info[] = {
		{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	static struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT_F, "CIM illegal write", -1, 1 },
		{ ILLRDINT_F, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
				      cim_intr_info) +
	      csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
				      cim_upintr_info);
	if (fat)
		csio_hw_fatal_err(hw);
}
/*
 * ULP RX interrupt handler.
 */
static void csio_ulprx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * ULP TX interrupt handler.
 */
static void csio_ulptx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * PM TX interrupt handler.
 */
static void csio_pmtx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
		  1 },
		{ ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
		{ PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * PM RX interrupt handler.
 */
static void csio_pmrx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
		  1 },
		{ IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
		{ PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * CPL switch interrupt handler.
 */
static void csio_cplsw_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * LE interrupt handler.
 */
static void csio_le_intr_handler(struct csio_hw *hw)
{
	enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);

	static struct intr_info le_intr_info[] = {
		{ LIPMISS_F, "LE LIP miss", -1, 0 },
		{ LIP0_F, "LE 0 LIP error", -1, 0 },
		{ PARITYERR_F, "LE parity error", -1, 1 },
		{ UNKNOWNCMD_F, "LE unknown command", -1, 1 },
		{ REQQPARERR_F, "LE request queue parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	static struct intr_info t6_le_intr_info[] = {
		{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
		{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
		{ TCAMINTPERR_F, "LE parity error", -1, 1 },
		{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
		{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A,
				    (chip == CHELSIO_T5) ?
				    le_intr_info : t6_le_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * MPS interrupt handler.
 */
static void csio_mps_intr_handler(struct csio_hw *hw)
{
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
		  -1, 1 },
		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
				      mps_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
				      mps_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
				      mps_trc_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
				      mps_stat_sram_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
				      mps_stat_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
				      mps_stat_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
				      mps_cls_intr_info);

	csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
	csio_rd_reg32(hw, MPS_INT_CAUSE_A);	/* flush */
	if (fat)
		csio_hw_fatal_err(hw);
}
#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
		      ECC_UE_INT_CAUSE_F)

/*
 * EDC/MC interrupt handler.
 */
static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
	} else {
		addr = MC_INT_CAUSE_A;
		cnt_addr = MC_ECC_STATUS_A;
	}

	v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE_F)
		csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
	if (v & ECC_CE_INT_CAUSE_F) {
		uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));

		csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
		csio_warn(hw, "%u %s correctable ECC data error%s\n",
			  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE_F)
		csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);

	csio_wr_reg32(hw, v, addr);
	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
		csio_hw_fatal_err(hw);
}
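
/*
 * Writing ECC_CECNT_V(ECC_CECNT_M) back to the ECC status register
 * above appears to clear the correctable-error counter once it has been
 * reported, so each warning covers only the errors accumulated since
 * the previous report.
 */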
/*
 * MA interrupt handler.
 */
static void csio_ma_intr_handler(struct csio_hw *hw)
{
	uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);

	if (status & MEM_PERR_INT_CAUSE_F)
		csio_fatal(hw, "MA parity error, parity status %#x\n",
			   csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
	if (status & MEM_WRAP_INT_CAUSE_F) {
		v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
		csio_fatal(hw,
		   "MA address wrap-around error by client %u to address %#x\n",
		   MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
	}
	csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
	csio_hw_fatal_err(hw);
}
/*
 * SMB interrupt handler.
 */
static void csio_smb_intr_handler(struct csio_hw *hw)
{
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * NC-SI interrupt handler.
 */
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * XGMAC interrupt handler.
 */
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
	uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));

	v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR_F)
		csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
	if (v & RXFIFO_PRTY_ERR_F)
		csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
	csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
	csio_hw_fatal_err(hw);
}
/*
 * PL interrupt handler.
 */
static void csio_pl_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR_F, "T4 fatal parity error", -1, 1 },
		{ PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * csio_hw_slow_intr_handler - control path interrupt handler
 * @hw: HW module.
 *
 * Interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 */
int
csio_hw_slow_intr_handler(struct csio_hw *hw)
{
	uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);

	if (!(cause & CSIO_GLBL_INTR_MASK)) {
		CSIO_INC_STATS(hw, n_plint_unexp);
		return 0;
	}

	csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);

	CSIO_INC_STATS(hw, n_plint_cnt);

	if (cause & CIM_F)
		csio_cim_intr_handler(hw);

	if (cause & MPS_F)
		csio_mps_intr_handler(hw);

	if (cause & NCSI_F)
		csio_ncsi_intr_handler(hw);

	if (cause & PL_F)
		csio_pl_intr_handler(hw);

	if (cause & SMB_F)
		csio_smb_intr_handler(hw);

	if (cause & XGMAC0_F)
		csio_xgmac_intr_handler(hw, 0);

	if (cause & XGMAC1_F)
		csio_xgmac_intr_handler(hw, 1);

	if (cause & XGMAC_KR0_F)
		csio_xgmac_intr_handler(hw, 2);

	if (cause & XGMAC_KR1_F)
		csio_xgmac_intr_handler(hw, 3);

	if (cause & PCIE_F)
		hw->chip_ops->chip_pcie_intr_handler(hw);

	if (cause & MC_F)
		csio_mem_intr_handler(hw, MEM_MC);

	if (cause & EDC0_F)
		csio_mem_intr_handler(hw, MEM_EDC0);

	if (cause & EDC1_F)
		csio_mem_intr_handler(hw, MEM_EDC1);

	if (cause & LE_F)
		csio_le_intr_handler(hw);

	if (cause & TP_F)
		csio_tp_intr_handler(hw);

	if (cause & MA_F)
		csio_ma_intr_handler(hw);

	if (cause & PM_TX_F)
		csio_pmtx_intr_handler(hw);

	if (cause & PM_RX_F)
		csio_pmrx_intr_handler(hw);

	if (cause & ULP_RX_F)
		csio_ulprx_intr_handler(hw);

	if (cause & CPL_SWITCH_F)
		csio_cplsw_intr_handler(hw);

	if (cause & SGE_F)
		csio_sge_intr_handler(hw);

	if (cause & ULP_TX_F)
		csio_ulptx_intr_handler(hw);

	/* Clear the interrupts just processed for which we are the master. */
	csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
	csio_rd_reg32(hw, PL_INT_CAUSE_A);	/* flush */

	return 1;
}
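
/*
 * Return convention: 1 when a slow interrupt was claimed and handled,
 * 0 when the PL_INT_CAUSE_A bits did not intersect CSIO_GLBL_INTR_MASK
 * (i.e. the interrupt was not ours); the ISR paths (see csio_isr.c)
 * can use this to decide whether the IRQ was serviced.
 */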
/*****************************************************************************
 * HW <--> mailbox interfacing routines.
 ****************************************************************************/
/*
 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
 *
 * @data: Private data pointer.
 *
 * Called from worker thread context.
 */
static void
csio_mberr_worker(void *data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mbm *mbm = &hw->mbm;
	LIST_HEAD(cbfn_q);
	struct csio_mb *mbp_next;
	int rv;

	del_timer_sync(&mbm->timer);

	spin_lock_irq(&hw->lock);
	if (list_empty(&mbm->cbfn_q)) {
		spin_unlock_irq(&hw->lock);
		return;
	}

	list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
	mbm->stats.n_cbfnq = 0;

	/* Try to start waiting mailboxes */
	if (!list_empty(&mbm->req_q)) {
		mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
		list_del_init(&mbp_next->list);

		rv = csio_mb_issue(hw, mbp_next);
		if (rv != 0)
			list_add_tail(&mbp_next->list, &mbm->req_q);
		else
			CSIO_DEC_STATS(mbm, n_activeq);
	}
	spin_unlock_irq(&hw->lock);

	/* Now callback completions */
	csio_mb_completions(hw, &cbfn_q);
}
/*
 * csio_hw_mb_timer - Top-level Mailbox timeout handler.
 *
 * @t: Pointer to the mailbox module's timer.
 *
 */
static void
csio_hw_mb_timer(struct timer_list *t)
{
	struct csio_mbm *mbm = from_timer(mbm, t, timer);
	struct csio_hw *hw = mbm->hw;
	struct csio_mb *mbp = NULL;

	spin_lock_irq(&hw->lock);
	mbp = csio_mb_tmo_handler(hw);
	spin_unlock_irq(&hw->lock);

	/* Call back the function for the timed-out Mailbox */
	if (mbp)
		mbp->mb_cbfn(hw, mbp);
}
/*
 * csio_hw_mbm_cleanup - Cleanup Mailbox module.
 * @hw: HW module
 *
 * Called with lock held, should exit with lock held.
 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
 * into a local queue. Drops lock and calls the completions. Holds
 * lock and returns.
 */
static void
csio_hw_mbm_cleanup(struct csio_hw *hw)
{
	LIST_HEAD(cbfn_q);

	csio_mb_cancel_all(hw, &cbfn_q);

	spin_unlock_irq(&hw->lock);
	csio_mb_completions(hw, &cbfn_q);
	spin_lock_irq(&hw->lock);
}
/*****************************************************************************
 * Event handling
 ****************************************************************************/
int
csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		 uint16_t len)
{
	struct csio_evt_msg *evt_entry = NULL;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	if (hw->flags & CSIO_HWF_FWEVT_STOP)
		return -EINVAL;

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		return -ENOMEM;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;
	memcpy((void *)evt_entry->data, evt_msg, len);
	list_add_tail(&evt_entry->list, &hw->evt_active_q);

	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);

	return 0;
}
static int
csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		      uint16_t len, bool msg_sg)
{
	struct csio_evt_msg *evt_entry = NULL;
	struct csio_fl_dma_buf *fl_sg;
	uint32_t off = 0;
	unsigned long flags;
	int n, ret = 0;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&hw->lock, flags);
	if (hw->flags & CSIO_HWF_FWEVT_STOP) {
		ret = -EINVAL;
		goto out;
	}

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		ret = -ENOMEM;
		goto out;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;

	/* If Payload in SG list */
	if (msg_sg) {
		fl_sg = (struct csio_fl_dma_buf *) evt_msg;
		for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
			memcpy((void *)((uintptr_t)evt_entry->data + off),
			       fl_sg->flbufs[n].vaddr,
			       fl_sg->flbufs[n].len);
			off += fl_sg->flbufs[n].len;
		}
	} else
		memcpy((void *)evt_entry->data, evt_msg, len);

	list_add_tail(&evt_entry->list, &hw->evt_active_q);
	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);
out:
	spin_unlock_irqrestore(&hw->lock, flags);
	return ret;
}
static void
csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
{
	if (evt_entry) {
		spin_lock_irq(&hw->lock);
		list_del_init(&evt_entry->list);
		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_DEC_STATS(hw, n_evt_activeq);
		CSIO_INC_STATS(hw, n_evt_freeq);
		spin_unlock_irq(&hw->lock);
	}
}
void
csio_evtq_flush(struct csio_hw *hw)
{
	uint32_t count;

	count = 30;
	while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
}
static void
csio_evtq_stop(struct csio_hw *hw)
{
	hw->flags |= CSIO_HWF_FWEVT_STOP;
}

static void
csio_evtq_start(struct csio_hw *hw)
{
	hw->flags &= ~CSIO_HWF_FWEVT_STOP;
}
static void
csio_evtq_cleanup(struct csio_hw *hw)
{
	struct list_head *evt_entry, *next_entry;

	/* Release outstanding events from activeq to freeq */
	if (!list_empty(&hw->evt_active_q))
		list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);

	hw->stats.n_evt_activeq = 0;
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;

	/* Free up event entries */
	list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
		kfree(evt_entry);
		CSIO_DEC_STATS(hw, n_evt_freeq);
	}

	hw->stats.n_evt_freeq = 0;
}
static void
csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
			  struct csio_fl_dma_buf *flb, void *priv)
{
	__u8 op;
	void *msg = NULL;
	uint32_t msg_len = 0;
	bool msg_sg = 0;

	op = ((struct rss_header *) wr)->opcode;
	if (op == CPL_FW6_PLD) {
		CSIO_INC_STATS(hw, n_cpl_fw6_pld);
		if (!flb || !flb->totlen) {
			CSIO_INC_STATS(hw, n_cpl_unexp);
			return;
		}

		msg = (void *) flb;
		msg_len = flb->totlen;
		msg_sg = 1;
	} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {

		CSIO_INC_STATS(hw, n_cpl_fw6_msg);
		/* skip RSS header */
		msg = (void *)((uintptr_t)wr + sizeof(__be64));
		msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
			  sizeof(struct cpl_fw4_msg);
	} else {
		csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
		CSIO_INC_STATS(hw, n_cpl_unexp);
		return;
	}

	/*
	 * Enqueue event to EventQ. Events processing happens
	 * in Event worker thread context
	 */
	if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
				  (uint16_t)msg_len, msg_sg))
		CSIO_INC_STATS(hw, n_evt_drop);
}
void
csio_evtq_worker(struct work_struct *work)
{
	struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
	struct list_head *evt_entry, *next_entry;
	LIST_HEAD(evt_q);
	struct csio_evt_msg *evt_msg;
	struct cpl_fw6_msg *msg;
	struct csio_rnode *rn;
	int rv = 0;
	uint8_t evtq_stop = 0;

	csio_dbg(hw, "event worker thread active evts#%d\n",
		 hw->stats.n_evt_activeq);

	spin_lock_irq(&hw->lock);
	while (!list_empty(&hw->evt_active_q)) {
		list_splice_tail_init(&hw->evt_active_q, &evt_q);
		spin_unlock_irq(&hw->lock);

		list_for_each_safe(evt_entry, next_entry, &evt_q) {
			evt_msg = (struct csio_evt_msg *) evt_entry;

			/* Drop events if queue is STOPPED */
			spin_lock_irq(&hw->lock);
			if (hw->flags & CSIO_HWF_FWEVT_STOP)
				evtq_stop = 1;
			spin_unlock_irq(&hw->lock);
			if (evtq_stop) {
				CSIO_INC_STATS(hw, n_evt_drop);
				goto free_evt;
			}

			switch (evt_msg->type) {
			case CSIO_EVT_FW:
				msg = (struct cpl_fw6_msg *)(evt_msg->data);

				if ((msg->opcode == CPL_FW6_MSG ||
				     msg->opcode == CPL_FW4_MSG) &&
				    !msg->type) {
					rv = csio_mb_fwevt_handler(hw,
								   msg->data);
					if (rv == 0)
						break;
					/* Handle any remaining fw events */
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else if (msg->opcode == CPL_FW6_PLD) {

					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else {
					csio_warn(hw,
					     "Unhandled FW msg op %x type %x\n",
						  msg->opcode, msg->type);
					CSIO_INC_STATS(hw, n_evt_drop);
				}
				break;

			case CSIO_EVT_MBX:
				csio_mberr_worker(hw);
				break;

			case CSIO_EVT_DEV_LOSS:
				memcpy(&rn, evt_msg->data, sizeof(rn));
				csio_rnode_devloss_handler(rn);
				break;

			default:
				csio_warn(hw, "Unhandled event %x on evtq\n",
					  evt_msg->type);
				CSIO_INC_STATS(hw, n_evt_unexp);
				break;
			}

free_evt:
			csio_free_evt(hw, evt_msg);
		}

		spin_lock_irq(&hw->lock);
	}
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
	spin_unlock_irq(&hw->lock);
}
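
/*
 * The worker above splices the entire active queue onto a private list
 * before dropping hw->lock, so individual handlers run unlocked and new
 * events can keep arriving; the outer loop repeats until the active
 * queue is found empty while the lock is held.
 */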
int
csio_fwevtq_handler(struct csio_hw *hw)
{
	int rv;

	if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
		CSIO_INC_STATS(hw, n_int_stray);
		return -EINVAL;
	}

	rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
				    csio_process_fwevtq_entry, NULL);
	return rv;
}
/****************************************************************************
 * Entry points
 ****************************************************************************/

/* Management module */
/*
 * csio_mgmt_req_lookup - Lookup the given IO req exist in Active Q.
 * @mgmtm - mgmt module
 * @io_req - io request
 *
 * Return - 0: if given IO Req exists in active Q.
 *          -EINVAL: if lookup fails.
 */
int
csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
{
	struct list_head *tmp;

	/* Lookup ioreq in the ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		if (io_req == (struct csio_ioreq *)tmp)
			return 0;
	}
	return -EINVAL;
}
#define	ECM_MIN_TMO	1000	/* Minimum timeout value for req */

/*
 * csio_mgmt_tmo_handler - MGMT IO Timeout handler.
 * @t - Timer whose expiry invoked us.
 *
 * Return - none.
 */
static void
csio_mgmt_tmo_handler(struct timer_list *t)
{
	struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer);
	struct list_head *tmp;
	struct csio_ioreq *io_req;

	csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");

	spin_lock_irq(&mgmtm->hw->lock);

	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);

		if (!io_req->tmo) {
			/* Dequeue the request from retry Q. */
			tmp = csio_list_prev(tmp);
			list_del_init(&io_req->sm.sm_list);
			if (io_req->io_cbfn) {
				/* io_req will be freed by completion handler */
				io_req->wr_status = -ETIMEDOUT;
				io_req->io_cbfn(mgmtm->hw, io_req);
			}
		}
	}

	/* If retry queue is not empty, re-arm timer */
	if (!list_empty(&mgmtm->active_q))
		mod_timer(&mgmtm->mgmt_timer,
			  jiffies + msecs_to_jiffies(ECM_MIN_TMO));
	spin_unlock_irq(&mgmtm->hw->lock);
}
static void
csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
{
	struct csio_hw *hw = mgmtm->hw;
	struct csio_ioreq *io_req;
	struct list_head *tmp;
	uint32_t count;

	count = 30;
	/* Wait for all outstanding req to complete gracefully */
	while ((!list_empty(&mgmtm->active_q)) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	/* release outstanding req from ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		tmp = csio_list_prev(tmp);
		list_del_init(&io_req->sm.sm_list);
		mgmtm->stats.n_active--;
		if (io_req->io_cbfn) {
			/* io_req will be freed by completion handler */
			io_req->wr_status = -ETIMEDOUT;
			io_req->io_cbfn(mgmtm->hw, io_req);
		}
	}
}
/*
 * csio_mgmtm_init - Mgmt module init entry point
 * @mgmtm - mgmt module
 * @hw - HW module
 *
 * Initialize mgmt timer, resource wait queue, active queue,
 * completion q. Allocate Egress and Ingress
 * WR queues and save off the queue index returned by the WR
 * module for future use. Allocate and save off mgmt reqs in the
 * mgmt_req_freelist for future use. Make sure their SM is initialized
 * to uninit state.
 * Returns: 0 - on success
 *          -ENOMEM - on error.
 */
static int
csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
{
	timer_setup(&mgmtm->mgmt_timer, csio_mgmt_tmo_handler, 0);

	INIT_LIST_HEAD(&mgmtm->active_q);
	INIT_LIST_HEAD(&mgmtm->cbfn_q);

	mgmtm->hw = hw;
	/*mgmtm->iq_idx = hw->fwevt_iq_idx;*/

	return 0;
}
/*
 * csio_mgmtm_exit - MGMT module exit entry point
 * @mgmtm - mgmt module
 *
 * This function is called during MGMT module uninit.
 * Stop timers, free ioreqs allocated.
 * Returns: None
 *
 */
static void
csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
{
	del_timer_sync(&mgmtm->mgmt_timer);
}
/*
 * csio_hw_start - Kicks off the HW State machine
 * @hw: Pointer to HW module.
 *
 * It is assumed that the initialization is a synchronous operation.
 * So when we return after posting the event, the HW SM should be in
 * the ready state, if there were no errors during init.
 */
int
csio_hw_start(struct csio_hw *hw)
{
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_CFG);
	spin_unlock_irq(&hw->lock);

	if (csio_is_hw_ready(hw))
		return 0;
	else if (csio_match_state(hw, csio_hws_uninit))
		return -EINVAL;
	else
		return -ENODEV;
}
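
/*
 * Typical probe-time usage (sketch; see csio_init.c for the actual
 * call sites):
 *
 *	csio_hw_init(hw);	- set up SM, mailbox, WR and SCSI modules
 *	csio_hw_start(hw);	- post CSIO_HWE_CFG and verify ready state
 */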
int
csio_hw_stop(struct csio_hw *hw)
{
	csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);

	if (csio_is_hw_removing(hw))
		return 0;
	else
		return -EINVAL;
}
/* Max reset retries */
#define CSIO_MAX_RESET_RETRIES	3

/*
 * csio_hw_reset - Reset the hardware
 * @hw: HW module.
 *
 * Caller should hold lock across this function.
 */
int
csio_hw_reset(struct csio_hw *hw)
{
	if (!csio_is_hw_master(hw))
		return -EPERM;

	if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
		csio_dbg(hw, "Max hw reset attempts reached..");
		return -EINVAL;
	}

	hw->rst_retries++;
	csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);

	if (csio_is_hw_ready(hw)) {
		hw->rst_retries = 0;
		hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
		return 0;
	} else
		return -EINVAL;
}
/*
 * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
 * @hw: HW module.
 */
static void
csio_hw_get_device_id(struct csio_hw *hw)
{
	/* Is the adapter device id cached already ? */
	if (csio_is_dev_id_cached(hw))
		return;

	/* Get the PCI vendor & device id */
	pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
			     &hw->params.pci.vendor_id);
	pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
			     &hw->params.pci.device_id);

	csio_dev_id_cached(hw);
	hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);

} /* csio_hw_get_device_id */
/*
 * csio_hw_set_description - Set the model, description of the hw.
 * @hw: HW module.
 * @ven_id: PCI Vendor ID
 * @dev_id: PCI Device ID
 */
static void
csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
{
	uint32_t adap_type, prot_type;

	if (ven_id == CSIO_VENDOR_ID) {
		prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
		adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);

		if (prot_type == CSIO_T5_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t5_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t5_fcoe_adapters[adap_type].description,
			       32);
		} else {
			char tempName[32] = "Chelsio FCoE Controller";
			memcpy(hw->model_desc, tempName, 32);
		}
	}
} /* csio_hw_set_description */
/*
 * csio_hw_init - Initialize HW module.
 * @hw: Pointer to HW module.
 *
 * Initialize the members of the HW module.
 */
int
csio_hw_init(struct csio_hw *hw)
{
	int rv = -EINVAL;
	uint32_t i;
	uint16_t ven_id, dev_id;
	struct csio_evt_msg *evt_entry;

	INIT_LIST_HEAD(&hw->sm.sm_list);
	csio_init_state(&hw->sm, csio_hws_uninit);
	spin_lock_init(&hw->lock);
	INIT_LIST_HEAD(&hw->sln_head);

	/* Get the PCI vendor & device id */
	csio_hw_get_device_id(hw);

	strcpy(hw->name, CSIO_HW_NAME);

	/* Initialize the HW chip ops with T5 specific ops */
	hw->chip_ops = &t5_ops;

	/* Set the model & its description */

	ven_id = hw->params.pci.vendor_id;
	dev_id = hw->params.pci.device_id;

	csio_hw_set_description(hw, ven_id, dev_id);

	/* Initialize default log level */
	hw->params.log_level = (uint32_t) csio_dbg_level;

	csio_set_fwevt_intr_idx(hw, -1);
	csio_set_nondata_intr_idx(hw, -1);

	/* Init all the modules: Mailbox, WorkRequest and Transport */
	if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
		goto err;

	rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
	if (rv)
		goto err_mbm_exit;

	rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
	if (rv)
		goto err_wrm_exit;

	rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
	if (rv)
		goto err_scsim_exit;

	/* Pre-allocate evtq entries and initialize them */
	INIT_LIST_HEAD(&hw->evt_active_q);
	INIT_LIST_HEAD(&hw->evt_free_q);
	for (i = 0; i < csio_evtq_sz; i++) {

		evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
		if (!evt_entry) {
			rv = -ENOMEM;
			csio_err(hw, "Failed to initialize eventq");
			goto err_evtq_cleanup;
		}

		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_INC_STATS(hw, n_evt_freeq);
	}

	hw->dev_num = dev_num;
	dev_num++;

	return 0;

err_evtq_cleanup:
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
err_scsim_exit:
	csio_scsim_exit(csio_hw_to_scsim(hw));
err_wrm_exit:
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
err_mbm_exit:
	csio_mbm_exit(csio_hw_to_mbm(hw));
err:
	return rv;
}
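
/*
 * The error labels above unwind in strict reverse order of setup
 * (mgmtm -> scsim -> wrm -> mbm), so a failure at any stage releases
 * exactly the modules that were already initialized.
 */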
/*
 * csio_hw_exit - Un-initialize HW module.
 * @hw: Pointer to HW module.
 *
 */
void
csio_hw_exit(struct csio_hw *hw)
{
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
	csio_scsim_exit(csio_hw_to_scsim(hw));
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
	csio_mbm_exit(csio_hw_to_mbm(hw));
}