/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;
/* FCoE Adapter types & its description */
static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"},
	{"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"},
	{"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"}
};
static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);
static void csio_evtq_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);
int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}
/*
 *	csio_hw_wait_op_done_val - wait until an operation is completed
 *	@hw: the HW module
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
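/*
 * Illustrative usage (a sketch, not extra driver logic): waiting for the
 * serial flash to go idle polls SF_OP_A for SF_BUSY_F to read as 0, which
 * is how csio_hw_sf1_read()/csio_hw_sf1_write() below use this helper:
 *
 *	uint32_t val;
 *	int rv = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0,
 *					  SF_ATTEMPTS, 10, &val);
 *	if (rv)		(-EAGAIN: SF_BUSY_F never cleared)
 *		return rv;
 */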
/*
 *	csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@hw: the adapter
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
	val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA_A);
}
void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	/* Flush */
	csio_rd_reg32(hw, reg);
}
static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}
/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3
/*
 *	csio_hw_seeprom_read - read a serial EEPROM location
 *	@hw: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's
 *	PCI VPD capability.  Note that this function must be called with a
 *	virtual address.
 */
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
	uint16_t val = 0;
	int attempts = EEPROM_MAX_RD_POLL;
	uint32_t base = hw->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

	do {
		udelay(10);
		pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
		return -EINVAL;
	}

	pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*(__le32 *)data);

	return 0;
}
/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 *	csio_hw_get_vpd_keyword_val - Locates an information field keyword in
 *				      the VPD
 *	@v: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *
 *	Returns the value of the information field keyword or
 *	-EINVAL otherwise.
 */
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int32_t i;
	int32_t offset, len;
	const uint8_t *buf = &v->id_tag;
	const uint8_t *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -EINVAL;

	for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -EINVAL;
}
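/*
 * Layout note (per the PCI VPD format assumed above): each VPD-R
 * information field is a 3-byte header -- two keyword bytes and one
 * length byte -- followed by the data.  A serial-number entry, for
 * example, is laid out as 'S' 'N' <len> <serial-number bytes>, and the
 * helper returns the offset just past that 3-byte header.
 */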
static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);
	if (*pos)
		return 0;

	return -1;
}
/*
 *	csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 *	@hw: HW module
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
	int i, ret, ec, sn, addr;
	uint8_t *vpd, csum;
	const struct t4_vpd_hdr *v;
	/* To get around compilation warning from strstrip */
	char *s;

	if (csio_is_valid_vpd(hw))
		return 0;

	ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
				  &hw->params.pci.vpd_cap_addr);
	if (ret)
		return -EINVAL;

	vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
	if (vpd == NULL)
		return -ENOMEM;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
		if (ret) {
			kfree(vpd);
			return -EINVAL;
		}
	}

	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);

	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}
/*
 *	csio_hw_sf1_read - read data from the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
		      BYTECNT_V(byte_cnt - 1), SF_OP_A);
	ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA_A);

	return ret;
}
/*
 *	csio_hw_sf1_write - write data to the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, val, SF_DATA_A);
	csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
		      OP_V(1) | SF_LOCK_V(lock), SF_OP_A);

	return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
					10, NULL);
}
/*
 *	csio_hw_flash_wait_op - wait for a flash operation to complete
 *	@hw: the HW module
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
	int ret;
	uint32_t status;

	while (1) {
		ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
		if (ret != 0)
			return ret;

		ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
		if (ret != 0)
			return ret;

		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
/*
 *	csio_hw_read_flash - read words from serial flash
 *	@hw: the HW module
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
		   uint32_t *data, int32_t byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) htonl(*data);
	}

	return 0;
}
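/*
 * Sketch of a typical caller (illustrative only): fetching one 32-bit
 * word of the firmware header from flash, as csio_hw_get_fw_version()
 * does further below:
 *
 *	uint32_t vers;
 *	if (csio_hw_read_flash(hw, FLASH_FW_START +
 *			       offsetof(struct fw_hdr, fw_ver),
 *			       1, &vers, 0))
 *		return -EINVAL;
 */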
/*
 *	csio_hw_write_flash - write up to a page of data to the serial flash
 *	@hw: the hw
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}

	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return ret;
}
/*
 *	csio_hw_flash_erase_sectors - erase a range of flash sectors
 *	@hw: the HW module
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
	int ret = 0;

	while (start <= end) {
		ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
		if (ret != 0)
			goto out;

		ret = csio_hw_sf1_write(hw, 4, 0, 1,
					SF_ERASE_SECTOR | (start << 8));
		if (ret != 0)
			goto out;

		ret = csio_hw_flash_wait_op(hw, 14, 500);
		if (ret != 0)
			goto out;

		start++;
	}
out:
	if (ret)
		csio_err(hw, "erase of flash sector %d failed, error %d\n",
			 start, ret);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return ret;
}
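/*
 * A full reflash is erase-then-program (sketch; the sector count depends
 * on the part, see csio_hw_get_flash_params() below):
 *
 *	csio_hw_flash_erase_sectors(hw, start_sec, end_sec);
 *	for (off = 0; off < size; off += SF_PAGE_SIZE)
 *		csio_hw_write_flash(hw, addr + off, SF_PAGE_SIZE,
 *				    image + off);
 *
 * csio_hw_fw_dload() below implements this flow with error handling and
 * a deliberately-invalidated version word.
 */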
static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_G(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_G(hw->fwrev));
}
/*
 *	csio_hw_get_fw_version - read the firmware version
 *	@hw: HW module
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}

/*
 *	csio_hw_get_tp_version - read the TP microcode version
 *	@hw: HW module
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, tp_microcode_ver), 1,
				  vers, 0);
}
/*
 *	csio_hw_fw_dload - download firmware.
 *	@hw: HW module
 *	@fw_data: firmware image to write.
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FLASH_FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FLASH_FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}
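	/*
	 * Worked example of the checksum rule above: the 32-bit sum of all
	 * big-endian words in a valid image must be 0xffffffff.  If the
	 * payload words sum to 0xfffffff0, the image's checksum word must
	 * carry 0x0000000f so the total comes out to 0xffffffff.
	 */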
	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
					  FLASH_FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FLASH_FW_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = csio_hw_write_flash(hw,
				  FLASH_FW_START +
					offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}
static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	int ret;
	uint32_t info = 0;

	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	if (ret != 0)
		return ret;

	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		hw->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		hw->params.sf_nsec = 64;
	else
		return -EINVAL;
	hw->params.sf_size = 1 << info;

	return 0;
}
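/*
 * Decoding example for the ID bytes above: the third byte of info is
 * log2 of the part size, so a Numonix part reporting info = 0x16 is a
 * 1 << 0x16 = 4 MB device, recorded as 1 << (0x16 - 16) = 64 sectors
 * of 64 KB each.
 */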
/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/

static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;
	int src_pf;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		src_pf = SOURCEPF_G(reg);
	else
		src_pf = T6_SOURCEPF_G(reg);

	if ((cnt == 0) && (((int32_t)(src_pf) < 0) ||
			   (src_pf >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	return 0;
}
/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb	*mbp;
	int	rv = 0;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, CSIO_MASTER_MAY, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly.  (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time).  In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_MASK so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F) {
					*state = CSIO_DEV_STATE_ERR;
					rv = -ETIMEDOUT;
				} else if (pcie_fw & PCIE_FW_INIT_F)
					*state = CSIO_DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				mpfn = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}
/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 *
 */
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_do_reset - Perform the device reset.
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If fw_rst is set, issues FW reset mbox cmd otherwise
 * does PIO reset.
 * Performs reset of the function.
 */
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	if (!fw_rst) {
		/* PIO reset */
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		return 0;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
		      PIORSTMODE_F | PIORST_F, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of RESET command failed.\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
	uint16_t caps;

	caps = ntohs(rsp->fcoecaps);

	if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
		csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
		return -EINVAL;
	}

	if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
		csio_err(hw, "No FCoE Control Offload capability\n");
		return -EINVAL;
	}

	return 0;
}
/*
 *	csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	PCIE_FW_MASTER_MASK).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware.
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct csio_mb	*mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
				   PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}
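/*
 * The halt/restart pair brackets any host-driven flash rewrite
 * (sketch of the sequence, implemented by csio_hw_fw_upgrade() below):
 *
 *	csio_hw_fw_halt(hw, mbox, force);
 *	csio_hw_fw_dload(hw, fw_data, size);
 *	csio_hw_fw_restart(hw, mbox, reset);
 */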
/*
 *	csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 *	@hw: the HW module
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by csio_hw_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	1. If we're dealing with newer firmware we'll simply want to take
 *	   the chip's microprocessor out of RESET.  This will cause the
 *	   firmware to start up from its start vector.  And then we'll loop
 *	   until the firmware indicates it's started again (PCIE_FW.HALT
 *	   reset to 0) or we timeout.
 *
 *	2. If we're dealing with older firmware then we'll need to RESET
 *	   the chip since older firmware won't recognize the PCIE_FW.HALT
 *	   flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		msleep(2000);
	} else {
		int ms;

		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 *	csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}
/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_mb	*mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports = hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
/*
 * csio_enable_ports - Bring up all available ports.
 * @hw: HW module.
 *
 */
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	uint8_t portid;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, 0, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval,
					      &hw->pport[i].pcap);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		/* Write back PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
			     (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

	} /* For all ports */

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb	*mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource information */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	*param = _param[0];

	return 0;
}
static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;
	const char *fw_cfg_file;

	if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
		fw_cfg_file = FW_CFG_NAME_T5;
	else
		fw_cfg_file = FW_CFG_NAME_T6;

	if (request_firmware(&cf, fw_cfg_file, dev) < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 fw_cfg_file, ret);
		return -ENOENT;
	}

	if (cf->size%4 != 0)
		value_to_add = 4 - (cf->size % 4);

	cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n", fw_cfg_file);
		snprintf(path, 64, "%s%s", "/lib/firmware/", fw_cfg_file);
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}
/*
 * HW initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration -- either using the configuration
 * file stored in flash on the adapter or using a filesystem-local file
 * if available.
 *
 * If we don't have configuration file support in the firmware, then we'll
 * have to set things up the old fashioned way with hard-coded register
 * writes and firmware commands ...
 */
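/*
 * In the Configuration File path the order of operations is, in outline:
 * reset the device, locate a config (host file or flash), hand it to the
 * firmware via FW_CAPS_CONFIG_CMD (read then write), query device
 * parameters, and finally post CSIO_HWE_INIT.  csio_hw_use_fwconfig()
 * below implements this sequence.
 */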
/*
 * Attempt to initialize the HW via a Firmware Configuration File.
 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	struct csio_mb	*mbp = NULL;
	struct fw_caps_config_cmd *caps_cmd;
	unsigned int mtype, maddr;
	int rv = -EINVAL;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	char path[64];
	char *config_name = NULL;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file in the host,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the HW flash ...
	 */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv != 0) {
		/*
		 * config file was not found. Use default
		 * config file from flash.
		 */
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
	} else {
		config_name = path;
		mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the
	 * firmware.  A very few early versions of the firmware didn't
	 * have one embedded but we can ignore those.
	 */
	if (rv == ENOENT) {
		CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
		caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
					      FW_CMD_REQUEST_F |
					      FW_CMD_READ_F);
		caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

		if (csio_mb_issue(hw, mbp)) {
			rv = -EINVAL;
			goto bye;
		}

		rv = csio_mb_fw_retval(mbp);
		config_name = "Firmware Default";
	}
	if (rv != FW_SUCCESS)
		goto bye;

	finiver = ntohl(caps_cmd->finiver);
	finicsum = ntohl(caps_cmd->finicsum);
	cfcsum = ntohl(caps_cmd->cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	if (rv != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
		goto bye;
	}

	if (finicsum != cfcsum) {
		csio_warn(hw,
			  "Config File checksum mismatch: csum=%#x, computed=%#x\n",
			  finicsum, cfcsum);
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto bye;

	mempool_free(mbp, hw->mb_mempool);
	mbp = NULL;

	/*
	 * Note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw, "Successfully configured using Firmware "
		  "Configuration File %s, version %#x, computed checksum %#x\n",
		  config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...
	 */
bye:
	if (mbp)
		mempool_free(mbp, hw->mb_mempool);
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_warn(hw, "Configuration file error %d\n", rv);
	return rv;
}
/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}
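/*
 * Example: firmware with a different build number is still "compatible"
 * here as long as the chip type matches and all five interface versions
 * (nic, vnic, ri, iscsi, fcoe) are equal -- only the microcode behind an
 * unchanged host interface differs, so the on-card image can be used
 * as-is.
 */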
/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
				     int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
		FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
		FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
		FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));

	return 1;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T5,
		.fs_name = FW_CFG_NAME_T5,
		.fw_mod_name = FW_FNAME_T5,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW_CFG_NAME_T6,
		.fw_mod_name = FW_FNAME_T6,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};
*find_fw_info(int chip
)
1920 for (i
= 0; i
< ARRAY_SIZE(fw_info_array
); i
++) {
1921 if (fw_info_array
[i
].chip
== chip
)
1922 return &fw_info_array
[i
];
1927 static int csio_hw_prep_fw(struct csio_hw
*hw
, struct fw_info
*fw_info
,
1928 const u8
*fw_data
, unsigned int fw_size
,
1929 struct fw_hdr
*card_fw
, enum csio_dev_state state
,
1932 int ret
, card_fw_usable
, fs_fw_usable
;
1933 const struct fw_hdr
*fs_fw
;
1934 const struct fw_hdr
*drv_fw
;
1936 drv_fw
= &fw_info
->fw_hdr
;
1938 /* Read the header of the firmware on the card */
1939 ret
= csio_hw_read_flash(hw
, FLASH_FW_START
,
1940 sizeof(*card_fw
) / sizeof(uint32_t),
1941 (uint32_t *)card_fw
, 1);
1943 card_fw_usable
= fw_compatible(drv_fw
, (const void *)card_fw
);
1946 "Unable to read card's firmware header: %d\n", ret
);
1950 if (fw_data
!= NULL
) {
1951 fs_fw
= (const void *)fw_data
;
1952 fs_fw_usable
= fw_compatible(drv_fw
, fs_fw
);
1958 if (card_fw_usable
&& card_fw
->fw_ver
== drv_fw
->fw_ver
&&
1959 (!fs_fw_usable
|| fs_fw
->fw_ver
== drv_fw
->fw_ver
)) {
1960 /* Common case: the firmware on the card is an exact match and
1961 * the filesystem one is an exact match too, or the filesystem
1962 * one is absent/incompatible.
1964 } else if (fs_fw_usable
&& state
== CSIO_DEV_STATE_UNINIT
&&
1965 csio_should_install_fs_fw(hw
, card_fw_usable
,
1966 be32_to_cpu(fs_fw
->fw_ver
),
1967 be32_to_cpu(card_fw
->fw_ver
))) {
1968 ret
= csio_hw_fw_upgrade(hw
, hw
->pfn
, fw_data
,
1972 "failed to install firmware: %d\n", ret
);
1976 /* Installed successfully, update the cached header too. */
1977 memcpy(card_fw
, fs_fw
, sizeof(*card_fw
));
1979 *reset
= 0; /* already reset as part of load_fw */
1982 if (!card_fw_usable
) {
1985 d
= be32_to_cpu(drv_fw
->fw_ver
);
1986 c
= be32_to_cpu(card_fw
->fw_ver
);
1987 k
= fs_fw
? be32_to_cpu(fs_fw
->fw_ver
) : 0;
1989 csio_err(hw
, "Cannot find a usable firmware: "
1991 "driver compiled with %d.%d.%d.%d, "
1992 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
1994 FW_HDR_FW_VER_MAJOR_G(d
), FW_HDR_FW_VER_MINOR_G(d
),
1995 FW_HDR_FW_VER_MICRO_G(d
), FW_HDR_FW_VER_BUILD_G(d
),
1996 FW_HDR_FW_VER_MAJOR_G(c
), FW_HDR_FW_VER_MINOR_G(c
),
1997 FW_HDR_FW_VER_MICRO_G(c
), FW_HDR_FW_VER_BUILD_G(c
),
1998 FW_HDR_FW_VER_MAJOR_G(k
), FW_HDR_FW_VER_MINOR_G(k
),
1999 FW_HDR_FW_VER_MICRO_G(k
), FW_HDR_FW_VER_BUILD_G(k
));
2004 /* We're using whatever's on the card and it's known to be good. */
2005 hw
->fwrev
= be32_to_cpu(card_fw
->fw_ver
);
2006 hw
->tp_vers
= be32_to_cpu(card_fw
->tp_microcode_ver
);
/*
 * Returns -EINVAL if an attempt to flash the firmware failed,
 * 0 on success, and ECANCELED if flashing was not attempted
 * because the card already had the latest firmware.
 */
static int
csio_hw_flash_fw(struct csio_hw *hw, int *reset)
{
	int ret = -ECANCELED;
	const struct firmware *fw;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	const u8 *fw_data = NULL;
	unsigned int fw_size = 0;
	const char *fw_bin_file;

	/* This is the firmware whose headers the driver was compiled
	 * against
	 */
	fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
	if (fw_info == NULL) {
		csio_err(hw,
			 "unable to get firmware info for chip %d.\n",
			 CHELSIO_CHIP_VERSION(hw->chip_id));
		return -EINVAL;
	}

	if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
		fw_bin_file = FW_FNAME_T5;
	else
		fw_bin_file = FW_FNAME_T6;

	if (request_firmware(&fw, fw_bin_file, dev) < 0) {
		csio_err(hw, "could not find firmware image %s, err: %d\n",
			 fw_bin_file, ret);
	} else {
		fw_data = fw->data;
		fw_size = fw->size;
	}

	/* allocate memory to read the header of the firmware on the
	 * card
	 */
	card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
	if (!card_fw)
		return -ENOMEM;

	/* upgrade FW logic */
	ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
			      hw->fw_state, reset);

	/* Cleaning up */
	if (fw != NULL)
		release_firmware(fw);
	kfree(card_fw);
	return ret;
}
static int csio_hw_check_fwver(struct csio_hw *hw)
{
	if (csio_is_t6(hw->pdev->device & CSIO_HW_CHIP_MASK) &&
	    (hw->fwrev < CSIO_MIN_T6_FW)) {
		csio_hw_print_fw_version(hw, "T6 unsupported fw");
		return -1;
	}

	return 0;
}
/*
 * csio_hw_configure - Configure HW
 * @hw: HW module
 *
 */
static void
csio_hw_configure(struct csio_hw *hw)
{
	int reset = 1;
	int rv;
	u32 param[1];

	rv = csio_hw_dev_ready(hw);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* HW version */
	hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);

	/* Needed for FW download */
	rv = csio_hw_get_flash_params(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Set PCIe completion timeout to 4 seconds */
	if (pci_is_pcie(hw->pdev))
		pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
				PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);

	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);

	rv = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (rv != 0)
		goto out;

	csio_hw_print_fw_version(hw, "Firmware revision");

	rv = csio_do_hello(hw, &hw->fw_state);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Read vpd */
	rv = csio_hw_get_vpd_params(hw, &hw->vpd);
	if (rv != 0)
		goto out;

	csio_hw_get_fw_version(hw, &hw->fwrev);
	csio_hw_get_tp_version(hw, &hw->tp_vers);
	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {

		/* Do firmware update */
		spin_unlock_irq(&hw->lock);
		rv = csio_hw_flash_fw(hw, &reset);
		spin_lock_irq(&hw->lock);

		if (rv != 0)
			goto out;

		rv = csio_hw_check_fwver(hw);
		if (rv < 0)
			goto out;

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		rv = csio_hw_check_fwconfig(hw, param);
		if (rv != 0) {
			csio_info(hw, "Firmware doesn't support "
				  "Firmware Configuration files\n");
			goto out;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		rv = csio_hw_use_fwconfig(hw, reset, param);
		if (rv == -ENOENT) {
			csio_info(hw, "Could not initialize "
				  "adapter, error %d\n", rv);
			goto out;
		}
		if (rv != 0) {
			csio_info(hw, "Could not initialize "
				  "adapter, error %d\n", rv);
			goto out;
		}

	} else {
		rv = csio_hw_check_fwver(hw);
		if (rv < 0)
			goto out;

		if (hw->fw_state == CSIO_DEV_STATE_INIT) {

			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

			/* device parameters */
			rv = csio_get_device_params(hw);
			if (rv != 0)
				goto out;

			/* Get device capabilities */
			rv = csio_config_device_caps(hw);
			if (rv != 0)
				goto out;

			/* Configure SGE */
			csio_wr_sge_init(hw);

			/* Post event to notify completion of configuration */
			csio_post_event(&hw->sm, CSIO_HWE_INIT);
		}
	} /* if not master */

out:
	return;
}
/*
 * csio_hw_initialize - Initialize HW
 * @hw: HW module
 *
 */
static void
csio_hw_initialize(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	int rv;
	int i;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp)
			goto out;

		csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
			goto free_and_out;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
				 retval);
			goto free_and_out;
		}

		mempool_free(mbp, hw->mb_mempool);
	}

	rv = csio_get_fcoe_resinfo(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
		goto out;
	}

	spin_unlock_irq(&hw->lock);
	rv = csio_config_queues(hw);
	spin_lock_irq(&hw->lock);

	if (rv != 0) {
		csio_err(hw, "Config of queues failed!: %d\n", rv);
		goto out;
	}

	for (i = 0; i < hw->num_pports; i++)
		hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_enable_ports(hw);
		if (rv != 0) {
			csio_err(hw, "Failed to enable ports: %d\n", rv);
			goto out;
		}
	}

	csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
	return;

free_and_out:
	mempool_free(mbp, hw->mb_mempool);
out:
	return;
}
#define PF_INTR_MASK (PFSW_F | PFCIM_F)

/*
 * csio_hw_intr_enable - Enable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Enable interrupts in HW registers.
 */
static void
csio_hw_intr_enable(struct csio_hw *hw)
{
	uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
	u32 pf = 0;
	uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);

	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
	else
		pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));

	/*
	 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
	 * by FW, so do nothing for INTX.
	 */
	if (hw->intr_mode == CSIO_IM_MSIX)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
				   AIVEC_V(AIVEC_M), vec);
	else if (hw->intr_mode == CSIO_IM_MSI)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
				   AIVEC_V(AIVEC_M), 0);

	csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));

	/* Turn on MB interrupts - this will internally flush PIO as well */
	csio_mb_intr_enable(hw);

	/* These are common registers - only a master can modify them */
	if (csio_is_hw_master(hw)) {
		/*
		 * Disable the Serial FLASH interrupt, if enabled!
		 */
		pl &= (~SF_F);
		csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);

		csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
			      EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
			      ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
			      ERR_DATA_CPL_ON_HIGH_QID1_F |
			      ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
			      ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
			      ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
			      ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
			      SGE_INT_ENABLE3_A);
		csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
	}

	hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
}
/*
 * csio_hw_intr_disable - Disable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Turn off Mailbox and PCI_PF_CFG interrupts.
 */
void
csio_hw_intr_disable(struct csio_hw *hw)
{
	u32 pf = 0;

	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
	else
		pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));

	if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
		return;

	hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;

	csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
	if (csio_is_hw_master(hw))
		csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);

	/* Turn off MB interrupts */
	csio_mb_intr_disable(hw);
}
void
csio_hw_fatal_err(struct csio_hw *hw)
{
	csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	csio_hw_intr_disable(hw);

	/* Do not reset HW, we may need FW state for debugging */
	csio_fatal(hw, "HW Fatal error encountered!\n");
}
/*****************************************************************************/
/* START: HW SM                                                              */
/*****************************************************************************/
/*
 * csio_hws_uninit - Uninit state
 * @hw: HW module
 * @evt: Event
 *
 */
static void
csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_CFG:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_configuring - Configuring state
 * @hw: HW module
 * @evt: Event
 *
 */
static void
csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT:
		csio_set_state(&hw->sm, csio_hws_initializing);
		csio_hw_initialize(hw);
		break;

	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);
		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_initializing - Initializing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);

		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);

		/* Enable interrupts */
		csio_hw_intr_enable(hw);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_ready - Ready state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
{
	/* Remember the event */
	hw->evtflag = evt;

	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
	case CSIO_HWE_FW_DLOAD:
	case CSIO_HWE_SUSPEND:
	case CSIO_HWE_PCI_REMOVE:
	case CSIO_HWE_PCIERR_DETECTED:
		csio_set_state(&hw->sm, csio_hws_quiescing);
		/* cleanup all outstanding cmds */
		if (evt == CSIO_HWE_HBA_RESET ||
		    evt == CSIO_HWE_PCIERR_DETECTED)
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
		else
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);

		csio_hw_intr_disable(hw);
		csio_hw_mbm_cleanup(hw);
		csio_evtq_stop(hw);
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
		csio_evtq_flush(hw);
		csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
		csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_quiescing - Quiescing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_QUIESCED:
		switch (hw->evtflag) {
		case CSIO_HWE_FW_DLOAD:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Download firmware */
			/* Fall through */

		case CSIO_HWE_HBA_RESET:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Start reset of the HBA */
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
			csio_wr_destroy_queues(hw, false);
			csio_do_reset(hw, false);
			csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
			break;

		case CSIO_HWE_PCI_REMOVE:
			csio_set_state(&hw->sm, csio_hws_removing);
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
			csio_wr_destroy_queues(hw, true);
			/* Now send the bye command */
			csio_do_bye(hw);
			break;

		case CSIO_HWE_SUSPEND:
			csio_set_state(&hw->sm, csio_hws_quiesced);
			break;

		case CSIO_HWE_PCIERR_DETECTED:
			csio_set_state(&hw->sm, csio_hws_pcierr);
			csio_wr_destroy_queues(hw, false);
			break;

		default:
			CSIO_INC_STATS(hw, n_evt_unexp);
			break;
		}
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_quiesced - Quiesced state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_RESUME:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_resetting - HW Resetting state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET_DONE:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_removing - PCI Hotplug removing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
		if (!csio_is_hw_master(hw))
			break;
		/*
		 * The BYE should have already been issued, so we can't
		 * use the mailbox interface. Hence we use the PL_RST
		 * register directly.
		 */
		csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		break;

	/* Should never receive any new events */
	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_pcierr - PCI Error state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_PCIERR_SLOT_RESET:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*****************************************************************************/
/* END: HW SM                                                                */
/*****************************************************************************/

/*
 * csio_handle_intr_status - table driven interrupt handler
 * @hw: HW instance
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred. The actions include
 * optionally emitting a warning or alert message. The table is terminated
 * by an entry specifying mask 0. Returns the number of fatal interrupt
 * conditions.
 */
int
csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
			const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = csio_rd_reg32(hw, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			csio_fatal(hw, "Fatal %s (0x%x)\n",
				   acts->msg, status & acts->mask);
		} else if (acts->msg)
			csio_info(hw, "%s (0x%x)\n",
				  acts->msg, status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		csio_wr_reg32(hw, status, reg);
	return fatal;
}

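/*
 * Usage sketch for the table-driven handler above (illustrative only, not
 * driver code; EXAMPLE_INT_CAUSE_A and EXAMPLE_ERR_F are hypothetical
 * names). Each entry is { mask, message, extra, fatal }, the table ends
 * with a zero mask, and a non-zero return means at least one fatal bit
 * was set. The per-module handlers below all follow this shape:
 *
 *	static struct intr_info example_intr_info[] = {
 *		{ EXAMPLE_ERR_F, "example module parity error", -1, 1 },
 *		{ 0, NULL, 0, 0 }
 *	};
 *
 *	if (csio_handle_intr_status(hw, EXAMPLE_INT_CAUSE_A,
 *				    example_intr_info))
 *		csio_hw_fatal_err(hw);
 */
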
/*
 * TP interrupt handler.
 */
static void csio_tp_intr_handler(struct csio_hw *hw)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * SGE interrupt handler.
 */
static void csio_sge_intr_handler(struct csio_hw *hw)
{
	uint64_t v;
	static struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE_F,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC_F,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO_F,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO_F,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
		{ 0, NULL, 0, 0 }
	};

	v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
	    ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
	if (v) {
		csio_fatal(hw, "SGE parity error (%#llx)\n",
			   (unsigned long long)v);
		csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
			      SGE_INT_CAUSE1_A);
		csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
	}

	v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);

	if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
	    v != 0)
		csio_hw_fatal_err(hw);
}

#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)

/*
 * CIM interrupt handler.
 */
static void csio_cim_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cim_intr_info[] = {
		{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT_F, "CIM illegal write", -1, 1 },
		{ ILLRDINT_F, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	int fat;

	fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
				      cim_intr_info) +
	      csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
				      cim_upintr_info);
	if (fat)
		csio_hw_fatal_err(hw);
}

/*
 * ULP RX interrupt handler.
 */
static void csio_ulprx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * ULP TX interrupt handler.
 */
static void csio_ulptx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * PM TX interrupt handler.
 */
static void csio_pmtx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
		  1 },
		{ ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
		{ PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * PM RX interrupt handler.
 */
static void csio_pmrx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
		  1 },
		{ IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
		{ PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * CPL switch interrupt handler.
 */
static void csio_cplsw_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * LE interrupt handler.
 */
static void csio_le_intr_handler(struct csio_hw *hw)
{
	enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);

	static struct intr_info le_intr_info[] = {
		{ LIPMISS_F, "LE LIP miss", -1, 0 },
		{ LIP0_F, "LE 0 LIP error", -1, 0 },
		{ PARITYERR_F, "LE parity error", -1, 1 },
		{ UNKNOWNCMD_F, "LE unknown command", -1, 1 },
		{ REQQPARERR_F, "LE request queue parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	static struct intr_info t6_le_intr_info[] = {
		{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
		{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
		{ TCAMINTPERR_F, "LE parity error", -1, 1 },
		{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
		{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A,
				    (chip == CHELSIO_T5) ?
				    le_intr_info : t6_le_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * MPS interrupt handler.
 */
static void csio_mps_intr_handler(struct csio_hw *hw)
{
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
		  -1, 1 },
		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	int fat;

	fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
				      mps_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
				      mps_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
				      mps_trc_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
				      mps_stat_sram_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
				      mps_stat_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
				      mps_stat_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
				      mps_cls_intr_info);

	csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
	csio_rd_reg32(hw, MPS_INT_CAUSE_A);	/* flush */
	if (fat)
		csio_hw_fatal_err(hw);
}

#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
		      ECC_UE_INT_CAUSE_F)

/*
 * EDC/MC interrupt handler.
 */
static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
	} else {
		addr = MC_INT_CAUSE_A;
		cnt_addr = MC_ECC_STATUS_A;
	}

	v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE_F)
		csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
	if (v & ECC_CE_INT_CAUSE_F) {
		uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));

		csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
		csio_warn(hw, "%u %s correctable ECC data error%s\n",
			  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE_F)
		csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);

	csio_wr_reg32(hw, v, addr);
	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
		csio_hw_fatal_err(hw);
}

/*
 * MA interrupt handler.
 */
static void csio_ma_intr_handler(struct csio_hw *hw)
{
	uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);

	if (status & MEM_PERR_INT_CAUSE_F)
		csio_fatal(hw, "MA parity error, parity status %#x\n",
			   csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
	if (status & MEM_WRAP_INT_CAUSE_F) {
		v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
		csio_fatal(hw,
			   "MA address wrap-around error by client %u to address %#x\n",
			   MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
	}
	csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
	csio_hw_fatal_err(hw);
}

/*
 * SMB interrupt handler.
 */
static void csio_smb_intr_handler(struct csio_hw *hw)
{
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * NC-SI interrupt handler.
 */
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * XGMAC interrupt handler.
 */
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
	uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));

	v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR_F)
		csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
	if (v & RXFIFO_PRTY_ERR_F)
		csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
	csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
	csio_hw_fatal_err(hw);
}

/*
 * PL interrupt handler.
 */
static void csio_pl_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR_F, "T4 fatal parity error", -1, 1 },
		{ PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * csio_hw_slow_intr_handler - control path interrupt handler
 * @hw: HW module.
 *
 * Interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 */
int
csio_hw_slow_intr_handler(struct csio_hw *hw)
{
	uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);

	if (!(cause & CSIO_GLBL_INTR_MASK)) {
		CSIO_INC_STATS(hw, n_plint_unexp);
		return 0;
	}

	csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);

	CSIO_INC_STATS(hw, n_plint_cnt);

	if (cause & CIM_F)
		csio_cim_intr_handler(hw);

	if (cause & MPS_F)
		csio_mps_intr_handler(hw);

	if (cause & NCSI_F)
		csio_ncsi_intr_handler(hw);

	if (cause & PL_F)
		csio_pl_intr_handler(hw);

	if (cause & SMB_F)
		csio_smb_intr_handler(hw);

	if (cause & XGMAC0_F)
		csio_xgmac_intr_handler(hw, 0);

	if (cause & XGMAC1_F)
		csio_xgmac_intr_handler(hw, 1);

	if (cause & XGMAC_KR0_F)
		csio_xgmac_intr_handler(hw, 2);

	if (cause & XGMAC_KR1_F)
		csio_xgmac_intr_handler(hw, 3);

	if (cause & PCIE_F)
		hw->chip_ops->chip_pcie_intr_handler(hw);

	if (cause & MC_F)
		csio_mem_intr_handler(hw, MEM_MC);

	if (cause & EDC0_F)
		csio_mem_intr_handler(hw, MEM_EDC0);

	if (cause & EDC1_F)
		csio_mem_intr_handler(hw, MEM_EDC1);

	if (cause & LE_F)
		csio_le_intr_handler(hw);

	if (cause & TP_F)
		csio_tp_intr_handler(hw);

	if (cause & MA_F)
		csio_ma_intr_handler(hw);

	if (cause & PM_TX_F)
		csio_pmtx_intr_handler(hw);

	if (cause & PM_RX_F)
		csio_pmrx_intr_handler(hw);

	if (cause & ULP_RX_F)
		csio_ulprx_intr_handler(hw);

	if (cause & CPL_SWITCH_F)
		csio_cplsw_intr_handler(hw);

	if (cause & SGE_F)
		csio_sge_intr_handler(hw);

	if (cause & ULP_TX_F)
		csio_ulptx_intr_handler(hw);

	/* Clear the interrupts just processed for which we are the master. */
	csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
	csio_rd_reg32(hw, PL_INT_CAUSE_A);	/* flush */

	return 1;
}

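/*
 * Caller sketch (illustrative only; the actual ISRs live in csio_isr.c
 * and IRQ_HANDLED/IRQ_NONE come from linux/interrupt.h). A non-data
 * interrupt vector would funnel into this slow-path handler and treat a
 * non-zero return as "the interrupt was ours":
 *
 *	if (csio_hw_slow_intr_handler(hw))
 *		return IRQ_HANDLED;
 *	return IRQ_NONE;
 */
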
/*****************************************************************************
 * HW <--> mailbox interfacing routines.
 ****************************************************************************/

/*
 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
 *
 * @data: Private data pointer.
 *
 * Called from worker thread context.
 */
static void
csio_mberr_worker(void *data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mbm *mbm = &hw->mbm;
	LIST_HEAD(cbfn_q);
	struct csio_mb *mbp_next;
	int rv;

	del_timer_sync(&mbm->timer);

	spin_lock_irq(&hw->lock);
	if (list_empty(&mbm->cbfn_q)) {
		spin_unlock_irq(&hw->lock);
		return;
	}

	list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
	mbm->stats.n_cbfnq = 0;

	/* Try to start waiting mailboxes */
	if (!list_empty(&mbm->req_q)) {
		mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
		list_del_init(&mbp_next->list);

		rv = csio_mb_issue(hw, mbp_next);
		if (rv != 0)
			list_add_tail(&mbp_next->list, &mbm->req_q);
		else
			CSIO_DEC_STATS(mbm, n_activeq);
	}
	spin_unlock_irq(&hw->lock);

	/* Now callback completions */
	csio_mb_completions(hw, &cbfn_q);
}

/*
 * csio_hw_mb_timer - Top-level Mailbox timeout handler.
 *
 * @t: Timer embedded in the mailbox module.
 */
static void
csio_hw_mb_timer(struct timer_list *t)
{
	struct csio_mbm *mbm = from_timer(mbm, t, timer);
	struct csio_hw *hw = mbm->hw;
	struct csio_mb *mbp = NULL;

	spin_lock_irq(&hw->lock);
	mbp = csio_mb_tmo_handler(hw);
	spin_unlock_irq(&hw->lock);

	/* Call back the function for the timed-out Mailbox */
	if (mbp)
		mbp->mb_cbfn(hw, mbp);
}

/*
 * csio_hw_mbm_cleanup - Cleanup Mailbox module.
 * @hw: HW module
 *
 * Called with lock held, should exit with lock held.
 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
 * into a local queue. Drops lock and calls the completions. Holds
 * lock and returns.
 */
static void
csio_hw_mbm_cleanup(struct csio_hw *hw)
{
	LIST_HEAD(cbfn_q);

	csio_mb_cancel_all(hw, &cbfn_q);

	spin_unlock_irq(&hw->lock);
	csio_mb_completions(hw, &cbfn_q);
	spin_lock_irq(&hw->lock);
}

/*****************************************************************************
 * Event handling
 ****************************************************************************/

int
csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		 uint16_t len)
{
	struct csio_evt_msg *evt_entry = NULL;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	if (hw->flags & CSIO_HWF_FWEVT_STOP)
		return -EINVAL;

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		return -ENOMEM;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;
	memcpy((void *)evt_entry->data, evt_msg, len);
	list_add_tail(&evt_entry->list, &hw->evt_active_q);

	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);

	return 0;
}

static int
csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		      uint16_t len, bool msg_sg)
{
	struct csio_evt_msg *evt_entry = NULL;
	struct csio_fl_dma_buf *fl_sg;
	uint32_t off = 0;
	unsigned long flags;
	int n, ret = 0;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&hw->lock, flags);
	if (hw->flags & CSIO_HWF_FWEVT_STOP) {
		ret = -EINVAL;
		goto out;
	}

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		ret = -ENOMEM;
		goto out;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;

	/* If Payload in SG list*/
	if (msg_sg) {
		fl_sg = (struct csio_fl_dma_buf *) evt_msg;
		for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
			memcpy((void *)((uintptr_t)evt_entry->data + off),
			       fl_sg->flbufs[n].vaddr,
			       fl_sg->flbufs[n].len);
			off += fl_sg->flbufs[n].len;
		}
	} else
		memcpy((void *)evt_entry->data, evt_msg, len);

	list_add_tail(&evt_entry->list, &hw->evt_active_q);
	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);
out:
	spin_unlock_irqrestore(&hw->lock, flags);
	return ret;
}

static void
csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
{
	if (evt_entry) {
		spin_lock_irq(&hw->lock);
		list_del_init(&evt_entry->list);
		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_DEC_STATS(hw, n_evt_activeq);
		CSIO_INC_STATS(hw, n_evt_freeq);
		spin_unlock_irq(&hw->lock);
	}
}

void
csio_evtq_flush(struct csio_hw *hw)
{
	uint32_t count;

	count = 30;
	while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
}

static void
csio_evtq_stop(struct csio_hw *hw)
{
	hw->flags |= CSIO_HWF_FWEVT_STOP;
}

static void
csio_evtq_start(struct csio_hw *hw)
{
	hw->flags &= ~CSIO_HWF_FWEVT_STOP;
}

static void
csio_evtq_cleanup(struct csio_hw *hw)
{
	struct list_head *evt_entry, *next_entry;

	/* Release outstanding events from activeq to freeq*/
	if (!list_empty(&hw->evt_active_q))
		list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);

	hw->stats.n_evt_activeq = 0;
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;

	/* Freeup event entry */
	list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
		kfree(evt_entry);
		CSIO_DEC_STATS(hw, n_evt_freeq);
	}

	hw->stats.n_evt_freeq = 0;
}

static void
csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
			  struct csio_fl_dma_buf *flb, void *priv)
{
	__u8 op;
	void *msg = NULL;
	uint32_t msg_len = 0;
	bool msg_sg = 0;

	op = ((struct rss_header *) wr)->opcode;
	if (op == CPL_FW6_PLD) {
		CSIO_INC_STATS(hw, n_cpl_fw6_pld);
		if (!flb || !flb->totlen) {
			CSIO_INC_STATS(hw, n_cpl_unexp);
			return;
		}

		msg = (void *) flb;
		msg_len = flb->totlen;
		msg_sg = 1;
	} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {

		CSIO_INC_STATS(hw, n_cpl_fw6_msg);
		/* skip RSS header */
		msg = (void *)((uintptr_t)wr + sizeof(__be64));
		msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
			  sizeof(struct cpl_fw4_msg);
	} else {
		csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
		CSIO_INC_STATS(hw, n_cpl_unexp);
		return;
	}

	/*
	 * Enqueue event to EventQ. Events processing happens
	 * in Event worker thread context
	 */
	if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
				  (uint16_t)msg_len, msg_sg))
		CSIO_INC_STATS(hw, n_evt_drop);
}

static void
csio_evtq_worker(struct work_struct *work)
{
	struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
	struct list_head *evt_entry, *next_entry;
	LIST_HEAD(evt_q);
	struct csio_evt_msg *evt_msg;
	struct cpl_fw6_msg *msg;
	struct csio_rnode *rn;
	int rv = 0;
	uint8_t evtq_stop = 0;

	csio_dbg(hw, "event worker thread active evts#%d\n",
		 hw->stats.n_evt_activeq);

	spin_lock_irq(&hw->lock);
	while (!list_empty(&hw->evt_active_q)) {
		list_splice_tail_init(&hw->evt_active_q, &evt_q);
		spin_unlock_irq(&hw->lock);

		list_for_each_safe(evt_entry, next_entry, &evt_q) {
			evt_msg = (struct csio_evt_msg *) evt_entry;

			/* Drop events if queue is STOPPED */
			spin_lock_irq(&hw->lock);
			if (hw->flags & CSIO_HWF_FWEVT_STOP)
				evtq_stop = 1;
			spin_unlock_irq(&hw->lock);
			if (evtq_stop) {
				CSIO_INC_STATS(hw, n_evt_drop);
				goto free_evt;
			}

			switch (evt_msg->type) {
			case CSIO_EVT_FW:
				msg = (struct cpl_fw6_msg *)(evt_msg->data);

				if ((msg->opcode == CPL_FW6_MSG ||
				     msg->opcode == CPL_FW4_MSG) &&
				    !msg->type) {
					rv = csio_mb_fwevt_handler(hw,
								   msg->data);
					if (!rv)
						break;
					/* Handle any remaining fw events */
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else if (msg->opcode == CPL_FW6_PLD) {

					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else {
					csio_warn(hw,
					     "Unhandled FW msg op %x type %x\n",
						  msg->opcode, msg->type);
					CSIO_INC_STATS(hw, n_evt_drop);
				}
				break;

			case CSIO_EVT_MBX:
				csio_mberr_worker(hw);
				break;

			case CSIO_EVT_DEV_LOSS:
				memcpy(&rn, evt_msg->data, sizeof(rn));
				csio_rnode_devloss_handler(rn);
				break;

			default:
				csio_warn(hw, "Unhandled event %x on evtq\n",
					  evt_msg->type);
				CSIO_INC_STATS(hw, n_evt_unexp);
				break;
			}

free_evt:
			csio_free_evt(hw, evt_msg);
		}

		spin_lock_irq(&hw->lock);
	}
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
	spin_unlock_irq(&hw->lock);
}

int
csio_fwevtq_handler(struct csio_hw *hw)
{
	int rv;

	if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
		CSIO_INC_STATS(hw, n_int_stray);
		return -EINVAL;
	}

	rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
				    csio_process_fwevtq_entry, NULL);
	return rv;
}

/****************************************************************************
 * Entry points
 ****************************************************************************/

/* Management module */

/*
 * csio_mgmt_req_lookup - Check whether the given IO req exists in the
 * Active Q.
 * @mgmtm - mgmt module
 * @io_req - io request
 *
 * Return - 0: if the given IO Req exists in the active Q.
 *          -EINVAL: if the lookup fails.
 */
int
csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
{
	struct list_head *tmp;

	/* Lookup ioreq in the ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		if (io_req == (struct csio_ioreq *)tmp)
			return 0;
	}
	return -EINVAL;
}

#define	ECM_MIN_TMO	1000	/* Minimum timeout value for req */

/*
 * csio_mgmt_tmo_handler - MGMT IO Timeout handler.
 * @t - Timer embedded in the mgmt module.
 *
 * Return - none.
 */
static void
csio_mgmt_tmo_handler(struct timer_list *t)
{
	struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer);
	struct list_head *tmp;
	struct csio_ioreq *io_req;

	csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");

	spin_lock_irq(&mgmtm->hw->lock);

	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);

		if (!io_req->tmo) {
			/* Dequeue the request from retry Q. */
			tmp = csio_list_prev(tmp);
			list_del_init(&io_req->sm.sm_list);
			if (io_req->io_cbfn) {
				/* io_req will be freed by completion handler */
				io_req->wr_status = -ETIMEDOUT;
				io_req->io_cbfn(mgmtm->hw, io_req);
			}
		}
	}

	/* If retry queue is not empty, re-arm timer */
	if (!list_empty(&mgmtm->active_q))
		mod_timer(&mgmtm->mgmt_timer,
			  jiffies + msecs_to_jiffies(ECM_MIN_TMO));
	spin_unlock_irq(&mgmtm->hw->lock);
}

static void
csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
{
	struct csio_hw *hw = mgmtm->hw;
	struct csio_ioreq *io_req;
	struct list_head *tmp;
	uint32_t count;

	count = 30;
	/* Wait for all outstanding req to complete gracefully */
	while ((!list_empty(&mgmtm->active_q)) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	/* release outstanding req from ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		tmp = csio_list_prev(tmp);
		list_del_init(&io_req->sm.sm_list);
		mgmtm->stats.n_active--;
		if (io_req->io_cbfn) {
			/* io_req will be freed by completion handler */
			io_req->wr_status = -ETIMEDOUT;
			io_req->io_cbfn(mgmtm->hw, io_req);
		}
	}
}

/*
 * csio_mgmtm_init - Mgmt module init entry point
 * @mgmtm - mgmt module
 * @hw - HW module
 *
 * Initialize mgmt timer, resource wait queue, active queue,
 * completion q. Allocate Egress and Ingress
 * WR queues and save off the queue index returned by the WR
 * module for future use. Allocate and save off mgmt reqs in the
 * mgmt_req_freelist for future use. Make sure their SM is initialized
 * to uninit state.
 *
 * Returns: 0 - on success
 *          -ENOMEM - on error.
 */
static int
csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
{
	timer_setup(&mgmtm->mgmt_timer, csio_mgmt_tmo_handler, 0);

	INIT_LIST_HEAD(&mgmtm->active_q);
	INIT_LIST_HEAD(&mgmtm->cbfn_q);

	mgmtm->hw = hw;
	/*mgmtm->iq_idx = hw->fwevt_iq_idx;*/

	return 0;
}

/*
 * csio_mgmtm_exit - MGMT module exit entry point
 * @mgmtm - mgmt module
 *
 * This function is called during MGMT module uninit.
 * Stop timers, free ioreqs allocated.
 * Returns: None
 *
 */
static void
csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
{
	del_timer_sync(&mgmtm->mgmt_timer);
}

/*
 * csio_hw_start - Kicks off the HW State machine
 * @hw: Pointer to HW module.
 *
 * It is assumed that the initialization is a synchronous operation.
 * So when we return after posting the event, the HW SM should be in
 * the ready state, if there were no errors during init.
 */
int
csio_hw_start(struct csio_hw *hw)
{
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_CFG);
	spin_unlock_irq(&hw->lock);

	if (csio_is_hw_ready(hw))
		return 0;
	else if (csio_match_state(hw, csio_hws_uninit))
		return -EINVAL;
	else
		return -ENODEV;
}

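/*
 * Probe-time usage sketch (illustrative only; error handling trimmed).
 * The PCI probe path is expected to initialize the module state first and
 * then kick the SM; because csio_post_event() runs the state handlers
 * inline, csio_hw_start() can test for readiness right after it returns:
 *
 *	rv = csio_hw_init(hw);		(sets up mbm/wrm/scsim/mgmtm + evtq)
 *	if (rv)
 *		return rv;
 *	rv = csio_hw_start(hw);		(posts CSIO_HWE_CFG to the SM)
 *	if (rv)
 *		csio_hw_exit(hw);
 */
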
int
csio_hw_stop(struct csio_hw *hw)
{
	csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);

	if (csio_is_hw_removing(hw))
		return 0;
	else
		return -EINVAL;
}

/* Max reset retries */
#define CSIO_MAX_RESET_RETRIES	3

/*
 * csio_hw_reset - Reset the hardware
 * @hw: HW module.
 *
 * Caller should hold lock across this function.
 */
int
csio_hw_reset(struct csio_hw *hw)
{
	if (!csio_is_hw_master(hw))
		return -EPERM;

	if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
		csio_dbg(hw, "Max hw reset attempts reached..");
		return -EINVAL;
	}

	hw->rst_retries++;
	csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);

	if (csio_is_hw_ready(hw)) {
		hw->rst_retries = 0;
		hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
		return 0;
	} else
		return -EINVAL;
}

/*
 * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
 * @hw: HW module.
 */
static void
csio_hw_get_device_id(struct csio_hw *hw)
{
	/* Is the adapter device id cached already ?*/
	if (csio_is_dev_id_cached(hw))
		return;

	/* Get the PCI vendor & device id */
	pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
			     &hw->params.pci.vendor_id);
	pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
			     &hw->params.pci.device_id);

	csio_dev_id_cached(hw);
	hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);

} /* csio_hw_get_device_id */

/*
 * csio_hw_set_description - Set the model, description of the hw.
 * @hw: HW module.
 * @ven_id: PCI Vendor ID
 * @dev_id: PCI Device ID
 */
static void
csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
{
	uint32_t adap_type, prot_type;

	if (ven_id == CSIO_VENDOR_ID) {
		prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
		adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);

		if (prot_type == CSIO_T5_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t5_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t5_fcoe_adapters[adap_type].description,
			       32);
		} else {
			char tempName[32] = "Chelsio FCoE Controller";
			memcpy(hw->model_desc, tempName, 32);
		}
	}
} /* csio_hw_set_description */

/*
 * csio_hw_init - Initialize HW module.
 * @hw: Pointer to HW module.
 *
 * Initialize the members of the HW module.
 */
int
csio_hw_init(struct csio_hw *hw)
{
	int rv = -EINVAL;
	uint32_t i;
	uint16_t ven_id, dev_id;
	struct csio_evt_msg *evt_entry;

	INIT_LIST_HEAD(&hw->sm.sm_list);
	csio_init_state(&hw->sm, csio_hws_uninit);
	spin_lock_init(&hw->lock);
	INIT_LIST_HEAD(&hw->sln_head);

	/* Get the PCI vendor & device id */
	csio_hw_get_device_id(hw);

	strcpy(hw->name, CSIO_HW_NAME);

	/* Initialize the HW chip ops with T5 specific ops */
	hw->chip_ops = &t5_ops;

	/* Set the model & its description */
	ven_id = hw->params.pci.vendor_id;
	dev_id = hw->params.pci.device_id;

	csio_hw_set_description(hw, ven_id, dev_id);

	/* Initialize default log level */
	hw->params.log_level = (uint32_t) csio_dbg_level;

	csio_set_fwevt_intr_idx(hw, -1);
	csio_set_nondata_intr_idx(hw, -1);

	/* Init all the modules: Mailbox, WorkRequest and Transport */
	if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
		goto err;

	rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
	if (rv)
		goto err_mbm_exit;

	rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
	if (rv)
		goto err_wrm_exit;

	rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
	if (rv)
		goto err_scsim_exit;

	/* Pre-allocate evtq and initialize them */
	INIT_LIST_HEAD(&hw->evt_active_q);
	INIT_LIST_HEAD(&hw->evt_free_q);
	for (i = 0; i < csio_evtq_sz; i++) {
		evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
		if (!evt_entry) {
			rv = -ENOMEM;
			csio_err(hw, "Failed to initialize eventq");
			goto err_evtq_cleanup;
		}

		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_INC_STATS(hw, n_evt_freeq);
	}

	hw->dev_num = dev_num;
	dev_num++;

	return 0;

err_evtq_cleanup:
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
err_scsim_exit:
	csio_scsim_exit(csio_hw_to_scsim(hw));
err_wrm_exit:
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
err_mbm_exit:
	csio_mbm_exit(csio_hw_to_mbm(hw));
err:
	return rv;
}

/*
 * csio_hw_exit - Un-initialize HW module.
 * @hw: Pointer to HW module.
 *
 */
void
csio_hw_exit(struct csio_hw *hw)
{
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
	csio_scsim_exit(csio_hw_to_scsim(hw));
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
	csio_mbm_exit(csio_hw_to_mbm(hw));
}