/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;
/* FCoE Adapter types & its description */
static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"},
	{"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"},
	{"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"}
};
static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);
int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}
/*
 *	csio_hw_wait_op_done_val - wait until an operation is completed
 *	@hw: the HW module
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
static int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;

		if (delay)
			udelay(delay);
	}
}
/*
 *	csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@hw: the HW module
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
	val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA_A);
}
void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	/* Flush */
	csio_rd_reg32(hw, reg);
}
static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}
/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3
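/*
 * Note: in the PCI VPD format each information field begins with a 3-byte
 * header -- a 2-byte keyword followed by a 1-byte data length -- which is
 * what VPD_INFO_FLD_HDR_SIZE accounts for in the parsing below.
 */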
/*
 *	csio_hw_seeprom_read - read a serial EEPROM location
 *	@hw: hw to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's
 *	PCI VPD capability.  Note that this function must be called with a
 *	virtual address.
 */
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
	uint16_t val = 0;
	int attempts = EEPROM_MAX_RD_POLL;
	uint32_t base = hw->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

	do {
		udelay(10);
		pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
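	/*
	 * PCI VPD handshake: writing the address (with the F flag clear)
	 * starts a read, and the device sets PCI_VPD_ADDR_F once the 32-bit
	 * datum is available in the VPD data register, so we poll for it.
	 */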
	if (!(val & PCI_VPD_ADDR_F)) {
		csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
		return -EINVAL;
	}

	pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*(__le32 *)data);

	return 0;
}
/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};
/*
 *	csio_hw_get_vpd_keyword_val - Locates an information field keyword in
 *				      the VPD
 *	@v: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *
 *	Returns the value of the information field keyword or
 *	-EINVAL otherwise.
 */
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int32_t i;
	int32_t offset, len;
	const uint8_t *buf = &v->id_tag;
	const uint8_t *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);
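	/*
	 * The two bytes after the VPD-R tag hold the little-endian length of
	 * the VPD-R section, which bounds the keyword scan below.
	 */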
	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -EINVAL;

	for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -EINVAL;
}
static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);
	if (*pos)
		return 0;

	return -1;
}
/*
 *	csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 *	@hw: HW module
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
	int i, ret, ec, sn, addr;
	uint8_t *vpd, csum;
	const struct t4_vpd_hdr *v;
	/* To get around compilation warning from strstrip */
	char *s;

	if (csio_is_valid_vpd(hw))
		return 0;

	ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
				  &hw->params.pci.vpd_cap_addr);
	if (ret)
		return -EINVAL;

	vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
	if (vpd == NULL)
		return -ENOMEM;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
		if (ret) {
			kfree(vpd);
			return -EINVAL;
		}
	}

	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];
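	/*
	 * The "RV" field stores a checksum byte chosen so that all VPD bytes
	 * from the start of the structure up to and including RV sum to zero
	 * (mod 256); a non-zero sum means the EEPROM contents are corrupt.
	 */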
	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);
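	/* Note: "i" above is the SN field's 1-byte length from its VPD
	 * header, clamped to SERNUM_LEN so the copy cannot overflow p->sn. */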
	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}
/*
 *	csio_hw_sf1_read - read data from the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
		      BYTECNT_V(byte_cnt - 1), SF_OP_A);
	ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA_A);
	return ret;
}
/*
 *	csio_hw_sf1_write - write data to the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, val, SF_DATA_A);
	csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
		      OP_V(1) | SF_LOCK_V(lock), SF_OP_A);

	return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
					10, NULL);
}
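/*
 * A note on the two SF1 primitives above: the "cont" flag keeps the flash
 * chip selected between operations so that multi-cycle command sequences
 * (opcode, address, data) can be chained, while "lock" reserves the SF
 * interface for PL access until the sequence completes.
 */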
/*
 *	csio_hw_flash_wait_op - wait for a flash operation to complete
 *	@hw: the HW module
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
	int ret;
	uint32_t status;

	while (1) {
		ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
		if (ret != 0)
			return ret;

		ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
		if (ret != 0)
			return ret;

		if (!(status & 1))
			return 0;

		if (--attempts == 0)
			return -EAGAIN;

		if (delay)
			msleep(delay);
	}
}
/*
 *	csio_hw_read_flash - read words from serial flash
 *	@hw: the HW module
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
		   uint32_t *data, int32_t byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;
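	/*
	 * The flash expects the 24-bit address to be sent most-significant
	 * byte first after the read opcode, hence the swab32() with the
	 * SF_RD_DATA_FAST opcode merged into the low byte.
	 */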
	ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) htonl(*data);
	}

	return 0;
}
/*
 *	csio_hw_write_flash - write up to a page of data to the serial flash
 *	@hw: the HW module
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}

	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return ret;
}
/*
 *	csio_hw_flash_erase_sectors - erase a range of flash sectors
 *	@hw: the HW module
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
	int ret = 0;

	while (start <= end) {

		ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
		if (ret != 0)
			goto out;

		ret = csio_hw_sf1_write(hw, 4, 0, 1,
					SF_ERASE_SECTOR | (start << 8));
		if (ret != 0)
			goto out;

		ret = csio_hw_flash_wait_op(hw, 14, 500);
		if (ret != 0)
			goto out;

		start++;
	}
out:
	if (ret)
		csio_err(hw, "erase of flash sector %d failed, error %d\n",
			 start, ret);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return 0;
}
static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_G(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_G(hw->fwrev));
}
/*
 * csio_hw_get_fw_version - read the firmware version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}
/*
 * csio_hw_get_tp_version - read the TP microcode version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, tp_microcode_ver), 1,
				  vers, 0);
}
/*
 * csio_hw_fw_dload - download firmware.
 * @hw: HW module.
 * @fw_data: firmware image to write.
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FLASH_FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FLASH_FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
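	/*
	 * A valid firmware image is built so that the 32-bit sum of all of
	 * its words is exactly 0xffffffff; any other value indicates a
	 * truncated or corrupted image.
	 */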
	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
					  FLASH_FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FLASH_FW_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = csio_hw_write_flash(hw,
				  FLASH_FW_START +
				  offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}
static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	int ret;
	uint32_t info = 0;

	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	if (ret != 0)
		return ret;

	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		hw->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		hw->params.sf_nsec = 64;
	else
		return -EINVAL;
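	/* At this point "info" is log2 of the part size in bytes; the sector
	 * counts above follow the Numonix device-ID size encoding. */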
	hw->params.sf_size = 1 << info;

	return 0;
}
/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/

static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);
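	/*
	 * A PCI read returning all-ones means the function isn't responding
	 * yet (or has fallen off the bus), so keep polling PL_WHOAMI until
	 * it yields a sane PF number or the retry count runs out.
	 */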
	if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) ||
			   (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	hw->pfn = SOURCEPF_G(reg);

	return 0;
}
/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb	*mbp;
	int	rv = 0;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, CSIO_MASTER_MAY, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly.  (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time).  In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_MASK so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (pcie_fw & PCIE_FW_ERR_F) {
				*state = CSIO_DEV_STATE_ERR;
				rv = -ETIMEDOUT;
			} else if (pcie_fw & PCIE_FW_INIT_F)
				*state = CSIO_DEV_STATE_INIT;

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				mpfn = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}
/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 *
 */
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
/*
 * csio_do_reset - Perform the device reset.
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If fw_rst is set, issues FW reset mbox cmd otherwise
 * does PIO reset.
 * Performs reset of the function.
 */
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	if (!fw_rst) {
		/* PIO reset */
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		return 0;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
		      PIORSTMODE_F | PIORST_F, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of RESET command failed.\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
	uint16_t caps;

	caps = ntohs(rsp->fcoecaps);

	if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
		csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
		return -EINVAL;
	}

	if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
		csio_err(hw, "No FCoE Control Offload capability\n");
		return -EINVAL;
	}

	return 0;
}
/*
 *	csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	PCIE_FW_MASTER_MASK).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware.
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct csio_mb	*mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
				   PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}
/*
 *	csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 *	@hw: the HW module
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by csio_hw_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	1. If we're dealing with newer firmware we'll simply want to take
 *	   the chip's microprocessor out of RESET.  This will cause the
 *	   firmware to start up from its start vector.  And then we'll loop
 *	   until the firmware indicates it's started again (PCIE_FW.HALT
 *	   reset to 0) or we timeout.
 *
 *	2. If we're dealing with older firmware then we'll need to RESET
 *	   the chip since older firmware won't recognize the PCIE_FW.HALT
 *	   flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		msleep(2000);
	} else {
		int ms;

		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
/*
 *	csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}
/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm	= csio_hw_to_wrm(hw);
	struct csio_mb	*mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports = hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
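		/* Scan the port vector: each set bit is a physical port and
		 * the bit position becomes that port's id. */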
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
/*
 * csio_enable_ports - Bring up all available ports.
 * @hw: HW module.
 *
 */
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	uint8_t portid;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, 0, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval,
					      &hw->pport[i].pcap);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		/* Write back PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
			     (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

	} /* For all ports */

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb	*mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource information */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	*param = _param[0];

	return 0;
}
static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;

	if (request_firmware(&cf, FW_CFG_NAME_T5, dev) < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 FW_CFG_NAME_T5, ret);
		return -ENOENT;
	}

	if (cf->size%4 != 0)
		value_to_add = 4 - (cf->size % 4);

	cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
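		/* The rewritten tail word zero-pads the image so that its
		 * length in adapter memory is 4-byte aligned. */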
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n",
			  FW_CFG_NAME_T5);
		snprintf(path, 64, "%s%s", "/lib/firmware/", FW_CFG_NAME_T5);
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}
/*
 * HW initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration -- either using the configuration
 * file stored in flash on the adapter or using a filesystem-local file
 * if available.
 *
 * If we don't have configuration file support in the firmware, then we'll
 * have to set things up the old fashioned way with hard-coded register
 * writes and firmware commands ...
 */

/*
 * Attempt to initialize the HW via a Firmware Configuration File.
 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	struct csio_mb	*mbp = NULL;
	struct fw_caps_config_cmd *caps_cmd;
	unsigned int mtype, maddr;
	int rv = -EINVAL;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	char path[64];
	char *config_name = NULL;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file in host,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the HW flash ...
	 */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv != 0) {
		/*
		 * config file was not found. Use default
		 * config file from flash.
		 */
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
	} else {
		config_name = path;
		mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}
	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the
	 * firmware.  A very few early versions of the firmware didn't
	 * have one embedded but we can ignore those.
	 */
	if (rv == ENOENT) {
		CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
		caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
					      FW_CMD_REQUEST_F |
					      FW_CMD_READ_F);
		caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

		if (csio_mb_issue(hw, mbp)) {
			rv = -EINVAL;
			goto bye;
		}

		rv = csio_mb_fw_retval(mbp);
		config_name = "Firmware Default";
	}
	if (rv != FW_SUCCESS)
		goto bye;

	finiver = ntohl(caps_cmd->finiver);
	finicsum = ntohl(caps_cmd->finicsum);
	cfcsum = ntohl(caps_cmd->cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	if (rv != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
		goto bye;
	}

	mempool_free(mbp, hw->mb_mempool);
	if (finicsum != cfcsum) {
		csio_warn(hw,
			  "Config File checksum mismatch: csum=%#x, computed=%#x\n",
			  finicsum, cfcsum);
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto bye;

	/*
	 * Note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw, "Successfully configured using Firmware "
		  "Configuration File %s, version %#x, computed checksum %#x\n",
		  config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...
	 */
bye:
	if (mbp)
		mempool_free(mbp, hw->mb_mempool);
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_warn(hw, "Configuration file error %d\n", rv);
	return rv;
}
/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}
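/*
 * Note: SAME_INTF() compares the per-ULP interface version fields (NIC,
 * vNIC, RDMA, iSCSI, FCoE) of the two headers, so firmware with a different
 * build version can still be accepted as long as all of its host interfaces
 * match what the driver was compiled against.
 */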
/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
				     int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
		FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
		FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
		FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));

	return 1;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T5,
		.fs_name = FW_CFG_NAME_T5,
		.fw_mod_name = FW_FNAME_T5,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
			   const u8 *fw_data, unsigned int fw_size,
			   struct fw_hdr *card_fw, enum csio_dev_state state,
			   int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = csio_hw_read_flash(hw, FLASH_FW_START,
				 sizeof(*card_fw) / sizeof(uint32_t),
				 (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		card_fw_usable = 0;
		csio_err(hw,
			"Unable to read card's firmware header: %d\n", ret);
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT &&
		   csio_should_install_fs_fw(hw, card_fw_usable,
					     be32_to_cpu(fs_fw->fw_ver),
					     be32_to_cpu(card_fw->fw_ver))) {
		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
					 fw_size, 0);
		if (ret != 0) {
			csio_err(hw,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		csio_err(hw, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = -EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	hw->fwrev = be32_to_cpu(card_fw->fw_ver);
	hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}
/*
 * Returns -EINVAL if attempts to flash the firmware failed,
 * -ENOMEM if memory allocation failed else returns 0,
 * if flashing was not attempted because the card had the
 * latest firmware ECANCELED is returned
 */
static int
csio_hw_flash_fw(struct csio_hw *hw, int *reset)
{
	int ret = -ECANCELED;
	const struct firmware *fw;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	const u8 *fw_data = NULL;
	unsigned int fw_size = 0;

	/* This is the firmware whose headers the driver was compiled
	 * against
	 */
	fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
	if (fw_info == NULL) {
		csio_err(hw,
			"unable to get firmware info for chip %d.\n",
			CHELSIO_CHIP_VERSION(hw->chip_id));
		return -EINVAL;
	}

	if (request_firmware(&fw, FW_FNAME_T5, dev) < 0) {
		csio_err(hw, "could not find firmware image %s, err: %d\n",
			 FW_FNAME_T5, ret);
	} else {
		fw_data = fw->data;
		fw_size = fw->size;
	}

	/* allocate memory to read the header of the firmware on the
	 * card
	 */
	card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);

	/* upgrade FW logic */
	ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
			      hw->fw_state, reset);

	/* Cleaning up */
	if (fw != NULL)
		release_firmware(fw);
	kfree(card_fw);
	return ret;
}
/*
 * csio_hw_configure - Configure HW
 * @hw: HW module
 *
 */
static void
csio_hw_configure(struct csio_hw *hw)
{
	int reset = 1;
	int rv;
	u32 param[1];

	rv = csio_hw_dev_ready(hw);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* HW version */
	hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);

	/* Needed for FW download */
	rv = csio_hw_get_flash_params(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Set PCIe completion timeout to 4 seconds */
	if (pci_is_pcie(hw->pdev))
		pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
				PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);

	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);

	rv = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (rv != 0)
		goto out;

	csio_hw_print_fw_version(hw, "Firmware revision");

	rv = csio_do_hello(hw, &hw->fw_state);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Read vpd */
	rv = csio_hw_get_vpd_params(hw, &hw->vpd);
	if (rv != 0)
		goto out;

	csio_hw_get_fw_version(hw, &hw->fwrev);
	csio_hw_get_tp_version(hw, &hw->tp_vers);
	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {

		/* Do firmware update */
		spin_unlock_irq(&hw->lock);
		rv = csio_hw_flash_fw(hw, &reset);
		spin_lock_irq(&hw->lock);

		if (rv != 0)
			goto out;

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		rv = csio_hw_check_fwconfig(hw, param);
		if (rv != 0) {
			csio_info(hw, "Firmware doesn't support "
				  "Firmware Configuration files\n");
			goto out;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		rv = csio_hw_use_fwconfig(hw, reset, param);
		if (rv == -ENOENT) {
			csio_info(hw, "Could not initialize "
				  "adapter, error%d\n", rv);
			goto out;
		}
		if (rv != 0) {
			csio_info(hw, "Could not initialize "
				  "adapter, error%d\n", rv);
			goto out;
		}

	} else {
		if (hw->fw_state == CSIO_DEV_STATE_INIT) {

			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

			/* device parameters */
			rv = csio_get_device_params(hw);
			if (rv != 0)
				goto out;

			/* Get device capabilities */
			rv = csio_config_device_caps(hw);
			if (rv != 0)
				goto out;

			/* Configure SGE */
			csio_wr_sge_init(hw);

			/* Post event to notify completion of configuration */
			csio_post_event(&hw->sm, CSIO_HWE_INIT);
		}
	} /* if not master */

out:
	return;
}
/*
 * csio_hw_initialize - Initialize HW
 * @hw: HW module
 *
 */
static void
csio_hw_initialize(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	int rv;
	int i;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp)
			goto out;

		csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
			goto free_and_out;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
				 retval);
			goto free_and_out;
		}

		mempool_free(mbp, hw->mb_mempool);
	}

	rv = csio_get_fcoe_resinfo(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
		goto out;
	}

	spin_unlock_irq(&hw->lock);
	rv = csio_config_queues(hw);
	spin_lock_irq(&hw->lock);

	if (rv != 0) {
		csio_err(hw, "Config of queues failed!: %d\n", rv);
		goto out;
	}

	for (i = 0; i < hw->num_pports; i++)
		hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_enable_ports(hw);
		if (rv != 0) {
			csio_err(hw, "Failed to enable ports: %d\n", rv);
			goto out;
		}
	}

	csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
	return;

free_and_out:
	mempool_free(mbp, hw->mb_mempool);
out:
	return;
}
#define PF_INTR_MASK (PFSW_F | PFCIM_F)
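/* PF_INTR_MASK selects the per-PF interrupt sources enabled below: PF
 * software interrupts (PFSW) and CIM mailbox interrupts (PFCIM). */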
/*
 * csio_hw_intr_enable - Enable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Enable interrupts in HW registers.
 */
static void
csio_hw_intr_enable(struct csio_hw *hw)
{
	uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
	uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
	uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);

	/*
	 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
	 * by FW, so do nothing for INTX.
	 */
	if (hw->intr_mode == CSIO_IM_MSIX)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
				   AIVEC_V(AIVEC_M), vec);
	else if (hw->intr_mode == CSIO_IM_MSI)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
				   AIVEC_V(AIVEC_M), 0);

	csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));

	/* Turn on MB interrupts - this will internally flush PIO as well */
	csio_mb_intr_enable(hw);

	/* These are common registers - only a master can modify them */
	if (csio_is_hw_master(hw)) {
		/*
		 * Disable the Serial FLASH interrupt, if enabled!
		 */
		pl &= (~SF_F);
		csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);

		csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
			      EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
			      ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
			      ERR_DATA_CPL_ON_HIGH_QID1_F |
			      ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
			      ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
			      ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
			      ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
			      SGE_INT_ENABLE3_A);
		csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
	}

	hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
}
/*
 * csio_hw_intr_disable - Disable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Turn off Mailbox and PCI_PF_CFG interrupts.
 */
void
csio_hw_intr_disable(struct csio_hw *hw)
{
	uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));

	if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
		return;

	hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;

	csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
	if (csio_is_hw_master(hw))
		csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);

	/* Turn off MB interrupts */
	csio_mb_intr_disable(hw);
}
void
csio_hw_fatal_err(struct csio_hw *hw)
{
	csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	csio_hw_intr_disable(hw);

	/* Do not reset HW, we may need FW state for debugging */
	csio_fatal(hw, "HW Fatal error encountered!\n");
}
/*****************************************************************************/
/* START: HW SM                                                              */
/*****************************************************************************/
/*
 * csio_hws_uninit - Uninit state
 * @hw: HW module
 * @evt: Event
 *
 */
static void
csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_CFG:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_configuring - Configuring state
 * @hw: HW module
 * @evt: Event
 *
 */
static void
csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT:
		csio_set_state(&hw->sm, csio_hws_initializing);
		csio_hw_initialize(hw);
		break;

	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);
		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_initializing - Initializing state
 * @hw: HW module
 * @evt: Event
 *
 */
static void
csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);

		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);

		/* Enable interrupts */
		csio_hw_intr_enable(hw);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_ready - Ready state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
{
	/* Remember the event */
	hw->evtflag = evt;

	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
	case CSIO_HWE_FW_DLOAD:
	case CSIO_HWE_SUSPEND:
	case CSIO_HWE_PCI_REMOVE:
	case CSIO_HWE_PCIERR_DETECTED:
		csio_set_state(&hw->sm, csio_hws_quiescing);
		/* cleanup all outstanding cmds */
		if (evt == CSIO_HWE_HBA_RESET ||
		    evt == CSIO_HWE_PCIERR_DETECTED)
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
		else
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);

		csio_hw_intr_disable(hw);
		csio_hw_mbm_cleanup(hw);
		csio_evtq_stop(hw);
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
		csio_evtq_flush(hw);
		csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
		csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

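/*
 * Note: csio_hws_ready() records the triggering event in hw->evtflag
 * before entering quiescing; csio_hws_quiescing() switches on that saved
 * flag once CSIO_HWE_QUIESCED arrives, which is how the right teardown
 * path (reset/remove/suspend/pcierr) is chosen.
 */
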
/*
 * csio_hws_quiescing - Quiescing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_QUIESCED:
		switch (hw->evtflag) {
		case CSIO_HWE_FW_DLOAD:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Download firmware */
			/* Fall through */

		case CSIO_HWE_HBA_RESET:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Start reset of the HBA */
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
			csio_wr_destroy_queues(hw, false);
			csio_do_reset(hw, false);
			csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
			break;

		case CSIO_HWE_PCI_REMOVE:
			csio_set_state(&hw->sm, csio_hws_removing);
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
			csio_wr_destroy_queues(hw, true);
			/* Now send the bye command */
			csio_do_bye(hw);
			break;

		case CSIO_HWE_SUSPEND:
			csio_set_state(&hw->sm, csio_hws_quiesced);
			break;

		case CSIO_HWE_PCIERR_DETECTED:
			csio_set_state(&hw->sm, csio_hws_pcierr);
			csio_wr_destroy_queues(hw, false);
			break;

		default:
			CSIO_INC_STATS(hw, n_evt_unexp);
			break;

		}
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_quiesced - Quiesced state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_RESUME:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_resetting - HW Resetting state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET_DONE:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_removing - PCI Hotplug removing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
		if (!csio_is_hw_master(hw))
			break;
		/*
		 * The BYE should have already been issued, so we can't
		 * use the mailbox interface. Hence we use the PL_RST
		 * register directly.
		 */
		csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		break;

	/* Should never receive any new events */
	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;

	}
}

/*
 * csio_hws_pcierr - PCI Error state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_PCIERR_SLOT_RESET:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*****************************************************************************/
/* END: HW SM                                                                */
/*****************************************************************************/

/*
 * csio_handle_intr_status - table driven interrupt handler
 * @hw: HW instance
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message. The table is terminated
 * by an entry specifying mask 0.  Returns the number of fatal interrupt
 * conditions.
 */
static int
csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
			const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = csio_rd_reg32(hw, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			csio_fatal(hw, "Fatal %s (0x%x)\n",
				   acts->msg, status & acts->mask);
		} else if (acts->msg)
			csio_info(hw, "%s (0x%x)\n",
				  acts->msg, status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		csio_wr_reg32(hw, status, reg);
	return fatal;
}

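/*
 * Each intr_info entry in the tables below reads { mask, message,
 * stat_idx, fatal }: the bits to test in the cause register, the text to
 * log, a per-entry statistics index (unused here, hence -1 throughout),
 * and whether a hit counts toward the fatal total returned above.  The
 * exact field names follow the usual Chelsio intr_info convention and may
 * differ slightly between kernel versions.
 */
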
/*
 * TP interrupt handler.
 */
static void csio_tp_intr_handler(struct csio_hw *hw)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * SGE interrupt handler.
 */
static void csio_sge_intr_handler(struct csio_hw *hw)
{
	uint64_t v;

	static struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE_F,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC_F,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO_F,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO_F,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
		{ 0, NULL, 0, 0 }
	};

	v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
	    ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
	if (v) {
		csio_fatal(hw, "SGE parity error (%#llx)\n",
			   (unsigned long long)v);
		csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
			      SGE_INT_CAUSE1_A);
		csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
	}

	v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);

	if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
	    v != 0)
		csio_hw_fatal_err(hw);
}

#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)

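/*
 * These two masks just OR together the per-queue parity bits so that the
 * CIM outbound (OBQ) and inbound (IBQ) queues can each be reported as a
 * single entry in the cause table below.
 */
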
/*
 * CIM interrupt handler.
 */
static void csio_cim_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cim_intr_info[] = {
		{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT_F, "CIM illegal write", -1, 1 },
		{ ILLRDINT_F, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
				      cim_intr_info) +
	      csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
				      cim_upintr_info);
	if (fat)
		csio_hw_fatal_err(hw);
}

/*
 * ULP RX interrupt handler.
 */
static void csio_ulprx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * ULP TX interrupt handler.
 */
static void csio_ulptx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * PM TX interrupt handler.
 */
static void csio_pmtx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
		  1 },
		{ ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
		{ PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * PM RX interrupt handler.
 */
static void csio_pmrx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
		  1 },
		{ IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
		{ PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * CPL switch interrupt handler.
 */
static void csio_cplsw_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * LE interrupt handler.
 */
static void csio_le_intr_handler(struct csio_hw *hw)
{
	static struct intr_info le_intr_info[] = {
		{ LIPMISS_F, "LE LIP miss", -1, 0 },
		{ LIP0_F, "LE 0 LIP error", -1, 0 },
		{ PARITYERR_F, "LE parity error", -1, 1 },
		{ UNKNOWNCMD_F, "LE unknown command", -1, 1 },
		{ REQQPARERR_F, "LE request queue parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * MPS interrupt handler.
 */
static void csio_mps_intr_handler(struct csio_hw *hw)
{
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
		  -1, 1 },
		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
				      mps_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
				      mps_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
				      mps_trc_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
				      mps_stat_sram_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
				      mps_stat_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
				      mps_stat_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
				      mps_cls_intr_info);

	csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
	csio_rd_reg32(hw, MPS_INT_CAUSE_A);	/* flush */
	if (fat)
		csio_hw_fatal_err(hw);
}

#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
		      ECC_UE_INT_CAUSE_F)

/*
 * EDC/MC interrupt handler.
 */
static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
	} else {
		addr = MC_INT_CAUSE_A;
		cnt_addr = MC_ECC_STATUS_A;
	}

	v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE_F)
		csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
	if (v & ECC_CE_INT_CAUSE_F) {
		uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));

		csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
		csio_warn(hw, "%u %s correctable ECC data error%s\n",
			  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE_F)
		csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);

	csio_wr_reg32(hw, v, addr);
	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
		csio_hw_fatal_err(hw);
}

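/*
 * Note the severity split above: correctable ECC errors are only counted
 * and logged as warnings (the counter is cleared by writing
 * ECC_CECNT_V(ECC_CECNT_M) back), while FIFO parity and uncorrectable ECC
 * errors escalate to csio_hw_fatal_err() after the cause bits are cleared.
 */
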
/*
 * MA interrupt handler.
 */
static void csio_ma_intr_handler(struct csio_hw *hw)
{
	uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);

	if (status & MEM_PERR_INT_CAUSE_F)
		csio_fatal(hw, "MA parity error, parity status %#x\n",
			   csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
	if (status & MEM_WRAP_INT_CAUSE_F) {
		v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
		csio_fatal(hw,
		   "MA address wrap-around error by client %u to address %#x\n",
		   MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
	}
	csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
	csio_hw_fatal_err(hw);
}

/*
 * SMB interrupt handler.
 */
static void csio_smb_intr_handler(struct csio_hw *hw)
{
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * NC-SI interrupt handler.
 */
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * XGMAC interrupt handler.
 */
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
	uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));

	v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR_F)
		csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
	if (v & RXFIFO_PRTY_ERR_F)
		csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
	csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
	csio_hw_fatal_err(hw);
}

/*
 * PL interrupt handler.
 */
static void csio_pl_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR_F, "T4 fatal parity error", -1, 1 },
		{ PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * csio_hw_slow_intr_handler - control path interrupt handler
 * @hw: HW module
 *
 * Interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 */
int
csio_hw_slow_intr_handler(struct csio_hw *hw)
{
	uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);

	if (!(cause & CSIO_GLBL_INTR_MASK)) {
		CSIO_INC_STATS(hw, n_plint_unexp);
		return 0;
	}

	csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);

	CSIO_INC_STATS(hw, n_plint_cnt);

	if (cause & CIM_F)
		csio_cim_intr_handler(hw);

	if (cause & MPS_F)
		csio_mps_intr_handler(hw);

	if (cause & NCSI_F)
		csio_ncsi_intr_handler(hw);

	if (cause & PL_F)
		csio_pl_intr_handler(hw);

	if (cause & SMB_F)
		csio_smb_intr_handler(hw);

	if (cause & XGMAC0_F)
		csio_xgmac_intr_handler(hw, 0);

	if (cause & XGMAC1_F)
		csio_xgmac_intr_handler(hw, 1);

	if (cause & XGMAC_KR0_F)
		csio_xgmac_intr_handler(hw, 2);

	if (cause & XGMAC_KR1_F)
		csio_xgmac_intr_handler(hw, 3);

	if (cause & PCIE_F)
		hw->chip_ops->chip_pcie_intr_handler(hw);

	if (cause & MC_F)
		csio_mem_intr_handler(hw, MEM_MC);

	if (cause & EDC0_F)
		csio_mem_intr_handler(hw, MEM_EDC0);

	if (cause & EDC1_F)
		csio_mem_intr_handler(hw, MEM_EDC1);

	if (cause & LE_F)
		csio_le_intr_handler(hw);

	if (cause & TP_F)
		csio_tp_intr_handler(hw);

	if (cause & MA_F)
		csio_ma_intr_handler(hw);

	if (cause & PM_TX_F)
		csio_pmtx_intr_handler(hw);

	if (cause & PM_RX_F)
		csio_pmrx_intr_handler(hw);

	if (cause & ULP_RX_F)
		csio_ulprx_intr_handler(hw);

	if (cause & CPL_SWITCH_F)
		csio_cplsw_intr_handler(hw);

	if (cause & SGE_F)
		csio_sge_intr_handler(hw);

	if (cause & ULP_TX_F)
		csio_ulptx_intr_handler(hw);

	/* Clear the interrupts just processed for which we are the master. */
	csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
	csio_rd_reg32(hw, PL_INT_CAUSE_A);	/* flush */

	return 1;
}

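/*
 * Each per-module handler above clears its own module's cause register;
 * this function clears only the top-level PL_INT_CAUSE bits it owns
 * (CSIO_GLBL_INTR_MASK), and the trailing read-back flushes that write
 * before the ISR returns.
 */
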
/*****************************************************************************
 * HW <--> mailbox interfacing routines.
 ****************************************************************************/
/*
 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
 *
 * @data: Private data pointer.
 *
 * Called from worker thread context.
 */
static void
csio_mberr_worker(void *data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mbm *mbm = &hw->mbm;
	LIST_HEAD(cbfn_q);
	struct csio_mb *mbp_next;
	int rv;

	del_timer_sync(&mbm->timer);

	spin_lock_irq(&hw->lock);
	if (list_empty(&mbm->cbfn_q)) {
		spin_unlock_irq(&hw->lock);
		return;
	}

	list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
	mbm->stats.n_cbfnq = 0;

	/* Try to start waiting mailboxes */
	if (!list_empty(&mbm->req_q)) {
		mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
		list_del_init(&mbp_next->list);

		rv = csio_mb_issue(hw, mbp_next);
		if (rv != 0)
			list_add_tail(&mbp_next->list, &mbm->req_q);
		else
			CSIO_DEC_STATS(mbm, n_activeq);
	}
	spin_unlock_irq(&hw->lock);

	/* Now callback completions */
	csio_mb_completions(hw, &cbfn_q);
}

/*
 * csio_hw_mb_timer - Top-level Mailbox timeout handler.
 *
 * @data: private data pointer
 *
 */
static void
csio_hw_mb_timer(uintptr_t data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mb *mbp = NULL;

	spin_lock_irq(&hw->lock);
	mbp = csio_mb_tmo_handler(hw);
	spin_unlock_irq(&hw->lock);

	/* Call back the function for the timed-out Mailbox */
	if (mbp)
		mbp->mb_cbfn(hw, mbp);
}

/*
 * csio_hw_mbm_cleanup - Cleanup Mailbox module.
 * @hw: HW module
 *
 * Called with lock held, should exit with lock held.
 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
 * into a local queue. Drops lock and calls the completions. Holds
 * lock and returns.
 */
static void
csio_hw_mbm_cleanup(struct csio_hw *hw)
{
	LIST_HEAD(cbfn_q);

	csio_mb_cancel_all(hw, &cbfn_q);

	spin_unlock_irq(&hw->lock);
	csio_mb_completions(hw, &cbfn_q);
	spin_lock_irq(&hw->lock);
}

/*****************************************************************************
 * Event handling
 ****************************************************************************/
int
csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		 uint16_t len)
{
	struct csio_evt_msg *evt_entry = NULL;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	if (hw->flags & CSIO_HWF_FWEVT_STOP)
		return -EINVAL;

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		return -ENOMEM;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;
	memcpy((void *)evt_entry->data, evt_msg, len);
	list_add_tail(&evt_entry->list, &hw->evt_active_q);

	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);

	return 0;
}

static int
csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		      uint16_t len, bool msg_sg)
{
	struct csio_evt_msg *evt_entry = NULL;
	struct csio_fl_dma_buf *fl_sg;
	uint32_t off = 0;
	unsigned long flags;
	int n, ret = 0;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&hw->lock, flags);
	if (hw->flags & CSIO_HWF_FWEVT_STOP) {
		ret = -EINVAL;
		goto out;
	}

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		ret = -ENOMEM;
		goto out;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;

	/* If Payload in SG list */
	if (msg_sg) {
		fl_sg = (struct csio_fl_dma_buf *) evt_msg;
		for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
			memcpy((void *)((uintptr_t)evt_entry->data + off),
			       fl_sg->flbufs[n].vaddr,
			       fl_sg->flbufs[n].len);
			off += fl_sg->flbufs[n].len;
		}
	} else
		memcpy((void *)evt_entry->data, evt_msg, len);

	list_add_tail(&evt_entry->list, &hw->evt_active_q);
	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);
out:
	spin_unlock_irqrestore(&hw->lock, flags);
	return ret;
}

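/*
 * When msg_sg is set, the payload is still scattered across freelist DMA
 * buffers, so the loop above linearizes up to CSIO_MAX_FLBUF_PER_IQWR
 * fragments (or len bytes, whichever limit is reached first) into the
 * fixed-size evt_entry->data area before the entry is queued.
 */
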
static void
csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
{
	if (evt_entry) {
		spin_lock_irq(&hw->lock);
		list_del_init(&evt_entry->list);
		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_DEC_STATS(hw, n_evt_activeq);
		CSIO_INC_STATS(hw, n_evt_freeq);
		spin_unlock_irq(&hw->lock);
	}
}

void
csio_evtq_flush(struct csio_hw *hw)
{
	uint32_t count;

	count = 30;
	while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
}

static void
csio_evtq_stop(struct csio_hw *hw)
{
	hw->flags |= CSIO_HWF_FWEVT_STOP;
}

static void
csio_evtq_start(struct csio_hw *hw)
{
	hw->flags &= ~CSIO_HWF_FWEVT_STOP;
}

static void
csio_evtq_cleanup(struct csio_hw *hw)
{
	struct list_head *evt_entry, *next_entry;

	/* Release outstanding events from activeq to freeq */
	if (!list_empty(&hw->evt_active_q))
		list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);

	hw->stats.n_evt_activeq = 0;
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;

	/* Free up event entries */
	list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
		kfree(evt_entry);
		CSIO_DEC_STATS(hw, n_evt_freeq);
	}

	hw->stats.n_evt_freeq = 0;
}

static void
csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
			  struct csio_fl_dma_buf *flb, void *priv)
{
	__u8 op;
	void *msg = NULL;
	uint32_t msg_len = 0;
	bool msg_sg = 0;

	op = ((struct rss_header *) wr)->opcode;
	if (op == CPL_FW6_PLD) {
		CSIO_INC_STATS(hw, n_cpl_fw6_pld);
		if (!flb || !flb->totlen) {
			CSIO_INC_STATS(hw, n_cpl_unexp);
			return;
		}

		msg = (void *) flb;
		msg_len = flb->totlen;
		msg_sg = 1;
	} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {

		CSIO_INC_STATS(hw, n_cpl_fw6_msg);
		/* skip RSS header */
		msg = (void *)((uintptr_t)wr + sizeof(__be64));
		msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
			   sizeof(struct cpl_fw4_msg);
	} else {
		csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
		CSIO_INC_STATS(hw, n_cpl_unexp);
		return;
	}

	/*
	 * Enqueue event to EventQ. Events processing happens
	 * in Event worker thread context
	 */
	if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
				  (uint16_t)msg_len, msg_sg))
		CSIO_INC_STATS(hw, n_evt_drop);
}

void
csio_evtq_worker(struct work_struct *work)
{
	struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
	struct list_head *evt_entry, *next_entry;
	LIST_HEAD(evt_q);
	struct csio_evt_msg *evt_msg;
	struct cpl_fw6_msg *msg;
	struct csio_rnode *rn;
	int rv = 0;
	uint8_t evtq_stop = 0;

	csio_dbg(hw, "event worker thread active evts#%d\n",
		 hw->stats.n_evt_activeq);

	spin_lock_irq(&hw->lock);
	while (!list_empty(&hw->evt_active_q)) {
		list_splice_tail_init(&hw->evt_active_q, &evt_q);
		spin_unlock_irq(&hw->lock);

		list_for_each_safe(evt_entry, next_entry, &evt_q) {
			evt_msg = (struct csio_evt_msg *) evt_entry;

			/* Drop events if queue is STOPPED */
			spin_lock_irq(&hw->lock);
			if (hw->flags & CSIO_HWF_FWEVT_STOP)
				evtq_stop = 1;
			spin_unlock_irq(&hw->lock);
			if (evtq_stop) {
				CSIO_INC_STATS(hw, n_evt_drop);
				goto free_evt;
			}

			switch (evt_msg->type) {
			case CSIO_EVT_FW:
				msg = (struct cpl_fw6_msg *)(evt_msg->data);

				if ((msg->opcode == CPL_FW6_MSG ||
				     msg->opcode == CPL_FW4_MSG) &&
				    !msg->type) {
					rv = csio_mb_fwevt_handler(hw,
								   msg->data);
					if (!rv)
						break;
					/* Handle any remaining fw events */
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else if (msg->opcode == CPL_FW6_PLD) {

					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else {
					csio_warn(hw,
					     "Unhandled FW msg op %x type %x\n",
						  msg->opcode, msg->type);
					CSIO_INC_STATS(hw, n_evt_drop);
				}
				break;

			case CSIO_EVT_MBX:
				csio_mberr_worker(hw);
				break;

			case CSIO_EVT_DEV_LOSS:
				memcpy(&rn, evt_msg->data, sizeof(rn));
				csio_rnode_devloss_handler(rn);
				break;

			default:
				csio_warn(hw, "Unhandled event %x on evtq\n",
					  evt_msg->type);
				CSIO_INC_STATS(hw, n_evt_unexp);
				break;
			}
free_evt:
			csio_free_evt(hw, evt_msg);
		}

		spin_lock_irq(&hw->lock);
	}
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
	spin_unlock_irq(&hw->lock);
}

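/*
 * The worker drains hw->evt_active_q by splicing it onto a local list
 * under hw->lock and then processing entries with the lock dropped; the
 * CSIO_HWF_FWEVT_STOP flag is re-checked under the lock for every entry,
 * so events queued before a stop are dropped rather than handled.
 */
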
int
csio_fwevtq_handler(struct csio_hw *hw)
{
	int rv;

	if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
		CSIO_INC_STATS(hw, n_int_stray);
		return -EINVAL;
	}

	rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
				    csio_process_fwevtq_entry, NULL);
	return rv;
}

/****************************************************************************
 * Entry points
 ****************************************************************************/

/* Management module */
/*
 * csio_mgmt_req_lookup - Check if the given IO req exists in the Active Q.
 * @mgmtm - mgmt module
 * @io_req - io request
 *
 * Return - 0: if the given IO Req exists in the active Q.
 *          -EINVAL: if the lookup fails.
 */
int
csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
{
	struct list_head *tmp;

	/* Lookup ioreq in the ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		if (io_req == (struct csio_ioreq *)tmp)
			return 0;
	}
	return -EINVAL;
}

#define	ECM_MIN_TMO	1000	/* Minimum timeout value for req */

/*
 * csio_mgmt_tmo_handler - MGMT IO Timeout handler.
 * @data - Event data.
 *
 * Return - none.
 */
static void
csio_mgmt_tmo_handler(uintptr_t data)
{
	struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
	struct list_head *tmp;
	struct csio_ioreq *io_req;

	csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");

	spin_lock_irq(&mgmtm->hw->lock);

	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);

		if (!io_req->tmo) {
			/* Dequeue the request from retry Q. */
			tmp = csio_list_prev(tmp);
			list_del_init(&io_req->sm.sm_list);
			if (io_req->io_cbfn) {
				/* io_req will be freed by completion handler */
				io_req->wr_status = -ETIMEDOUT;
				io_req->io_cbfn(mgmtm->hw, io_req);
			} else {
				CSIO_DB_ASSERT(0);
			}
		}
	}

	/* If retry queue is not empty, re-arm timer */
	if (!list_empty(&mgmtm->active_q))
		mod_timer(&mgmtm->mgmt_timer,
			  jiffies + msecs_to_jiffies(ECM_MIN_TMO));
	spin_unlock_irq(&mgmtm->hw->lock);
}

static void
csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
{
	struct csio_hw *hw = mgmtm->hw;
	struct csio_ioreq *io_req;
	struct list_head *tmp;
	uint32_t count;

	count = 30;
	/* Wait for all outstanding req to complete gracefully */
	while ((!list_empty(&mgmtm->active_q)) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	/* release outstanding req from ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		tmp = csio_list_prev(tmp);
		list_del_init(&io_req->sm.sm_list);
		mgmtm->stats.n_active--;
		if (io_req->io_cbfn) {
			/* io_req will be freed by completion handler */
			io_req->wr_status = -ETIMEDOUT;
			io_req->io_cbfn(mgmtm->hw, io_req);
		}
	}
}

/*
 * csio_mgmtm_init - Mgmt module init entry point
 * @mgmtm - mgmt module
 * @hw - HW module
 *
 * Initialize mgmt timer, resource wait queue, active queue,
 * completion q. Allocate Egress and Ingress
 * WR queues and save off the queue index returned by the WR
 * module for future use. Allocate and save off mgmt reqs in the
 * mgmt_req_freelist for future use. Make sure their SM is initialized
 * to uninit state.
 * Returns: 0 - on success
 *          -ENOMEM - on error.
 */
static int
csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
{
	struct timer_list *timer = &mgmtm->mgmt_timer;

	init_timer(timer);
	timer->function = csio_mgmt_tmo_handler;
	timer->data = (unsigned long)mgmtm;

	INIT_LIST_HEAD(&mgmtm->active_q);
	INIT_LIST_HEAD(&mgmtm->cbfn_q);

	mgmtm->hw = hw;
	/*mgmtm->iq_idx = hw->fwevt_iq_idx;*/

	return 0;
}

/*
 * csio_mgmtm_exit - MGMT module exit entry point
 * @mgmtm - mgmt module
 *
 * This function is called during MGMT module uninit.
 * Stop timers, free ioreqs allocated.
 * Returns: None
 *
 */
static void
csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
{
	del_timer_sync(&mgmtm->mgmt_timer);
}

/*
 * csio_hw_start - Kicks off the HW State machine
 * @hw: Pointer to HW module.
 *
 * It is assumed that the initialization is a synchronous operation.
 * So when we return after posting the event, the HW SM should be in
 * the ready state, if there were no errors during init.
 */
int
csio_hw_start(struct csio_hw *hw)
{
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_CFG);
	spin_unlock_irq(&hw->lock);

	if (csio_is_hw_ready(hw))
		return 0;
	else
		return -EINVAL;
}

int
csio_hw_stop(struct csio_hw *hw)
{
	csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);

	if (csio_is_hw_removing(hw))
		return 0;
	else
		return -EINVAL;
}

/* Max reset retries */
#define CSIO_MAX_RESET_RETRIES	3

/*
 * csio_hw_reset - Reset the hardware
 * @hw: HW module.
 *
 * Caller should hold lock across this function.
 */
int
csio_hw_reset(struct csio_hw *hw)
{
	if (!csio_is_hw_master(hw))
		return -EPERM;

	if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
		csio_dbg(hw, "Max hw reset attempts reached..");
		return -EINVAL;
	}

	hw->rst_retries++;
	csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);

	if (csio_is_hw_ready(hw)) {
		hw->rst_retries = 0;
		hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
		return 0;
	} else
		return -EINVAL;
}

/*
 * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
 * @hw: HW module.
 */
static void
csio_hw_get_device_id(struct csio_hw *hw)
{
	/* Is the adapter device id cached already? */
	if (csio_is_dev_id_cached(hw))
		return;

	/* Get the PCI vendor & device id */
	pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
			     &hw->params.pci.vendor_id);
	pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
			     &hw->params.pci.device_id);

	csio_dev_id_cached(hw);
	hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);

} /* csio_hw_get_device_id */

/*
 * csio_hw_set_description - Set the model, description of the hw.
 * @hw: HW module.
 * @ven_id: PCI Vendor ID
 * @dev_id: PCI Device ID
 */
static void
csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
{
	uint32_t adap_type, prot_type;

	if (ven_id == CSIO_VENDOR_ID) {
		prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
		adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);

		if (prot_type == CSIO_T5_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t5_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t5_fcoe_adapters[adap_type].description,
			       32);
		} else {
			char tempName[32] = "Chelsio FCoE Controller";
			memcpy(hw->model_desc, tempName, 32);
		}
	}
} /* csio_hw_set_description */

/*
 * csio_hw_init - Initialize HW module.
 * @hw: Pointer to HW module.
 *
 * Initialize the members of the HW module.
 */
int
csio_hw_init(struct csio_hw *hw)
{
	int rv = -EINVAL;
	uint32_t i;
	uint16_t ven_id, dev_id;
	struct csio_evt_msg *evt_entry;

	INIT_LIST_HEAD(&hw->sm.sm_list);
	csio_init_state(&hw->sm, csio_hws_uninit);
	spin_lock_init(&hw->lock);
	INIT_LIST_HEAD(&hw->sln_head);

	/* Get the PCI vendor & device id */
	csio_hw_get_device_id(hw);

	strcpy(hw->name, CSIO_HW_NAME);

	/* Initialize the HW chip ops with T5 specific ops */
	hw->chip_ops = &t5_ops;

	/* Set the model & its description */

	ven_id = hw->params.pci.vendor_id;
	dev_id = hw->params.pci.device_id;

	csio_hw_set_description(hw, ven_id, dev_id);

	/* Initialize default log level */
	hw->params.log_level = (uint32_t) csio_dbg_level;

	csio_set_fwevt_intr_idx(hw, -1);
	csio_set_nondata_intr_idx(hw, -1);

	/* Init all the modules: Mailbox, WorkRequest and Transport */
	if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
		goto err;

	rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
	if (rv)
		goto err_mbm_exit;

	rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
	if (rv)
		goto err_wrm_exit;

	rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
	if (rv)
		goto err_scsim_exit;
	/* Pre-allocate evtq and initialize them */
	INIT_LIST_HEAD(&hw->evt_active_q);
	INIT_LIST_HEAD(&hw->evt_free_q);
	for (i = 0; i < csio_evtq_sz; i++) {

		evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
		if (!evt_entry) {
			rv = -ENOMEM;
			csio_err(hw, "Failed to initialize eventq");
			goto err_evtq_cleanup;
		}

		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_INC_STATS(hw, n_evt_freeq);
	}

	hw->dev_num = dev_num;
	dev_num++;

	return 0;

err_evtq_cleanup:
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
err_scsim_exit:
	csio_scsim_exit(csio_hw_to_scsim(hw));
err_wrm_exit:
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
err_mbm_exit:
	csio_mbm_exit(csio_hw_to_mbm(hw));
err:
	return rv;
}

/*
 * csio_hw_exit - Un-initialize HW module.
 * @hw: Pointer to HW module.
 *
 */
void
csio_hw_exit(struct csio_hw *hw)
{
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
	csio_scsim_exit(csio_hw_to_scsim(hw));
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
	csio_mbm_exit(csio_hw_to_mbm(hw));
}