/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_lnode.h"
#include "csio_rnode.h"
int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;
/* FCoE adapter types and their descriptions */
static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"},
	{"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"},
	{"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"}
};
static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);
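/*
 * Note: the csio_hws_*() handlers declared above form the HW state
 * machine: uninit -> configuring -> initializing -> ready, with ready
 * branching into quiescing/quiesced, resetting, removing or pcierr on
 * reset, suspend, hot-unplug and PCI-error events (see the handlers
 * near the end of this file).
 */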
int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}
/*
 * csio_hw_wait_op_done_val - wait until an operation is completed
 * @hw: the HW module
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;

		if (delay)
			udelay(delay);
	}
}
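/*
 * Note: this polling helper backs the serial-flash primitives below;
 * e.g. csio_hw_sf1_read()/csio_hw_sf1_write() wait for SF_BUSY_F to
 * clear (polarity 0) in SF_OP_A before returning.
 */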
/*
 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @hw: the HW module
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
	val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA_A);
}
void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	csio_rd_reg32(hw, reg);	/* Flush */
}
static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}
/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3
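/*
 * Note: in the VPD-R section each information field starts with a
 * 3-byte header (VPD_INFO_FLD_HDR_SIZE): a 2-byte keyword such as
 * "SN" or "EC", followed by a 1-byte length of the field's value.
 */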
/*
 * csio_hw_seeprom_read - read a serial EEPROM location
 * @hw: HW module
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
	uint16_t val = 0;
	int attempts = EEPROM_MAX_RD_POLL;
	uint32_t base = hw->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

	do {
		udelay(10);
		pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
		return -EINVAL;
	}

	pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*(__le32 *)data);

	return 0;
}
/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};
/*
 * csio_hw_get_vpd_keyword_val - Locates an information field keyword in
 *				 the VPD
 * @v: Pointer to buffered vpd data structure
 * @kw: The keyword to search for
 *
 * Returns the value of the information field keyword or
 * -EINVAL otherwise.
 */
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int32_t i;
	int32_t offset, len;
	const uint8_t *buf = &v->id_tag;
	const uint8_t *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -EINVAL;

	for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -EINVAL;
}
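/*
 * Note: the scan above walks the VPD-R field list by hopping over each
 * 3-byte header plus the value length stored in its third byte; on a
 * keyword match it returns the offset of the value, not of the header.
 */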
static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);

	if (*pos)
		return 0;

	return -1;
}
/*
 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 * @hw: HW module
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
	int i, ret, ec, sn, addr;
	uint8_t *vpd, csum;
	const struct t4_vpd_hdr *v;
	/* To get around compilation warning from strstrip */
	char *s;

	if (csio_is_valid_vpd(hw))
		return 0;

	ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
				  &hw->params.pci.vpd_cap_addr);
	if (ret)
		return -EINVAL;

	vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
	if (vpd == NULL)
		return -ENOMEM;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
		if (ret) {
			kfree(vpd);
			return -EINVAL;
		}
	}

	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);

	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}
/*
 * csio_hw_sf1_read - read data from the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
		      BYTECNT_V(byte_cnt - 1), SF_OP_A);
	ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA_A);

	return ret;
}
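/*
 * Note: SF_OP_A drives the serial-flash handshake used above and below:
 * SF_LOCK_V reserves the interface for PL, SF_CONT_V chains the next
 * transfer, BYTECNT_V holds the transfer size minus one, and OP_V(1)
 * selects a write. Completion is signalled by SF_BUSY_F clearing.
 */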
/*
 * csio_hw_sf1_write - write data to the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, val, SF_DATA_A);
	csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
		      OP_V(1) | SF_LOCK_V(lock), SF_OP_A);

	return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
					10, NULL);
}
/*
 * csio_hw_flash_wait_op - wait for a flash operation to complete
 * @hw: the HW module
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
	int ret;
	uint32_t status;

	while (1) {
		ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
		if (ret != 0)
			return ret;

		ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
		if (ret != 0)
			return ret;

		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
/*
 * csio_hw_read_flash - read words from serial flash
 * @hw: the HW module
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
		   uint32_t *data, int32_t byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) htonl(*data);
	}

	return 0;
}
/*
 * csio_hw_write_flash - write up to a page of data to the serial flash
 * @hw: the HW module
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address. All the data must be written to the same page.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}

	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return ret;
}
/*
 * csio_hw_flash_erase_sectors - erase a range of flash sectors
 * @hw: the HW module
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
	int ret = 0;

	while (start <= end) {

		ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
		if (ret != 0)
			goto out;

		ret = csio_hw_sf1_write(hw, 4, 0, 1,
					SF_ERASE_SECTOR | (start << 8));
		if (ret != 0)
			goto out;

		ret = csio_hw_flash_wait_op(hw, 14, 500);
		if (ret != 0)
			goto out;

		start++;
	}
out:
	if (ret)
		csio_err(hw, "erase of flash sector %d failed, error %d\n",
			 start, ret);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return ret;
}
static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_G(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_G(hw->fwrev));
}
/*
 * csio_hw_get_fw_version - read the firmware version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}
/*
 * csio_hw_get_tp_version - read the TP microcode version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, tp_microcode_ver), 1,
				  vers, 0);
}
/*
 * csio_hw_fw_dload - download firmware.
 * @hw: HW module.
 * @fw_data: firmware image to write.
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FLASH_FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FLASH_FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
					  FLASH_FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FLASH_FW_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = csio_hw_write_flash(hw,
				  FLASH_FW_START +
				  offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}
static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	int ret;
	uint32_t info = 0;

	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	if (ret != 0)
		return ret;

	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		hw->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		hw->params.sf_nsec = 64;
	else
		return -EINVAL;
	hw->params.sf_size = 1 << info;

	return 0;
}
/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/

static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) ||
			   (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	hw->pfn = SOURCEPF_G(reg);

	return 0;
}
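/*
 * Note: PL_WHOAMI_A reads back as 0xFFFFFFFF while the PCI function is
 * still coming out of reset, so the loop above retries before trusting
 * SOURCEPF_G() to extract this function's PF number.
 */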
/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb	*mbp;
	int rv = 0;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, CSIO_MASTER_MAY, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly. (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time). In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_MASK so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state. If we see either of these we bail out
		 * and report the issue to the caller. If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again. Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F) {
					*state = CSIO_DEV_STATE_ERR;
					rv = -ETIMEDOUT;
				} else if (pcie_fw & PCIE_FW_INIT_F)
					*state = CSIO_DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				mpfn = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
		    "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
		    hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}
/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 *
 */
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
/*
 * csio_do_reset - Perform the device reset.
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If fw_rst is set, issues FW reset mbox cmd otherwise
 * does PIO reset.
 * Performs reset of the function.
 */
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	if (!fw_rst) {
		/* PIO reset */
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		return 0;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
		      PIORSTMODE_F | PIORST_F, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of RESET command failed.\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
	uint16_t caps;

	caps = ntohs(rsp->fcoecaps);

	if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
		csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
		return -EINVAL;
	}

	if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
		csio_err(hw, "No FCoE Control Offload capability\n");
		return -EINVAL;
	}

	return 0;
}
/*
 *	csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state. The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	PCIE_FW_MASTER_MASK).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing. The only way out of this state is to RESTART the firmware.
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct csio_mb	*mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET. This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability. This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
				   PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}
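/*
 * Note: csio_hw_fw_halt() is normally paired with csio_hw_fw_restart()
 * below; csio_hw_fw_upgrade() shows the typical sequence of
 * halt -> csio_hw_fw_dload() -> restart.
 */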
/*
 *	csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 *	@hw: the HW module
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by csio_hw_fw_halt(). On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	 1. If we're dealing with newer firmware we'll simply want to take
 *	    the chip's microprocessor out of RESET. This will cause the
 *	    firmware to start up from its start vector. And then we'll loop
 *	    until the firmware indicates it's started again (PCIE_FW.HALT
 *	    reset to 0) or we timeout.
 *
 *	 2. If we're dealing with older firmware then we'll need to RESET
 *	    the chip since older firmware won't recognize the PCIE_FW.HALT
 *	    flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET. If that works, great and we can
		 * return success. Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		msleep(2000);
	} else {
		int ms;

		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 *	csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image. Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state. On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart. So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate. We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}
/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_mb	*mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports = hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
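/*
 * Note: PORTVEC comes back as a bitmask of physical ports; after
 * masking with csio_port_mask the loop above assigns portids in
 * ascending bit order, e.g. a port_vec of 0x5 yields portids 0 and 2.
 */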
/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
/*
 * csio_enable_ports - Bring up all available ports.
 * @hw: HW module.
 *
 */
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	uint8_t portid;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, 0, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval,
					      &hw->pport[i].pcap);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		/* Write back PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
			     (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

	} /* For all ports */

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource information */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	*param = _param[0];

	return 0;
}
static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;

	if (request_firmware(&cf, FW_CFG_NAME_T5, dev) < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 FW_CFG_NAME_T5, ret);
		return -ENOENT;
	}

	if (cf->size % 4 != 0)
		value_to_add = 4 - (cf->size % 4);

	cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n",
			  FW_CFG_NAME_T5);
		snprintf(path, 64, "%s%s", "/lib/firmware/", FW_CFG_NAME_T5);
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}
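/*
 * Note: the firmware expects the config file in 32-bit units, so a file
 * whose size is not a multiple of 4 is zero-padded above: the tail
 * bytes of the last word are cleared and that word is written
 * separately at maddr + size.
 */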
/*
 * HW initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration -- either using the configuration
 * file stored in flash on the adapter or using a filesystem-local file
 * if available.
 *
 * If we don't have configuration file support in the firmware, then we'll
 * have to set things up the old fashioned way with hard-coded register
 * writes and firmware commands ...
 */
/*
 * Attempt to initialize the HW via a Firmware Configuration File.
 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	struct csio_mb	*mbp = NULL;
	struct fw_caps_config_cmd *caps_cmd;
	unsigned int mtype, maddr;
	int rv = -EINVAL;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	char path[64];
	char *config_name = NULL;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file in host,
	 * then use that. Otherwise, use the configuration file stored
	 * in the HW flash ...
	 */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv != 0) {
		/*
		 * config file was not found. Use default
		 * config file from flash.
		 */
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
	} else {
		config_name = path;
		mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the
	 * firmware. A very few early versions of the firmware didn't
	 * have one embedded but we can ignore those.
	 */
	if (rv == ENOENT) {
		CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
		caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
					      FW_CMD_REQUEST_F |
					      FW_CMD_READ_F);
		caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

		if (csio_mb_issue(hw, mbp)) {
			rv = -EINVAL;
			goto bye;
		}

		rv = csio_mb_fw_retval(mbp);
		config_name = "Firmware Default";
	}
	if (rv != FW_SUCCESS)
		goto bye;

	finiver = ntohl(caps_cmd->finiver);
	finicsum = ntohl(caps_cmd->finicsum);
	cfcsum = ntohl(caps_cmd->cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	if (rv != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
		goto bye;
	}

	if (finicsum != cfcsum) {
		csio_warn(hw,
		      "Config File checksum mismatch: csum=%#x, computed=%#x\n",
		      finicsum, cfcsum);
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto bye;

	mempool_free(mbp, hw->mb_mempool);
	mbp = NULL;

	/*
	 * Note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw, "Successfully configured using Firmware "
		  "Configuration File %s, version %#x, computed checksum %#x\n",
		  config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened. Return the error ...
	 */
bye:
	if (mbp)
		mempool_free(mbp, hw->mb_mempool);
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_warn(hw, "Configuration file error %d\n", rv);
	return rv;
}
/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}
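/*
 * Note: two images count as compatible either on an exact chip+version
 * match, or when every per-ULP interface version (nic, vnic, ri, iscsi,
 * fcoe) matches for the same chip, even if the build versions differ.
 */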
/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
				     int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
		 "installing firmware %u.%u.%u.%u on card.\n",
		 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
		 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
		 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
		 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));

	return 1;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T5,
		.fs_name = FW_CFG_NAME_T5,
		.fw_mod_name = FW_FNAME_T5,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}

	return NULL;
}
static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
			   const u8 *fw_data, unsigned int fw_size,
			   struct fw_hdr *card_fw, enum csio_dev_state state,
			   int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = csio_hw_read_flash(hw, FLASH_FW_START,
				 sizeof(*card_fw) / sizeof(uint32_t),
				 (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		csio_err(hw,
			 "Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT &&
		   csio_should_install_fs_fw(hw, card_fw_usable,
					     be32_to_cpu(fs_fw->fw_ver),
					     be32_to_cpu(card_fw->fw_ver))) {
		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
					 fw_size, 0);
		if (ret != 0) {
			csio_err(hw,
				 "failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		csio_err(hw, "Cannot find a usable firmware: "
			 "driver compiled with %d.%d.%d.%d, "
			 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = -EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	hw->fwrev = be32_to_cpu(card_fw->fw_ver);
	hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}
/*
 * Returns -EINVAL if attempts to flash the firmware failed,
 * else returns 0;
 * if flashing was not attempted because the card had the
 * latest firmware, ECANCELED is returned.
 */
static int
csio_hw_flash_fw(struct csio_hw *hw, int *reset)
{
	int ret = -ECANCELED;
	const struct firmware *fw;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	const u8 *fw_data = NULL;
	unsigned int fw_size = 0;

	/* This is the firmware whose headers the driver was compiled
	 * against
	 */
	fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
	if (fw_info == NULL) {
		csio_err(hw,
			 "unable to get firmware info for chip %d.\n",
			 CHELSIO_CHIP_VERSION(hw->chip_id));
		return -EINVAL;
	}

	if (request_firmware(&fw, FW_FNAME_T5, dev) < 0) {
		csio_err(hw, "could not find firmware image %s, err: %d\n",
			 FW_FNAME_T5, ret);
	} else {
		fw_data = fw->data;
		fw_size = fw->size;
	}

	/* allocate memory to read the header of the firmware on the
	 * card
	 */
	card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
	if (!card_fw) {
		release_firmware(fw);
		return -ENOMEM;
	}

	/* upgrade FW logic */
	ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
			      hw->fw_state, reset);

	/* Cleaning up */
	release_firmware(fw);
	kfree(card_fw);
	return ret;
}
/*
 * csio_hw_configure - Configure HW
 * @hw: HW module
 *
 */
static void
csio_hw_configure(struct csio_hw *hw)
{
	int reset = 1;
	int rv;
	u32 param[1];

	rv = csio_hw_dev_ready(hw);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* HW version */
	hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);

	/* Needed for FW download */
	rv = csio_hw_get_flash_params(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Set PCIe completion timeout to 4 seconds */
	if (pci_is_pcie(hw->pdev))
		pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
				PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);

	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);

	rv = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (rv != 0)
		goto out;

	csio_hw_print_fw_version(hw, "Firmware revision");

	rv = csio_do_hello(hw, &hw->fw_state);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Read vpd */
	rv = csio_hw_get_vpd_params(hw, &hw->vpd);
	if (rv != 0)
		goto out;

	csio_hw_get_fw_version(hw, &hw->fwrev);
	csio_hw_get_tp_version(hw, &hw->tp_vers);
	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {

		/* Do firmware update */
		spin_unlock_irq(&hw->lock);
		rv = csio_hw_flash_fw(hw, &reset);
		spin_lock_irq(&hw->lock);

		if (rv != 0)
			goto out;

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		rv = csio_hw_check_fwconfig(hw, param);
		if (rv != 0) {
			csio_info(hw, "Firmware doesn't support "
				  "Firmware Configuration files\n");
			goto out;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		rv = csio_hw_use_fwconfig(hw, reset, param);
		if (rv == -ENOENT) {
			csio_info(hw, "Could not initialize "
				  "adapter, error %d\n", rv);
			goto out;
		}
		if (rv != 0) {
			csio_info(hw, "Could not initialize "
				  "adapter, error %d\n", rv);
			goto out;
		}

	} else {
		if (hw->fw_state == CSIO_DEV_STATE_INIT) {

			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

			/* device parameters */
			rv = csio_get_device_params(hw);
			if (rv != 0)
				goto out;

			/* Get device capabilities */
			rv = csio_config_device_caps(hw);
			if (rv != 0)
				goto out;

			/* Configure SGE */
			csio_wr_sge_init(hw);

			/* Post event to notify completion of configuration */
			csio_post_event(&hw->sm, CSIO_HWE_INIT);
		}
	} /* if not master */

out:
	return;
}
/*
 * csio_hw_initialize - Initialize HW
 * @hw: HW module
 *
 */
static void
csio_hw_initialize(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	int rv;
	int i;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp)
			goto out;

		csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
			goto free_and_out;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
				 retval);
			goto free_and_out;
		}

		mempool_free(mbp, hw->mb_mempool);
	}

	rv = csio_get_fcoe_resinfo(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
		goto out;
	}

	spin_unlock_irq(&hw->lock);
	rv = csio_config_queues(hw);
	spin_lock_irq(&hw->lock);

	if (rv != 0) {
		csio_err(hw, "Config of queues failed!: %d\n", rv);
		goto out;
	}

	for (i = 0; i < hw->num_pports; i++)
		hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_enable_ports(hw);
		if (rv != 0) {
			csio_err(hw, "Failed to enable ports: %d\n", rv);
			goto out;
		}
	}

	csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
	return;

free_and_out:
	mempool_free(mbp, hw->mb_mempool);
out:
	return;
}
#define PF_INTR_MASK (PFSW_F | PFCIM_F)

/*
 * csio_hw_intr_enable - Enable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Enable interrupts in HW registers.
 */
static void
csio_hw_intr_enable(struct csio_hw *hw)
{
	uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
	uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
	uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);

	/*
	 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
	 * by FW, so do nothing for INTX.
	 */
	if (hw->intr_mode == CSIO_IM_MSIX)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
				   AIVEC_V(AIVEC_M), vec);
	else if (hw->intr_mode == CSIO_IM_MSI)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
				   AIVEC_V(AIVEC_M), 0);

	csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));

	/* Turn on MB interrupts - this will internally flush PIO as well */
	csio_mb_intr_enable(hw);

	/* These are common registers - only a master can modify them */
	if (csio_is_hw_master(hw)) {
		/*
		 * Disable the Serial FLASH interrupt, if enabled!
		 */
		pl &= ~SF_F;
		csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);

		csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
			      EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
			      ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
			      ERR_DATA_CPL_ON_HIGH_QID1_F |
			      ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
			      ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
			      ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
			      ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
			      SGE_INT_ENABLE3_A);
		csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
	}

	hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
}
/*
 * csio_hw_intr_disable - Disable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Turn off Mailbox and PCI_PF_CFG interrupts.
 */
void
csio_hw_intr_disable(struct csio_hw *hw)
{
	uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));

	if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
		return;

	hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;

	csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
	if (csio_is_hw_master(hw))
		csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);

	/* Turn off MB interrupts */
	csio_mb_intr_disable(hw);
}
void
csio_hw_fatal_err(struct csio_hw *hw)
{
	csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	csio_hw_intr_disable(hw);

	/* Do not reset HW, we may need FW state for debugging */
	csio_fatal(hw, "HW Fatal error encountered!\n");
}
/*****************************************************************************/
/* START: HW SM                                                              */
/*****************************************************************************/
/*
 * csio_hws_uninit - Uninit state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_CFG:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
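/*
 * Note: every csio_hws_*() handler follows the same dispatch pattern
 * as csio_hws_uninit() above: record the previous/current event, bump
 * the per-event n_evt_sm[] counter, then switch on the event, counting
 * anything unhandled in n_evt_unexp.
 */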
/*
 * csio_hws_configuring - Configuring state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT:
		csio_set_state(&hw->sm, csio_hws_initializing);
		csio_hw_initialize(hw);
		break;

	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);
		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_initializing - Initializing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);

		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);

		/* Enable interrupts */
		csio_hw_intr_enable(hw);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_ready - Ready state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
{
	/* Remember the event */
	hw->evtflag = evt;

	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
	case CSIO_HWE_FW_DLOAD:
	case CSIO_HWE_SUSPEND:
	case CSIO_HWE_PCI_REMOVE:
	case CSIO_HWE_PCIERR_DETECTED:
		csio_set_state(&hw->sm, csio_hws_quiescing);
		/* cleanup all outstanding cmds */
		if (evt == CSIO_HWE_HBA_RESET ||
		    evt == CSIO_HWE_PCIERR_DETECTED)
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
		else
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);

		csio_hw_intr_disable(hw);
		csio_hw_mbm_cleanup(hw);
		csio_evtq_stop(hw);
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
		csio_evtq_flush(hw);
		csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
		csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_quiescing - Quiescing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_QUIESCED:
		switch (hw->evtflag) {
		case CSIO_HWE_FW_DLOAD:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Download firmware */
			/* Fall through - handled like an HBA reset below */

		case CSIO_HWE_HBA_RESET:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Start reset of the HBA */
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
			csio_wr_destroy_queues(hw, false);
			csio_do_reset(hw, false);
			csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
			break;

		case CSIO_HWE_PCI_REMOVE:
			csio_set_state(&hw->sm, csio_hws_removing);
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
			csio_wr_destroy_queues(hw, true);
			/* Now send the bye command */
			csio_do_bye(hw);
			break;

		case CSIO_HWE_SUSPEND:
			csio_set_state(&hw->sm, csio_hws_quiesced);
			break;

		case CSIO_HWE_PCIERR_DETECTED:
			csio_set_state(&hw->sm, csio_hws_pcierr);
			csio_wr_destroy_queues(hw, false);
			break;

		default:
			CSIO_INC_STATS(hw, n_evt_unexp);
			break;
		}
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_quiesced - Quiesced state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_RESUME:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_resetting - HW Resetting state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET_DONE:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_removing - PCI Hotplug removing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
		if (!csio_is_hw_master(hw))
			break;
		/*
		 * The BYE should have already been issued, so we can't
		 * use the mailbox interface. Hence we use the PL_RST
		 * register directly.
		 */
		csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		msleep(2000);
		break;

	/* Should never receive any new events */
	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_pcierr - PCI Error state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_PCIERR_SLOT_RESET:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
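/*
 * Illustrative sketch only, not part of the original driver: every
 * transition above is driven by posting an event to hw->sm while
 * holding hw->lock. The current csio_hws_* routine then runs
 * synchronously and may post follow-up events itself, as
 * csio_hws_ready() does with CSIO_HWE_QUIESCED.
 */
static void __maybe_unused csio_hws_post_example(struct csio_hw *hw)
{
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_FATAL);	/* any csio_hw_ev */
	spin_unlock_irq(&hw->lock);
}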
/*****************************************************************************/
/* END: HW SM                                                                */
/*****************************************************************************/
/*
 * csio_handle_intr_status - table driven interrupt handler
 * @hw: HW instance
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred. The actions include
 * optionally emitting a warning or alert message. The table is terminated
 * by an entry specifying mask 0. Returns the number of fatal interrupt
 * conditions.
 */
int
csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
			const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = csio_rd_reg32(hw, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			csio_fatal(hw, "Fatal %s (0x%x)\n",
				   acts->msg, status & acts->mask);
		} else if (acts->msg)
			csio_info(hw, "%s (0x%x)\n",
				  acts->msg, status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		csio_wr_reg32(hw, status, reg);
	return fatal;
}
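/*
 * Illustrative sketch only, not from the original source: a minimal
 * client of the table walker above. The entry layout follows the
 * tables in this file - { mask, msg, stat_idx, fatal } - and the walk
 * stops at the zero-mask terminator. The 0x0 cause-register offset is
 * a made-up placeholder.
 */
static void __maybe_unused csio_example_intr_handler(struct csio_hw *hw)
{
	static struct intr_info example_intr_info[] = {
		{ 0x1, "example parity error", -1, 1 },	 /* fatal: escalate */
		{ 0x2, "example FIFO overflow", -1, 0 }, /* logged only */
		{ 0, NULL, 0, 0 }			 /* terminator */
	};

	if (csio_handle_intr_status(hw, 0x0, example_intr_info))
		csio_hw_fatal_err(hw);
}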
/*
 * TP interrupt handler.
 */
static void csio_tp_intr_handler(struct csio_hw *hw)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * SGE interrupt handler.
 */
static void csio_sge_intr_handler(struct csio_hw *hw)
{
	uint64_t v;

	static struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE_F,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC_F,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO_F,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO_F,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
		{ 0, NULL, 0, 0 }
	};

	v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
	    ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
	if (v) {
		csio_fatal(hw, "SGE parity error (%#llx)\n",
			   (unsigned long long)v);
		csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
			      SGE_INT_CAUSE1_A);
		csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
	}

	v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);

	if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
	    v != 0)
		csio_hw_fatal_err(hw);
}
#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
/*
 * CIM interrupt handler.
 */
static void csio_cim_intr_handler(struct csio_hw *hw)
{
	int fat;

	static struct intr_info cim_intr_info[] = {
		{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT_F, "CIM illegal write", -1, 1 },
		{ ILLRDINT_F, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
				      cim_intr_info) +
	      csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
				      cim_upintr_info);
	if (fat)
		csio_hw_fatal_err(hw);
}
/*
 * ULP RX interrupt handler.
 */
static void csio_ulprx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * ULP TX interrupt handler.
 */
static void csio_ulptx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * PM TX interrupt handler.
 */
static void csio_pmtx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
		  1 },
		{ ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
		{ PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * PM RX interrupt handler.
 */
static void csio_pmrx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
		  1 },
		{ IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
		{ PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * CPL switch interrupt handler.
 */
static void csio_cplsw_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * LE interrupt handler.
 */
static void csio_le_intr_handler(struct csio_hw *hw)
{
	static struct intr_info le_intr_info[] = {
		{ LIPMISS_F, "LE LIP miss", -1, 0 },
		{ LIP0_F, "LE 0 LIP error", -1, 0 },
		{ PARITYERR_F, "LE parity error", -1, 1 },
		{ UNKNOWNCMD_F, "LE unknown command", -1, 1 },
		{ REQQPARERR_F, "LE request queue parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * MPS interrupt handler.
 */
static void csio_mps_intr_handler(struct csio_hw *hw)
{
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
		  -1, 1 },
		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
				      mps_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
				      mps_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
				      mps_trc_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
				      mps_stat_sram_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
				      mps_stat_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
				      mps_stat_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
				      mps_cls_intr_info);

	csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
	csio_rd_reg32(hw, MPS_INT_CAUSE_A); /* flush */
	if (fat)
		csio_hw_fatal_err(hw);
}
#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
		      ECC_UE_INT_CAUSE_F)
/*
 * EDC/MC interrupt handler.
 */
static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
	} else {
		addr = MC_INT_CAUSE_A;
		cnt_addr = MC_ECC_STATUS_A;
	}

	v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE_F)
		csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
	if (v & ECC_CE_INT_CAUSE_F) {
		uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));

		csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
		csio_warn(hw, "%u %s correctable ECC data error%s\n",
			  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE_F)
		csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);

	csio_wr_reg32(hw, v, addr);
	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
		csio_hw_fatal_err(hw);
}
/*
 * MA interrupt handler.
 */
static void csio_ma_intr_handler(struct csio_hw *hw)
{
	uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);

	if (status & MEM_PERR_INT_CAUSE_F)
		csio_fatal(hw, "MA parity error, parity status %#x\n",
			   csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
	if (status & MEM_WRAP_INT_CAUSE_F) {
		v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
		csio_fatal(hw,
		   "MA address wrap-around error by client %u to address %#x\n",
		   MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
	}
	csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
	csio_hw_fatal_err(hw);
}
/*
 * SMB interrupt handler.
 */
static void csio_smb_intr_handler(struct csio_hw *hw)
{
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * NC-SI interrupt handler.
 */
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * XGMAC interrupt handler.
 */
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
	uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));

	v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR_F)
		csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
	if (v & RXFIFO_PRTY_ERR_F)
		csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
	csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
	csio_hw_fatal_err(hw);
}
/*
 * PL interrupt handler.
 */
static void csio_pl_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR_F, "T4 fatal parity error", -1, 1 },
		{ PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * csio_hw_slow_intr_handler - control path interrupt handler
 * @hw: HW module.
 *
 * Interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 */
int
csio_hw_slow_intr_handler(struct csio_hw *hw)
{
	uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);

	if (!(cause & CSIO_GLBL_INTR_MASK)) {
		CSIO_INC_STATS(hw, n_plint_unexp);
		return 0;
	}

	csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);

	CSIO_INC_STATS(hw, n_plint_cnt);

	if (cause & CIM_F)
		csio_cim_intr_handler(hw);

	if (cause & MPS_F)
		csio_mps_intr_handler(hw);

	if (cause & NCSI_F)
		csio_ncsi_intr_handler(hw);

	if (cause & PL_F)
		csio_pl_intr_handler(hw);

	if (cause & SMB_F)
		csio_smb_intr_handler(hw);

	if (cause & XGMAC0_F)
		csio_xgmac_intr_handler(hw, 0);

	if (cause & XGMAC1_F)
		csio_xgmac_intr_handler(hw, 1);

	if (cause & XGMAC_KR0_F)
		csio_xgmac_intr_handler(hw, 2);

	if (cause & XGMAC_KR1_F)
		csio_xgmac_intr_handler(hw, 3);

	if (cause & PCIE_F)
		hw->chip_ops->chip_pcie_intr_handler(hw);

	if (cause & MC_F)
		csio_mem_intr_handler(hw, MEM_MC);

	if (cause & EDC0_F)
		csio_mem_intr_handler(hw, MEM_EDC0);

	if (cause & EDC1_F)
		csio_mem_intr_handler(hw, MEM_EDC1);

	if (cause & LE_F)
		csio_le_intr_handler(hw);

	if (cause & TP_F)
		csio_tp_intr_handler(hw);

	if (cause & MA_F)
		csio_ma_intr_handler(hw);

	if (cause & PM_TX_F)
		csio_pmtx_intr_handler(hw);

	if (cause & PM_RX_F)
		csio_pmrx_intr_handler(hw);

	if (cause & ULP_RX_F)
		csio_ulprx_intr_handler(hw);

	if (cause & CPL_SWITCH_F)
		csio_cplsw_intr_handler(hw);

	if (cause & SGE_F)
		csio_sge_intr_handler(hw);

	if (cause & ULP_TX_F)
		csio_ulptx_intr_handler(hw);

	/* Clear the interrupts just processed for which we are the master. */
	csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
	csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */

	return 1;
}
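/*
 * Illustrative sketch only, not the driver's real ISR (that lives in
 * csio_isr.c): a non-data interrupt vector would simply funnel into
 * the slow-path dispatcher above and report whether the interrupt was
 * consumed.
 */
static int __maybe_unused csio_example_nondata_isr(void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *)dev_id;

	/* Nonzero means a global PL interrupt was handled and cleared */
	return csio_hw_slow_intr_handler(hw);
}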
/*****************************************************************************
 * HW <--> mailbox interfacing routines.
 ****************************************************************************/
/*
 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
 *
 * @data: Private data pointer.
 *
 * Called from worker thread context.
 */
void
csio_mberr_worker(void *data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mbm *mbm = &hw->mbm;
	LIST_HEAD(cbfn_q);
	struct csio_mb *mbp_next;
	int rv;

	del_timer_sync(&mbm->timer);

	spin_lock_irq(&hw->lock);
	if (list_empty(&mbm->cbfn_q)) {
		spin_unlock_irq(&hw->lock);
		return;
	}

	list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
	mbm->stats.n_cbfnq = 0;

	/* Try to start waiting mailboxes */
	if (!list_empty(&mbm->req_q)) {
		mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
		list_del_init(&mbp_next->list);

		rv = csio_mb_issue(hw, mbp_next);
		if (rv != 0)
			list_add_tail(&mbp_next->list, &mbm->req_q);
		else
			CSIO_DEC_STATS(mbm, n_activeq);
	}
	spin_unlock_irq(&hw->lock);

	/* Now callback completions */
	csio_mb_completions(hw, &cbfn_q);
}
/*
 * csio_hw_mb_timer - Top-level Mailbox timeout handler.
 *
 * @data: private data pointer
 *
 */
static void
csio_hw_mb_timer(uintptr_t data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mb *mbp = NULL;

	spin_lock_irq(&hw->lock);
	mbp = csio_mb_tmo_handler(hw);
	spin_unlock_irq(&hw->lock);

	/* Call back the function for the timed-out Mailbox */
	if (mbp)
		mbp->mb_cbfn(hw, mbp);
}
/*
 * csio_hw_mbm_cleanup - Cleanup Mailbox module.
 * @hw: Pointer to HW module.
 *
 * Called with lock held, should exit with lock held.
 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
 * into a local queue. Drops lock and calls the completions. Holds
 * lock and returns.
 */
static void
csio_hw_mbm_cleanup(struct csio_hw *hw)
{
	LIST_HEAD(cbfn_q);

	csio_mb_cancel_all(hw, &cbfn_q);

	spin_unlock_irq(&hw->lock);
	csio_mb_completions(hw, &cbfn_q);
	spin_lock_irq(&hw->lock);
}
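/*
 * Illustrative sketch only, not in the original source: the
 * drop-lock/complete/re-acquire dance above lets completion callbacks
 * issue fresh mailbox commands without recursing on hw->lock, so a
 * caller always enters with the lock held:
 */
static void __maybe_unused csio_example_mbm_cleanup_caller(struct csio_hw *hw)
{
	spin_lock_irq(&hw->lock);
	csio_hw_mbm_cleanup(hw);	/* temporarily drops hw->lock */
	spin_unlock_irq(&hw->lock);
}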
/*****************************************************************************
 * Event handling
 ****************************************************************************/
int
csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		 uint16_t len)
{
	struct csio_evt_msg *evt_entry = NULL;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	/* Caller is expected to hold hw->lock here */
	if (hw->flags & CSIO_HWF_FWEVT_STOP)
		return -EINVAL;

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		return -ENOMEM;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;
	memcpy((void *)evt_entry->data, evt_msg, len);
	list_add_tail(&evt_entry->list, &hw->evt_active_q);

	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);

	return 0;
}
static int
csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		      uint16_t len, bool msg_sg)
{
	struct csio_evt_msg *evt_entry = NULL;
	struct csio_fl_dma_buf *fl_sg;
	uint32_t off = 0;
	unsigned long flags;
	int n, ret = 0;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&hw->lock, flags);
	if (hw->flags & CSIO_HWF_FWEVT_STOP) {
		ret = -EINVAL;
		goto out;
	}

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		ret = -ENOMEM;
		goto out;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;

	/* If Payload in SG list */
	if (msg_sg) {
		fl_sg = (struct csio_fl_dma_buf *) evt_msg;
		for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
			memcpy((void *)((uintptr_t)evt_entry->data + off),
			       fl_sg->flbufs[n].vaddr,
			       fl_sg->flbufs[n].len);
			off += fl_sg->flbufs[n].len;
		}
	} else
		memcpy((void *)evt_entry->data, evt_msg, len);

	list_add_tail(&evt_entry->list, &hw->evt_active_q);
	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);
out:
	spin_unlock_irqrestore(&hw->lock, flags);
	return ret;
}
static void
csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
{
	if (evt_entry) {
		spin_lock_irq(&hw->lock);
		list_del_init(&evt_entry->list);
		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_DEC_STATS(hw, n_evt_activeq);
		CSIO_INC_STATS(hw, n_evt_freeq);
		spin_unlock_irq(&hw->lock);
	}
}
void
csio_evtq_flush(struct csio_hw *hw)
{
	uint32_t count;

	count = 30;
	while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
}
static void
csio_evtq_stop(struct csio_hw *hw)
{
	hw->flags |= CSIO_HWF_FWEVT_STOP;
}

static void
csio_evtq_start(struct csio_hw *hw)
{
	hw->flags &= ~CSIO_HWF_FWEVT_STOP;
}
static void
csio_evtq_cleanup(struct csio_hw *hw)
{
	struct list_head *evt_entry, *next_entry;

	/* Release outstanding events from activeq to freeq */
	if (!list_empty(&hw->evt_active_q))
		list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);

	hw->stats.n_evt_activeq = 0;
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;

	/* Freeup event entry */
	list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
		kfree(evt_entry);
		CSIO_DEC_STATS(hw, n_evt_freeq);
	}

	hw->stats.n_evt_freeq = 0;
}
static void
csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
			  struct csio_fl_dma_buf *flb, void *priv)
{
	__u8 op;
	void *msg = NULL;
	uint32_t msg_len = 0;
	bool msg_sg = 0;

	op = ((struct rss_header *) wr)->opcode;
	if (op == CPL_FW6_PLD) {
		CSIO_INC_STATS(hw, n_cpl_fw6_pld);
		if (!flb || !flb->totlen) {
			CSIO_INC_STATS(hw, n_cpl_unexp);
			return;
		}

		msg = (void *) flb;
		msg_len = flb->totlen;
		msg_sg = 1;
	} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {

		CSIO_INC_STATS(hw, n_cpl_fw6_msg);
		/* skip RSS header */
		msg = (void *)((uintptr_t)wr + sizeof(__be64));
		msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
			  sizeof(struct cpl_fw4_msg);
	} else {
		csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
		CSIO_INC_STATS(hw, n_cpl_unexp);
		return;
	}

	/*
	 * Enqueue event to EventQ. Events processing happens
	 * in Event worker thread context
	 */
	if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
				  (uint16_t)msg_len, msg_sg))
		CSIO_INC_STATS(hw, n_evt_drop);
}
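/*
 * Illustrative sketch only, not from the original source: other
 * producers feed the same event queue. A device-loss notifier, for
 * instance, could stash an rnode pointer for csio_evtq_worker() below
 * to unpack in its CSIO_EVT_DEV_LOSS case.
 */
static void __maybe_unused csio_example_post_devloss(struct csio_hw *hw,
						     struct csio_rnode *rn)
{
	if (csio_enqueue_evt_lock(hw, CSIO_EVT_DEV_LOSS, &rn,
				  (uint16_t)sizeof(rn), false))
		CSIO_INC_STATS(hw, n_evt_drop);
}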
static void
csio_evtq_worker(struct work_struct *work)
{
	struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
	struct list_head *evt_entry, *next_entry;
	LIST_HEAD(evt_q);
	struct csio_evt_msg *evt_msg;
	struct cpl_fw6_msg *msg;
	struct csio_rnode *rn;
	int rv = 0;
	uint8_t evtq_stop = 0;

	csio_dbg(hw, "event worker thread active evts#%d\n",
		 hw->stats.n_evt_activeq);

	spin_lock_irq(&hw->lock);
	while (!list_empty(&hw->evt_active_q)) {
		list_splice_tail_init(&hw->evt_active_q, &evt_q);
		spin_unlock_irq(&hw->lock);

		list_for_each_safe(evt_entry, next_entry, &evt_q) {
			evt_msg = (struct csio_evt_msg *) evt_entry;

			/* Drop events if queue is STOPPED */
			spin_lock_irq(&hw->lock);
			if (hw->flags & CSIO_HWF_FWEVT_STOP)
				evtq_stop = 1;
			spin_unlock_irq(&hw->lock);
			if (evtq_stop) {
				CSIO_INC_STATS(hw, n_evt_drop);
				goto free_evt;
			}

			switch (evt_msg->type) {
			case CSIO_EVT_FW:
				msg = (struct cpl_fw6_msg *)(evt_msg->data);

				if ((msg->opcode == CPL_FW6_MSG ||
				     msg->opcode == CPL_FW4_MSG) &&
				    !msg->type) {
					rv = csio_mb_fwevt_handler(hw,
								   msg->data);
					if (!rv)
						break;
					/* Handle any remaining fw events */
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else if (msg->opcode == CPL_FW6_PLD) {

					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else {
					csio_warn(hw,
					     "Unhandled FW msg op %x type %x\n",
						  msg->opcode, msg->type);
					CSIO_INC_STATS(hw, n_evt_drop);
				}
				break;

			case CSIO_EVT_MBX:
				csio_mberr_worker(hw);
				break;

			case CSIO_EVT_DEV_LOSS:
				memcpy(&rn, evt_msg->data, sizeof(rn));
				csio_rnode_devloss_handler(rn);
				break;

			default:
				csio_warn(hw, "Unhandled event %x on evtq\n",
					  evt_msg->type);
				CSIO_INC_STATS(hw, n_evt_unexp);
				break;
			}
free_evt:
			csio_free_evt(hw, evt_msg);
		}

		spin_lock_irq(&hw->lock);
	}
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
	spin_unlock_irq(&hw->lock);
}
int
csio_fwevtq_handler(struct csio_hw *hw)
{
	int rv;

	if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
		CSIO_INC_STATS(hw, n_int_stray);
		return -EINVAL;
	}

	rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
				    csio_process_fwevtq_entry, NULL);
	return rv;
}
/****************************************************************************
 * Entry points
 ****************************************************************************/

/* Management module */
/*
 * csio_mgmt_req_lookup - Lookup whether the given IO req exists in the
 * active Q.
 * @mgmtm - mgmt module
 * @io_req - io request
 *
 * Return - 0: if the given IO req exists in the active Q.
 *          -EINVAL: if the lookup fails.
 */
int
csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
{
	struct list_head *tmp;

	/* Lookup ioreq in the ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		if (io_req == (struct csio_ioreq *)tmp)
			return 0;
	}
	return -EINVAL;
}
#define	ECM_MIN_TMO	1000	/* Minimum timeout value for req */
/*
 * csio_mgmt_tmo_handler - MGMT IO Timeout handler.
 * @data - Event data.
 *
 * Return - none.
 */
static void
csio_mgmt_tmo_handler(uintptr_t data)
{
	struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
	struct list_head *tmp;
	struct csio_ioreq *io_req;

	csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");

	spin_lock_irq(&mgmtm->hw->lock);

	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);

		if (!io_req->tmo) {
			/* Dequeue the request from retry Q. */
			tmp = csio_list_prev(tmp);
			list_del_init(&io_req->sm.sm_list);
			if (io_req->io_cbfn) {
				/* io_req will be freed by completion handler */
				io_req->wr_status = -ETIMEDOUT;
				io_req->io_cbfn(mgmtm->hw, io_req);
			} else {
				CSIO_DB_ASSERT(0);
			}
		}
	}

	/* If retry queue is not empty, re-arm timer */
	if (!list_empty(&mgmtm->active_q))
		mod_timer(&mgmtm->mgmt_timer,
			  jiffies + msecs_to_jiffies(ECM_MIN_TMO));
	spin_unlock_irq(&mgmtm->hw->lock);
}
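/*
 * Illustrative sketch only, not in the original source: submission
 * paths arm the same timer at ECM_MIN_TMO granularity when the first
 * request goes onto mgmtm->active_q; the handler above then re-arms it
 * for as long as requests remain.
 */
static void __maybe_unused csio_example_arm_mgmt_timer(struct csio_mgmtm *mgmtm)
{
	mod_timer(&mgmtm->mgmt_timer,
		  jiffies + msecs_to_jiffies(ECM_MIN_TMO));
}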
static void
csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
{
	struct csio_hw *hw = mgmtm->hw;
	struct csio_ioreq *io_req;
	struct list_head *tmp;
	uint32_t count;

	count = 30;
	/* Wait for all outstanding req to complete gracefully */
	while ((!list_empty(&mgmtm->active_q)) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	/* release outstanding req from ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		tmp = csio_list_prev(tmp);
		list_del_init(&io_req->sm.sm_list);
		mgmtm->stats.n_active--;
		if (io_req->io_cbfn) {
			/* io_req will be freed by completion handler */
			io_req->wr_status = -ETIMEDOUT;
			io_req->io_cbfn(mgmtm->hw, io_req);
		}
	}
}
/*
 * csio_mgmtm_init - Mgmt module init entry point
 * @mgmtm - mgmt module
 * @hw - HW module
 *
 * Initialize mgmt timer, resource wait queue, active queue,
 * completion q. Allocate Egress and Ingress
 * WR queues and save off the queue index returned by the WR
 * module for future use. Allocate and save off mgmt reqs in the
 * mgmt_req_freelist for future use. Make sure their SM is initialized
 * to uninit state.
 * Returns: 0 - on success
 *          -ENOMEM - on error.
 */
static int
csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
{
	struct timer_list *timer = &mgmtm->mgmt_timer;

	init_timer(timer);
	timer->function = csio_mgmt_tmo_handler;
	timer->data = (unsigned long)mgmtm;

	INIT_LIST_HEAD(&mgmtm->active_q);
	INIT_LIST_HEAD(&mgmtm->cbfn_q);

	mgmtm->hw = hw;
	/*mgmtm->iq_idx = hw->fwevt_iq_idx;*/

	return 0;
}
/*
 * csio_mgmtm_exit - MGMT module exit entry point
 * @mgmtm - mgmt module
 *
 * This function is called during MGMT module uninit.
 * Stop timers, free ioreqs allocated.
 * Returns: None
 *
 */
static void
csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
{
	del_timer_sync(&mgmtm->mgmt_timer);
}
/*
 * csio_hw_start - Kicks off the HW State machine
 * @hw: Pointer to HW module.
 *
 * It is assumed that the initialization is a synchronous operation.
 * So when we return after posting the event, the HW SM should be in
 * the ready state, if there were no errors during init.
 */
int
csio_hw_start(struct csio_hw *hw)
{
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_CFG);
	spin_unlock_irq(&hw->lock);

	if (csio_is_hw_ready(hw))
		return 0;
	else if (csio_match_state(hw, csio_hws_uninit))
		return -EINVAL;
	else
		return -ENODEV;
}
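/*
 * Illustrative sketch only, not the driver's actual probe path (that
 * lives in csio_init.c, and the prototypes are assumed to come from
 * csio_hw.h): the expected bring-up order is csio_hw_init() to set up
 * the modules, then csio_hw_start() to kick the state machine.
 */
static int __maybe_unused csio_example_bringup(struct csio_hw *hw)
{
	int rv = csio_hw_init(hw);

	if (rv)
		return rv;

	return csio_hw_start(hw);	/* 0 once the SM reaches ready */
}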
int
csio_hw_stop(struct csio_hw *hw)
{
	csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);

	if (csio_is_hw_removing(hw))
		return 0;
	else
		return -EINVAL;
}
/* Max reset retries */
#define CSIO_MAX_RESET_RETRIES	3
/*
 * csio_hw_reset - Reset the hardware
 * @hw: HW module.
 *
 * Caller should hold lock across this function.
 */
int
csio_hw_reset(struct csio_hw *hw)
{
	if (!csio_is_hw_master(hw))
		return -EPERM;

	if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
		csio_dbg(hw, "Max hw reset attempts reached..");
		return -EINVAL;
	}

	hw->rst_retries++;
	csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);

	if (csio_is_hw_ready(hw)) {
		hw->rst_retries = 0;
		hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
		return 0;
	} else
		return -EINVAL;
}
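/*
 * Illustrative sketch only, not from the original source: per the
 * comment above, csio_hw_reset() expects hw->lock to be held across
 * the call.
 */
static int __maybe_unused csio_example_locked_reset(struct csio_hw *hw)
{
	int rv;

	spin_lock_irq(&hw->lock);
	rv = csio_hw_reset(hw);
	spin_unlock_irq(&hw->lock);

	return rv;
}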
/*
 * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
 * @hw: HW module.
 */
static void
csio_hw_get_device_id(struct csio_hw *hw)
{
	/* Is the adapter device id cached already ? */
	if (csio_is_dev_id_cached(hw))
		return;

	/* Get the PCI vendor & device id */
	pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
			     &hw->params.pci.vendor_id);
	pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
			     &hw->params.pci.device_id);

	csio_dev_id_cached(hw);
	hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);

} /* csio_hw_get_device_id */
/*
 * csio_hw_set_description - Set the model, description of the hw.
 * @hw: HW module.
 * @ven_id: PCI Vendor ID
 * @dev_id: PCI Device ID
 */
static void
csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
{
	uint32_t adap_type, prot_type;

	if (ven_id == CSIO_VENDOR_ID) {
		prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
		adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);

		if (prot_type == CSIO_T5_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t5_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t5_fcoe_adapters[adap_type].description,
			       32);
		} else {
			char tempName[32] = "Chelsio FCoE Controller";
			memcpy(hw->model_desc, tempName, 32);
		}
	}
} /* csio_hw_set_description */
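/*
 * Illustrative sketch only, not in the original source: a T5 FCoE
 * device id splits into a protocol field and an adapter-type field,
 * the latter indexing csio_t5_fcoe_adapters[] above.
 */
static void __maybe_unused csio_example_decode_devid(uint16_t dev_id,
						     uint32_t *prot_type,
						     uint32_t *adap_type)
{
	*prot_type = dev_id & CSIO_ASIC_DEVID_PROTO_MASK;
	*adap_type = dev_id & CSIO_ASIC_DEVID_TYPE_MASK;
}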
/*
 * csio_hw_init - Initialize HW module.
 * @hw: Pointer to HW module.
 *
 * Initialize the members of the HW module.
 */
int
csio_hw_init(struct csio_hw *hw)
{
	int rv = -EINVAL;
	uint32_t i;
	uint16_t ven_id, dev_id;
	struct csio_evt_msg *evt_entry;

	INIT_LIST_HEAD(&hw->sm.sm_list);
	csio_init_state(&hw->sm, csio_hws_uninit);
	spin_lock_init(&hw->lock);
	INIT_LIST_HEAD(&hw->sln_head);

	/* Get the PCI vendor & device id */
	csio_hw_get_device_id(hw);

	strcpy(hw->name, CSIO_HW_NAME);

	/* Initialize the HW chip ops T5 specific ops */
	hw->chip_ops = &t5_ops;

	/* Set the model & its description */

	ven_id = hw->params.pci.vendor_id;
	dev_id = hw->params.pci.device_id;

	csio_hw_set_description(hw, ven_id, dev_id);

	/* Initialize default log level */
	hw->params.log_level = (uint32_t) csio_dbg_level;

	csio_set_fwevt_intr_idx(hw, -1);
	csio_set_nondata_intr_idx(hw, -1);

	/* Init all the modules: Mailbox, WorkRequest and Transport */
	if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
		goto err;

	rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
	if (rv)
		goto err_mbm_exit;

	rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
	if (rv)
		goto err_wrm_exit;

	rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
	if (rv)
		goto err_scsim_exit;
	/* Pre-allocate evtq and initialize them */
	INIT_LIST_HEAD(&hw->evt_active_q);
	INIT_LIST_HEAD(&hw->evt_free_q);
	for (i = 0; i < csio_evtq_sz; i++) {

		evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
		if (!evt_entry) {
			rv = -ENOMEM;
			csio_err(hw, "Failed to initialize eventq");
			goto err_evtq_cleanup;
		}

		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_INC_STATS(hw, n_evt_freeq);
	}

	hw->dev_num = dev_num;
	dev_num++;

	return 0;

err_evtq_cleanup:
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
err_scsim_exit:
	csio_scsim_exit(csio_hw_to_scsim(hw));
err_wrm_exit:
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
err_mbm_exit:
	csio_mbm_exit(csio_hw_to_mbm(hw));
err:
	return rv;
}
/*
 * csio_hw_exit - Un-initialize HW module.
 * @hw: Pointer to HW module.
 *
 */
void
csio_hw_exit(struct csio_hw *hw)
{
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
	csio_scsim_exit(csio_hw_to_scsim(hw));
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
	csio_mbm_exit(csio_hw_to_mbm(hw));
}