/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
int csio_force_master;
int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */

/* FCoE function instances */
/* FCoE Adapter types and their descriptions */
static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
	{"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
	{"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
	{"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
	{"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},
	{"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},
	{"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},
	{"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},
	{"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},
	{"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},
	{"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},
	{"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},
	{"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},
	{"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
	{"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
	{"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
	{"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
	{"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
	{"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
	{"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
	{"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
	{"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
	{"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
};
static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
};
static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);
int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}
/*
 *	csio_hw_wait_op_done_val - wait until an operation is completed
 *	@hw: the HW module
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times. If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there. Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;

		if (delay)
			udelay(delay);
	}
}
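/*
 * Usage sketch (illustrative only, not part of the original driver):
 * the serial flash helpers later in this file poll the SF_BUSY bit of
 * SF_OP with this routine. A caller wanting the final register value
 * back might do (the local 'val' is hypothetical):
 *
 *	uint32_t val;
 *
 *	if (csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0,
 *				     SF_ATTEMPTS, 10, &val) == 0)
 *		// SF_BUSY cleared within SF_ATTEMPTS polls; 'val' holds
 *		// the SF_OP contents observed at completion time.
 */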
/*
 *	csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@hw: the HW module
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR);
	val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA);
}
void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	/* Flush */
	csio_rd_reg32(hw, reg);
}
static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}
/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_INFO_FLD_HDR_SIZE	3
/*
 *	csio_hw_seeprom_read - read a serial EEPROM location
 *	@hw: the HW module
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's
 *	PCI VPD capability. Note that this function must be called with a
 *	virtual address.
 */
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
	uint16_t val = 0;
	int attempts = EEPROM_MAX_RD_POLL;
	uint32_t base = hw->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

	do {
		udelay(10);
		pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
		return -EINVAL;
	}

	pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);

	return 0;
}
/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R header.
 */
/*
 *	csio_hw_get_vpd_keyword_val - Locates an information field keyword in
 *				      the VPD
 *	@v: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *
 *	Returns the value of the information field keyword or
 *	-EINVAL otherwise.
 */
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int32_t i;
	int32_t offset, len;
	const uint8_t *buf = &v->id_tag;
	const uint8_t *vpdr_len = &v->vpdr_tag;
	offset = sizeof(struct t4_vpd_hdr);
	len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -EINVAL;

	for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -EINVAL;
}
static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);

	if (*pos)
		return 0;

	return -1;
}
/*
 *	csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 *	@hw: the HW module
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
	int i, ret, ec, sn, addr;
	uint8_t *vpd, csum;
	const struct t4_vpd_hdr *v;
	/* To get around compilation warning from strstrip */
	char *s;

	if (csio_is_valid_vpd(hw))
		return 0;

	ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
				  &hw->params.pci.vpd_cap_addr);
	if (ret)
		return -EINVAL;

	vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
	if (vpd == NULL)
		return -ENOMEM;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
		if (ret) {
			kfree(vpd);
			return ret;
		}
	}

	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);

	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}
/*
 *	csio_hw_sf1_read - read data from the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash. The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
		return -EBUSY;

	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;

	csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
	ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA);

	return ret;
}
/*
 *	csio_hw_sf1_write - write data to the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash. The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
		return -EBUSY;

	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;

	csio_wr_reg32(hw, val, SF_DATA);
	csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);

	return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
					10, NULL);
}
/*
 *	csio_hw_flash_wait_op - wait for a flash operation to complete
 *	@hw: the HW module
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
	int ret;
	uint32_t status;

	while (1) {
		ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
		if (ret != 0)
			return ret;

		ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
		if (ret != 0)
			return ret;

		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
/*
 *	csio_hw_read_flash - read words from serial flash
 *	@hw: the HW module
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
		   uint32_t *data, int32_t byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
/*
 *	csio_hw_write_flash - write up to a page of data to the serial flash
 *	@hw: the HW module
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address. All the data must be written to the same page.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
	return ret;
}
/*
 *	csio_hw_flash_erase_sectors - erase a range of flash sectors
 *	@hw: the HW module
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
	int ret = 0;

	while (start <= end) {

		ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
		if (ret != 0)
			goto out;

		ret = csio_hw_sf1_write(hw, 4, 0, 1,
					SF_ERASE_SECTOR | (start << 8));
		if (ret != 0)
			goto out;

		ret = csio_hw_flash_wait_op(hw, 14, 500);
		if (ret != 0)
			goto out;

		start++;
	}
out:
	if (ret)
		csio_err(hw, "erase of flash sector %d failed, error %d\n",
			 start, ret);
	csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
	return 0;
}
static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
}
/*
 * csio_hw_get_fw_version - read the firmware version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FW_IMG_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}
/*
 * csio_hw_get_tp_version - read the TP microcode version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, tp_microcode_ver), 1,
				  vers, 0);
}
/*
 * csio_hw_check_fw_version - check if the FW is compatible with this driver
 * @hw: HW module
 *
 * Checks if an adapter's FW is compatible with the driver. Returns 0
 * if there's an exact match, and a negative error if the version could
 * not be read or there's a major version mismatch.
 */
static int
csio_hw_check_fw_version(struct csio_hw *hw)
{
	int ret, major, minor, micro;

	ret = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (!ret)
		ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev);
	minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
	micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);

	if (major != FW_VERSION_MAJOR(hw)) {	/* major mismatch - fail */
		csio_err(hw, "card FW has major version %u, driver wants %u\n",
			 major, FW_VERSION_MAJOR(hw));
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
		return 0;        /* perfect match */

	/* Minor/micro version mismatch */
	return -EINVAL;
}
/*
 * csio_hw_fw_dload - download firmware.
 * @hw: HW module.
 * @fw_data: firmware image to write.
 * @size: image size.
 *
 * Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FW_START_SEC, FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,
					  FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FW_IMG_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = csio_hw_write_flash(hw,
				  FW_IMG_START +
				  offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}
static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	int ret;
	uint32_t info = 0;

	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
	csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
	if (ret != 0)
		return ret;

	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		hw->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		hw->params.sf_nsec = 64;
	else
		return -EINVAL;
	hw->params.sf_size = 1 << info;

	return 0;
}
/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/
static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
			   (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	hw->pfn = SOURCEPF_GET(reg);

	return 0;
}
/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb	*mbp;
	int	rv = 0;
	enum csio_dev_master master;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

	master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY;

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, master, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly. (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time). In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_MASK so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state. If we see either of these we bail out
		 * and report the issue to the caller. If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again. Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW);
			if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR) {
					*state = CSIO_DEV_STATE_ERR;
					rv = -ETIMEDOUT;
				} else if (pcie_fw & PCIE_FW_INIT)
					*state = CSIO_DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_MASK &&
			    (pcie_fw & PCIE_FW_MASTER_VLD))
				mpfn = PCIE_FW_MASTER_GET(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}
/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 *
 */
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_do_reset - Perform the device reset.
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If fw_rst is set, issues FW reset mbox cmd otherwise
 * does PIO reset.
 * Performs reset of the function.
 */
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	if (!fw_rst) {
		/* PIO reset */
		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		mdelay(2000);
		return 0;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
		      PIORSTMODE | PIORST, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of RESET command failed.\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
	uint16_t caps;

	caps = ntohs(rsp->fcoecaps);

	if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
		csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
		return -EINVAL;
	}

	if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
		csio_err(hw, "No FCoE Control Offload capability\n");
		return -EINVAL;
	}

	return 0;
}
/*
 *	csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state. The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	PCIE_FW_MASTER_MASK).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing. The only way out of this state is to RESTART the firmware.
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_MASK) {
		struct csio_mb	*mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1),
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET. This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability. This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST);
		csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}
/*
 *	csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 *	@hw: the HW module
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by csio_hw_fw_halt(). On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	1. If we're dealing with newer firmware we'll simply want to take
 *	   the chip's microprocessor out of RESET. This will cause the
 *	   firmware to start up from its start vector. And then we'll loop
 *	   until the firmware indicates it's started again (PCIE_FW.HALT
 *	   reset to 0) or we timeout.
 *
 *	2. If we're dealing with older firmware then we'll need to RESET
 *	   the chip since older firmware won't recognize the PCIE_FW.HALT
 *	   flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET. If that works, great and we can
		 * return success. Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_MASK) {
			csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		msleep(2000);
	} else {
		int ms;

		csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
/*
 *	csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image. Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state. On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart. So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate. We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}
/*
 * csio_hw_fw_config_file - setup an adapter via a Configuration File
 * @hw: the HW module
 * @mbox: mailbox to use for the FW command
 * @mtype: the memory type where the Configuration File is located
 * @maddr: the memory address where the Configuration File is located
 * @finiver: return value for CF [fini] version
 * @finicsum: return value for CF [fini] checksum
 * @cfcsum: return value for CF computed checksum
 *
 * Issue a command to get the firmware to process the Configuration
 * File located at the specified mtype/maddress. If the Configuration
 * File is processed successfully and return value pointers are
 * provided, the Configuration File's [fini] section version and
 * checksum values will be returned along with the computed checksum.
 * It's up to the caller to decide how it wants to respond to the
 * checksums not matching, but it is recommended that a prominent warning
 * be emitted in order to help people rapidly identify changed or
 * corrupted Configuration Files.
 *
 * Also note that it's possible to modify things like "niccaps",
 * "toecaps", etc. between processing the Configuration File and telling
 * the firmware to use the new configuration. Callers which want to
 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
 * Configuration Files if they want to do this.
 */
static int
csio_hw_fw_config_file(struct csio_hw *hw,
		       unsigned int mtype, unsigned int maddr,
		       uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum)
{
	struct csio_mb	*mbp;
	struct fw_caps_config_cmd *caps_cmd;
	int rv = -EINVAL;
	enum fw_retval ret;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}
	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
		goto out;
	}

	ret = csio_mb_fw_retval(mbp);
	if (ret != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", ret);
		goto out;
	}

	if (finiver)
		*finiver = ntohl(caps_cmd->finiver);
	if (finicsum)
		*finicsum = ntohl(caps_cmd->finicsum);
	if (cfcsum)
		*cfcsum = ntohl(caps_cmd->cfcsum);

	/* Validate device capabilities */
	if (csio_hw_validate_caps(hw, mbp)) {
		rv = -ENOENT;
		goto out;
	}

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_WRITE);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
		goto out;
	}

	ret = csio_mb_fw_retval(mbp);
	if (ret != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", ret);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm	= csio_hw_to_wrm(hw);
	struct csio_mb	*mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports	= hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	if (csio_hw_validate_caps(hw, mbp))
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
/*
 * csio_enable_ports - Bring up all available ports.
 * @hw: HW module.
 *
 */
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb  *mbp;
	enum fw_retval retval;
	uint8_t portid;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, 0, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval,
					      &hw->pport[i].pcap);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		/* Write back PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
			     (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

	} /* For all ports */

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb  *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource information */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	*param = _param[0];

	return 0;
}
static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;

	if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 CSIO_CF_FNAME(hw), ret);
		return -ENOENT;
	}

	if (cf->size % 4 != 0)
		value_to_add = 4 - (cf->size % 4);

	cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n",
			  CSIO_CF_FNAME(hw));
		snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}
/*
 * HW initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration -- either using the configuration
 * file stored in flash on the adapter or using a filesystem-local file
 * if available.
 *
 * If we don't have configuration file support in the firmware, then we'll
 * have to set things up the old fashioned way with hard-coded register
 * writes and firmware commands ...
 */

/*
 * Attempt to initialize the HW via a Firmware Configuration File.
 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	unsigned int mtype, maddr;
	int rv;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	int using_flash;
	char path[64];

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file in host,
	 * then use that. Otherwise, use the configuration file stored
	 * in the HW flash ...
	 */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv == -ENOENT) {
		/*
		 * config file was not found. Use default
		 * config file from flash.
		 */
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
		using_flash = 1;
	} else if (rv != 0) {
		/*
		 * we revert back to the hardwired config if
		 * flashing the config file failed.
		 */
		goto bye;
	} else {
		mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
		using_flash = 0;
	}

	hw->cfg_store = (uint8_t)mtype;

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.
	 */
	rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver,
				    &finicsum, &cfcsum);
	if (rv != 0)
		goto bye;

	hw->cfg_finiver		= finiver;
	hw->cfg_finicsum	= finicsum;
	hw->cfg_cfcsum		= cfcsum;
	hw->cfg_csum_status	= true;

	if (finicsum != cfcsum) {
		csio_warn(hw,
			  "Config File checksum mismatch: csum=%#x, computed=%#x\n",
			  finicsum, cfcsum);
		hw->cfg_csum_status = false;
	}

	/*
	 * Note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw,
		  "Firmware Configuration File %s, version %#x, computed checksum %#x\n",
		  (using_flash ? "in device FLASH" : path), finiver, cfcsum);

	return 0;

	/*
	 * Something bad happened. Return the error ...
	 */
bye:
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_dbg(hw, "Configuration file error %d\n", rv);
	return rv;
}
/*
 * Attempt to initialize the adapter via hard-coded, driver supplied
 * parameters ...
 */
static int
csio_hw_no_fwconfig(struct csio_hw *hw, int reset)
{
	int rv;
	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto out;
	}

	/* Get and set device capabilities */
	rv = csio_config_device_caps(hw);
	if (rv != 0)
		goto out;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto out;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

out:
	return rv;
}
/*
 * Returns -EINVAL if attempts to flash the firmware failed,
 * else returns 0;
 * if flashing was not attempted because the card had the
 * latest firmware, ECANCELED is returned.
 */
static int
csio_hw_flash_fw(struct csio_hw *hw)
{
	int ret = -ECANCELED;
	const struct firmware *fw;
	const struct fw_hdr *hdr;
	u32 fw_ver;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;

	if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
		csio_err(hw, "could not find firmware image %s, err: %d\n",
			 CSIO_FW_FNAME(hw), ret);
		return -EINVAL;
	}

	hdr = (const struct fw_hdr *)fw->data;
	fw_ver = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw))
		return -EINVAL;      /* wrong major version, won't do */

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
	    fw_ver > hw->fwrev) {
		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
					 /*force=*/false);
		if (!ret)
			csio_info(hw,
				  "firmware upgraded to version %pI4 from %s\n",
				  &hdr->fw_ver, CSIO_FW_FNAME(hw));
		else
			csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
	}

	release_firmware(fw);

	return ret;
}
/*
 * csio_hw_configure - Configure HW
 * @hw: HW module
 *
 */
static void
csio_hw_configure(struct csio_hw *hw)
{
	int reset = 1;
	int rv;
	u32 param[1];

	rv = csio_hw_dev_ready(hw);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* HW version */
	hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);

	/* Needed for FW download */
	rv = csio_hw_get_flash_params(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Set PCIe completion timeout to 4 seconds */
	if (pci_is_pcie(hw->pdev))
		pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
				PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);

	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);

	rv = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (rv != 0)
		goto out;

	csio_hw_print_fw_version(hw, "Firmware revision");

	rv = csio_do_hello(hw, &hw->fw_state);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Read vpd */
	rv = csio_hw_get_vpd_params(hw, &hw->vpd);
	if (rv != 0)
		goto out;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_hw_check_fw_version(hw);
		if (rv == -EINVAL) {

			/* Do firmware update */
			spin_unlock_irq(&hw->lock);
			rv = csio_hw_flash_fw(hw);
			spin_lock_irq(&hw->lock);

			if (rv == 0) {
				reset = 0;
				/*
				 * Note that the chip was reset as part of the
				 * firmware upgrade so we don't reset it again
				 * below and grab the new firmware version.
				 */
				rv = csio_hw_check_fw_version(hw);
			}
		}
		/*
		 * If the firmware doesn't support Configuration
		 * Files, use the old Driver-based, hard-wired
		 * initialization. Otherwise, try using the
		 * Configuration File support and fall back to the
		 * Driver-based initialization if there's no
		 * Configuration File found.
		 */
		if (csio_hw_check_fwconfig(hw, param) == 0) {
			rv = csio_hw_use_fwconfig(hw, reset, param);
			if (rv == -ENOENT)
				goto out;
			if (rv != 0) {
				csio_info(hw,
					  "No Configuration File present "
					  "on adapter. Using hard-wired "
					  "configuration parameters.\n");
				rv = csio_hw_no_fwconfig(hw, reset);
			}
		} else {
			rv = csio_hw_no_fwconfig(hw, reset);
		}

		if (rv != 0)
			goto out;

	} else {
		if (hw->fw_state == CSIO_DEV_STATE_INIT) {

			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

			/* device parameters */
			rv = csio_get_device_params(hw);
			if (rv != 0)
				goto out;

			/* Get device capabilities */
			rv = csio_config_device_caps(hw);
			if (rv != 0)
				goto out;

			/* Configure SGE */
			csio_wr_sge_init(hw);

			/* Post event to notify completion of configuration */
			csio_post_event(&hw->sm, CSIO_HWE_INIT);
		}
	} /* if not master */

out:
	return;
}
/*
 * csio_hw_initialize - Initialize HW
 * @hw: HW module
 *
 */
static void
csio_hw_initialize(struct csio_hw *hw)
{
	struct csio_mb  *mbp;
	enum fw_retval retval;
	int rv;
	int i;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp)
			goto out;

		csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
			goto free_and_out;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
				 retval);
			goto free_and_out;
		}

		mempool_free(mbp, hw->mb_mempool);
	}

	rv = csio_get_fcoe_resinfo(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
		goto out;
	}

	spin_unlock_irq(&hw->lock);
	rv = csio_config_queues(hw);
	spin_lock_irq(&hw->lock);

	if (rv != 0) {
		csio_err(hw, "Config of queues failed!: %d\n", rv);
		goto out;
	}

	for (i = 0; i < hw->num_pports; i++)
		hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_enable_ports(hw);
		if (rv != 0) {
			csio_err(hw, "Failed to enable ports: %d\n", rv);
			goto out;
		}
	}

	csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
	return;

free_and_out:
	mempool_free(mbp, hw->mb_mempool);
out:
	return;
}
#define PF_INTR_MASK (PFSW | PFCIM)

/*
 * csio_hw_intr_enable - Enable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Enable interrupts in HW registers.
 */
static void
csio_hw_intr_enable(struct csio_hw *hw)
{
	uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
	uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
	uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);

	/*
	 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
	 * by FW, so do nothing for INTX.
	 */
	if (hw->intr_mode == CSIO_IM_MSIX)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
				   AIVEC(AIVEC_MASK), vec);
	else if (hw->intr_mode == CSIO_IM_MSI)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
				   AIVEC(AIVEC_MASK), 0);

	csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));

	/* Turn on MB interrupts - this will internally flush PIO as well */
	csio_mb_intr_enable(hw);

	/* These are common registers - only a master can modify them */
	if (csio_is_hw_master(hw)) {
		/*
		 * Disable the Serial FLASH interrupt, if enabled!
		 */
		pl &= (~SF);
		csio_wr_reg32(hw, pl, PL_INT_ENABLE);

		csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE |
			      EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC |
			      ERR_CPL_OPCODE_0 | ERR_DROPPED_DB |
			      ERR_DATA_CPL_ON_HIGH_QID1 |
			      ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
			      ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
			      ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
			      ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR,
			      SGE_INT_ENABLE3);
		csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
	}

	hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
}
/*
 * csio_hw_intr_disable - Disable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Turn off Mailbox and PCI_PF_CFG interrupts.
 */
void
csio_hw_intr_disable(struct csio_hw *hw)
{
	uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));

	if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
		return;

	hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;

	csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
	if (csio_is_hw_master(hw))
		csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);

	/* Turn off MB interrupts */
	csio_mb_intr_disable(hw);
}
void
csio_hw_fatal_err(struct csio_hw *hw)
{
	csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
	csio_hw_intr_disable(hw);

	/* Do not reset HW, we may need FW state for debugging */
	csio_fatal(hw, "HW Fatal error encountered!\n");
}
/*****************************************************************************/
/* START: HW SM                                                              */
/*****************************************************************************/
/*
 * csio_hws_uninit - Uninit state
 * @hw: HW module
 * @evt: Event
 *
 */
static void
csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_CFG:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_configuring - Configuring state
 * @hw: HW module
 * @evt: Event
 *
 */
static void
csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT:
		csio_set_state(&hw->sm, csio_hws_initializing);
		csio_hw_initialize(hw);
		break;

	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);
		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_initializing - Initializing state
 * @hw: HW module
 * @evt: Event
 *
 */
static void
csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);

		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);

		/* Enable interrupts */
		csio_hw_intr_enable(hw);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_ready - Ready state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
{
	/* Remember the event */
	hw->evtflag = evt;

	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
	case CSIO_HWE_FW_DLOAD:
	case CSIO_HWE_SUSPEND:
	case CSIO_HWE_PCI_REMOVE:
	case CSIO_HWE_PCIERR_DETECTED:
		csio_set_state(&hw->sm, csio_hws_quiescing);
		/* cleanup all outstanding cmds */
		if (evt == CSIO_HWE_HBA_RESET ||
		    evt == CSIO_HWE_PCIERR_DETECTED)
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
		else
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);

		csio_hw_intr_disable(hw);
		csio_hw_mbm_cleanup(hw);
		csio_evtq_stop(hw);
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
		csio_evtq_flush(hw);
		csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
		csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_quiescing - Quiescing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_QUIESCED:
		switch (hw->evtflag) {
		case CSIO_HWE_FW_DLOAD:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Download firmware */
			/* Fall through */

		case CSIO_HWE_HBA_RESET:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Start reset of the HBA */
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
			csio_wr_destroy_queues(hw, false);
			csio_do_reset(hw, false);
			csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
			break;

		case CSIO_HWE_PCI_REMOVE:
			csio_set_state(&hw->sm, csio_hws_removing);
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
			csio_wr_destroy_queues(hw, true);
			/* Now send the bye command */
			csio_do_bye(hw);
			break;

		case CSIO_HWE_SUSPEND:
			csio_set_state(&hw->sm, csio_hws_quiesced);
			break;

		case CSIO_HWE_PCIERR_DETECTED:
			csio_set_state(&hw->sm, csio_hws_pcierr);
			csio_wr_destroy_queues(hw, false);
			break;

		default:
			CSIO_INC_STATS(hw, n_evt_unexp);
			break;
		}
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_quiesced - Quiesced state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_RESUME:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_resetting - HW Resetting state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET_DONE:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_removing - PCI Hotplug removing state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
		if (!csio_is_hw_master(hw))
			break;
		/*
		 * The BYE should have already been issued, so we can't
		 * use the mailbox interface. Hence we use the PL_RST
		 * register directly.
		 */
		csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		mdelay(2000);
		break;

	/* Should never receive any new events */
	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*
 * csio_hws_pcierr - PCI Error state
 * @hw - HW module
 * @evt - Event
 *
 */
static void
csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_PCIERR_SLOT_RESET:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
/*****************************************************************************/
/* END: HW SM                                                                */
/*****************************************************************************/
/*
 * csio_handle_intr_status - table driven interrupt handler
 * @hw: HW instance
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred. The actions include
 * optionally emitting a warning or alert message. The table is terminated
 * by an entry specifying mask 0. Returns the number of fatal interrupt
 * conditions.
 */
int
csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
			const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = csio_rd_reg32(hw, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			csio_fatal(hw, "Fatal %s (0x%x)\n",
				   acts->msg, status & acts->mask);
		} else if (acts->msg)
			csio_info(hw, "%s (0x%x)\n",
				  acts->msg, status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		csio_wr_reg32(hw, status, reg);
	return fatal;
}
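/*
 * Each intr_info entry in the tables below pairs a cause-register bitmask
 * with the message to log, a per-entry index that is always -1 in this
 * driver, and a fatal flag; an all-zero entry terminates the table.
 */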
/*
 * TP interrupt handler.
 */
static void csio_tp_intr_handler(struct csio_hw *hw)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * SGE interrupt handler.
 */
static void csio_sge_intr_handler(struct csio_hw *hw)
{
	uint64_t v;

	static struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0, NULL, 0, 0 }
	};

	v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) |
	    ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32);
	if (v) {
		csio_fatal(hw, "SGE parity error (%#llx)\n",
			   (unsigned long long)v);
		csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
			      SGE_INT_CAUSE1);
		csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2);
	}

	v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info);
	if (v != 0)
		csio_hw_fatal_err(hw);
}
#define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\
		      OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR)
#define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\
		      IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR)
/*
 * CIM interrupt handler.
 */
static void csio_cim_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE,
				      cim_intr_info) +
	      csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE,
				      cim_upintr_info);
	if (fat)
		csio_hw_fatal_err(hw);
}
/*
 * ULP RX interrupt handler.
 */
static void csio_ulprx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * ULP TX interrupt handler.
 */
static void csio_ulptx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * PM TX interrupt handler.
 */
static void csio_pmtx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
		  1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * PM RX interrupt handler.
 */
static void csio_pmrx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
		  1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * CPL switch interrupt handler.
 */
static void csio_cplsw_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * LE interrupt handler.
 */
static void csio_le_intr_handler(struct csio_hw *hw)
{
	static struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * MPS interrupt handler.
 */
static void csio_mps_intr_handler(struct csio_hw *hw)
{
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
		{ BUBBLE, "MPS Tx underflow", -1, 1 },
		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
				      mps_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
				      mps_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
				      mps_trc_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
				      mps_stat_sram_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				      mps_stat_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				      mps_stat_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
				      mps_cls_intr_info);

	csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
	csio_rd_reg32(hw, MPS_INT_CAUSE);	/* flush */
	if (fat)
		csio_hw_fatal_err(hw);
}
#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
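/*
 * MEM_INT_MASK groups the three memory-controller cause bits that
 * csio_mem_intr_handler() services for EDC0, EDC1 and MC: FIFO parity
 * errors, correctable ECC errors and uncorrectable ECC errors. Only the
 * parity and uncorrectable-ECC cases escalate to csio_hw_fatal_err().
 */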
/*
 * EDC/MC interrupt handler.
 */
static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr));

		csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr);
		csio_warn(hw, "%u %s correctable ECC data error%s\n",
			  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);

	csio_wr_reg32(hw, v, addr);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		csio_hw_fatal_err(hw);
}
/*
 * MA interrupt handler.
 */
static void csio_ma_intr_handler(struct csio_hw *hw)
{
	uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		csio_fatal(hw, "MA parity error, parity status %#x\n",
			   csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS);
		csio_fatal(hw,
		"MA address wrap-around error by client %u to address %#x\n",
		MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	csio_wr_reg32(hw, status, MA_INT_CAUSE);
	csio_hw_fatal_err(hw);
}
/*
 * SMB interrupt handler.
 */
static void csio_smb_intr_handler(struct csio_hw *hw)
{
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * NC-SI interrupt handler.
 */
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * XGMAC interrupt handler.
 */
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
	uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));

	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR)
		csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
	if (v & RXFIFO_PRTY_ERR)
		csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
	csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
	csio_hw_fatal_err(hw);
}
/*
 * PL interrupt handler.
 */
static void csio_pl_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
		csio_hw_fatal_err(hw);
}
/*
 * csio_hw_slow_intr_handler - control path interrupt handler
 * @hw: HW module
 *
 * Interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 */
int
csio_hw_slow_intr_handler(struct csio_hw *hw)
{
	uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);

	if (!(cause & CSIO_GLBL_INTR_MASK)) {
		CSIO_INC_STATS(hw, n_plint_unexp);
		return 0;
	}

	csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);

	CSIO_INC_STATS(hw, n_plint_cnt);

	if (cause & CIM)
		csio_cim_intr_handler(hw);

	if (cause & MPS)
		csio_mps_intr_handler(hw);

	if (cause & NCSI)
		csio_ncsi_intr_handler(hw);

	if (cause & PL)
		csio_pl_intr_handler(hw);

	if (cause & SMB)
		csio_smb_intr_handler(hw);

	if (cause & XGMAC0)
		csio_xgmac_intr_handler(hw, 0);

	if (cause & XGMAC1)
		csio_xgmac_intr_handler(hw, 1);

	if (cause & XGMAC_KR0)
		csio_xgmac_intr_handler(hw, 2);

	if (cause & XGMAC_KR1)
		csio_xgmac_intr_handler(hw, 3);

	if (cause & PCIE)
		hw->chip_ops->chip_pcie_intr_handler(hw);

	if (cause & MC)
		csio_mem_intr_handler(hw, MEM_MC);

	if (cause & EDC0)
		csio_mem_intr_handler(hw, MEM_EDC0);

	if (cause & EDC1)
		csio_mem_intr_handler(hw, MEM_EDC1);

	if (cause & LE)
		csio_le_intr_handler(hw);

	if (cause & TP)
		csio_tp_intr_handler(hw);

	if (cause & MA)
		csio_ma_intr_handler(hw);

	if (cause & PM_TX)
		csio_pmtx_intr_handler(hw);

	if (cause & PM_RX)
		csio_pmrx_intr_handler(hw);

	if (cause & ULP_RX)
		csio_ulprx_intr_handler(hw);

	if (cause & CPL_SWITCH)
		csio_cplsw_intr_handler(hw);

	if (cause & SGE)
		csio_sge_intr_handler(hw);

	if (cause & ULP_TX)
		csio_ulptx_intr_handler(hw);

	/* Clear the interrupts just processed for which we are the master. */
	csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
	csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */

	return 1;
}
/*****************************************************************************
 * HW <--> mailbox interfacing routines.
 ****************************************************************************/
/*
 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
 *
 * @data: Private data pointer.
 *
 * Called from worker thread context.
 */
static void
csio_mberr_worker(void *data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mbm *mbm = &hw->mbm;
	LIST_HEAD(cbfn_q);
	struct csio_mb *mbp_next;
	int rv;

	del_timer_sync(&mbm->timer);

	spin_lock_irq(&hw->lock);
	if (list_empty(&mbm->cbfn_q)) {
		spin_unlock_irq(&hw->lock);
		return;
	}

	list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
	mbm->stats.n_cbfnq = 0;

	/* Try to start waiting mailboxes */
	if (!list_empty(&mbm->req_q)) {
		mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
		list_del_init(&mbp_next->list);

		rv = csio_mb_issue(hw, mbp_next);
		if (rv != 0)
			list_add_tail(&mbp_next->list, &mbm->req_q);
		else
			CSIO_DEC_STATS(mbm, n_activeq);
	}
	spin_unlock_irq(&hw->lock);

	/* Now callback completions */
	csio_mb_completions(hw, &cbfn_q);
}
/*
 * csio_hw_mb_timer - Top-level Mailbox timeout handler.
 *
 * @data: private data pointer
 *
 */
static void
csio_hw_mb_timer(uintptr_t data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mb *mbp = NULL;

	spin_lock_irq(&hw->lock);
	mbp = csio_mb_tmo_handler(hw);
	spin_unlock_irq(&hw->lock);

	/* Call back the function for the timed-out Mailbox */
	if (mbp)
		mbp->mb_cbfn(hw, mbp);
}
/*
 * csio_hw_mbm_cleanup - Cleanup Mailbox module.
 * @hw: HW module
 *
 * Called with lock held, should exit with lock held.
 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
 * into a local queue. Drops lock and calls the completions. Holds
 * lock and returns.
 */
static void
csio_hw_mbm_cleanup(struct csio_hw *hw)
{
	LIST_HEAD(cbfn_q);

	csio_mb_cancel_all(hw, &cbfn_q);

	spin_unlock_irq(&hw->lock);
	csio_mb_completions(hw, &cbfn_q);
	spin_lock_irq(&hw->lock);
}
/*****************************************************************************
 * Event handling
 ****************************************************************************/
int
csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		 uint16_t len)
{
	struct csio_evt_msg *evt_entry = NULL;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	if (hw->flags & CSIO_HWF_FWEVT_STOP)
		return -EINVAL;

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		return -ENOMEM;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;
	memcpy((void *)evt_entry->data, evt_msg, len);
	list_add_tail(&evt_entry->list, &hw->evt_active_q);

	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);

	return 0;
}
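/*
 * csio_enqueue_evt() above moves entries between evt_free_q and
 * evt_active_q without taking hw->lock, so it is intended for callers that
 * already hold the lock; csio_enqueue_evt_lock() below is the variant that
 * acquires it itself and additionally understands scatter-gather payloads,
 * which is what the FW event queue path uses.
 */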
static int
csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		      uint16_t len, bool msg_sg)
{
	struct csio_evt_msg *evt_entry = NULL;
	struct csio_fl_dma_buf *fl_sg;
	uint32_t off = 0;
	unsigned long flags;
	int n, ret = 0;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&hw->lock, flags);
	if (hw->flags & CSIO_HWF_FWEVT_STOP) {
		ret = -EINVAL;
		goto out;
	}

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		ret = -ENOMEM;
		goto out;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;

	/* If Payload in SG list*/
	if (msg_sg) {
		fl_sg = (struct csio_fl_dma_buf *) evt_msg;
		for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
			memcpy((void *)((uintptr_t)evt_entry->data + off),
			       fl_sg->flbufs[n].vaddr,
			       fl_sg->flbufs[n].len);
			off += fl_sg->flbufs[n].len;
		}
	} else
		memcpy((void *)evt_entry->data, evt_msg, len);

	list_add_tail(&evt_entry->list, &hw->evt_active_q);
	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);
out:
	spin_unlock_irqrestore(&hw->lock, flags);
	return ret;
}
static void
csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
{
	if (evt_entry) {
		spin_lock_irq(&hw->lock);
		list_del_init(&evt_entry->list);
		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_DEC_STATS(hw, n_evt_activeq);
		CSIO_INC_STATS(hw, n_evt_freeq);
		spin_unlock_irq(&hw->lock);
	}
}
void
csio_evtq_flush(struct csio_hw *hw)
{
	uint32_t count;

	count = 30;
	while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
}
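/*
 * csio_evtq_stop()/csio_evtq_start() below simply toggle
 * CSIO_HWF_FWEVT_STOP: while it is set, the enqueue paths reject new
 * events and the worker drops whatever it dequeues, which lets
 * csio_evtq_flush() drain the queue during quiescing.
 */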
void
csio_evtq_stop(struct csio_hw *hw)
{
	hw->flags |= CSIO_HWF_FWEVT_STOP;
}

void
csio_evtq_start(struct csio_hw *hw)
{
	hw->flags &= ~CSIO_HWF_FWEVT_STOP;
}
static void
csio_evtq_cleanup(struct csio_hw *hw)
{
	struct list_head *evt_entry, *next_entry;

	/* Release outstanding events from activeq to freeq*/
	if (!list_empty(&hw->evt_active_q))
		list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);

	hw->stats.n_evt_activeq = 0;
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;

	/* Freeup event entry */
	list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
		kfree(evt_entry);
		CSIO_DEC_STATS(hw, n_evt_freeq);
	}

	hw->stats.n_evt_freeq = 0;
}
static void
csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
			  struct csio_fl_dma_buf *flb, void *priv)
{
	__u8 op;
	void *msg = NULL;
	uint32_t msg_len = 0;
	bool msg_sg = 0;

	op = ((struct rss_header *) wr)->opcode;
	if (op == CPL_FW6_PLD) {
		CSIO_INC_STATS(hw, n_cpl_fw6_pld);
		if (!flb || !flb->totlen) {
			CSIO_INC_STATS(hw, n_cpl_unexp);
			return;
		}

		msg = (void *) flb;
		msg_len = flb->totlen;
		msg_sg = 1;
	} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {

		CSIO_INC_STATS(hw, n_cpl_fw6_msg);
		/* skip RSS header */
		msg = (void *)((uintptr_t)wr + sizeof(__be64));
		msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
			  sizeof(struct cpl_fw4_msg);
	} else {
		csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
		CSIO_INC_STATS(hw, n_cpl_unexp);
		return;
	}

	/*
	 * Enqueue event to EventQ. Events processing happens
	 * in Event worker thread context
	 */
	if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
				  (uint16_t)msg_len, msg_sg))
		CSIO_INC_STATS(hw, n_evt_drop);
}
static void
csio_evtq_worker(struct work_struct *work)
{
	struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
	struct list_head *evt_entry, *next_entry;
	LIST_HEAD(evt_q);
	struct csio_evt_msg *evt_msg;
	struct cpl_fw6_msg *msg;
	struct csio_rnode *rn;
	int rv = 0;
	uint8_t evtq_stop = 0;

	csio_dbg(hw, "event worker thread active evts#%d\n",
		 hw->stats.n_evt_activeq);

	spin_lock_irq(&hw->lock);
	while (!list_empty(&hw->evt_active_q)) {
		list_splice_tail_init(&hw->evt_active_q, &evt_q);
		spin_unlock_irq(&hw->lock);

		list_for_each_safe(evt_entry, next_entry, &evt_q) {
			evt_msg = (struct csio_evt_msg *) evt_entry;

			/* Drop events if queue is STOPPED */
			spin_lock_irq(&hw->lock);
			if (hw->flags & CSIO_HWF_FWEVT_STOP)
				evtq_stop = 1;
			spin_unlock_irq(&hw->lock);
			if (evtq_stop) {
				CSIO_INC_STATS(hw, n_evt_drop);
				goto free_evt;
			}

			switch (evt_msg->type) {
			case CSIO_EVT_FW:
				msg = (struct cpl_fw6_msg *)(evt_msg->data);

				if ((msg->opcode == CPL_FW6_MSG ||
				     msg->opcode == CPL_FW4_MSG) &&
				    !msg->type) {
					rv = csio_mb_fwevt_handler(hw,
								   msg->data);
					if (!rv)
						break;
					/* Handle any remaining fw events */
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else if (msg->opcode == CPL_FW6_PLD) {
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else {
					csio_warn(hw,
					     "Unhandled FW msg op %x type %x\n",
						  msg->opcode, msg->type);
					CSIO_INC_STATS(hw, n_evt_drop);
				}
				break;

			case CSIO_EVT_MBX:
				csio_mberr_worker(hw);
				break;

			case CSIO_EVT_DEV_LOSS:
				memcpy(&rn, evt_msg->data, sizeof(rn));
				csio_rnode_devloss_handler(rn);
				break;

			default:
				csio_warn(hw, "Unhandled event %x on evtq\n",
					  evt_msg->type);
				CSIO_INC_STATS(hw, n_evt_unexp);
				break;
			}

free_evt:
			csio_free_evt(hw, evt_msg);
		}

		spin_lock_irq(&hw->lock);
	}
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
	spin_unlock_irq(&hw->lock);
}
int
csio_fwevtq_handler(struct csio_hw *hw)
{
	int rv;

	if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
		CSIO_INC_STATS(hw, n_int_stray);
		return -EINVAL;
	}

	rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
				    csio_process_fwevtq_entry, NULL);
	return rv;
}
/****************************************************************************
 * Entry points
 ****************************************************************************/

/* Management module */
/*
 * csio_mgmt_req_lookup - Look up whether the given IO req exists in the
 * active Q.
 * @mgmtm - mgmt module
 * @io_req - io request
 *
 * Return - 0: if the given IO req exists in the active Q.
 *          -EINVAL: if the lookup fails.
 */
int
csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
{
	struct list_head *tmp;

	/* Lookup ioreq in the ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		if (io_req == (struct csio_ioreq *)tmp)
			return 0;
	}
	return -EINVAL;
}
#define ECM_MIN_TMO	1000	/* Minimum timeout value for req */
/*
 * csio_mgmt_tmo_handler - MGMT IO Timeout handler.
 * @data - Event data.
 *
 * Return - none.
 */
static void
csio_mgmt_tmo_handler(uintptr_t data)
{
	struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
	struct list_head *tmp;
	struct csio_ioreq *io_req;

	csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");

	spin_lock_irq(&mgmtm->hw->lock);

	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);

		if (!io_req->tmo) {
			/* Dequeue the request from retry Q. */
			tmp = csio_list_prev(tmp);
			list_del_init(&io_req->sm.sm_list);
			if (io_req->io_cbfn) {
				/* io_req will be freed by completion handler */
				io_req->wr_status = -ETIMEDOUT;
				io_req->io_cbfn(mgmtm->hw, io_req);
			}
		}
	}

	/* If retry queue is not empty, re-arm timer */
	if (!list_empty(&mgmtm->active_q))
		mod_timer(&mgmtm->mgmt_timer,
			  jiffies + msecs_to_jiffies(ECM_MIN_TMO));
	spin_unlock_irq(&mgmtm->hw->lock);
}
static void
csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
{
	struct csio_hw *hw = mgmtm->hw;
	struct csio_ioreq *io_req;
	struct list_head *tmp;
	uint32_t count;

	count = 30;
	/* Wait for all outstanding req to complete gracefully */
	while ((!list_empty(&mgmtm->active_q)) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	/* release outstanding req from ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		tmp = csio_list_prev(tmp);
		list_del_init(&io_req->sm.sm_list);
		mgmtm->stats.n_active--;
		if (io_req->io_cbfn) {
			/* io_req will be freed by completion handler */
			io_req->wr_status = -ETIMEDOUT;
			io_req->io_cbfn(mgmtm->hw, io_req);
		}
	}
}
/*
 * csio_mgmtm_init - Mgmt module init entry point
 * @mgmtm - mgmt module
 * @hw - HW module
 *
 * Initialize mgmt timer, resource wait queue, active queue,
 * completion q. Allocate Egress and Ingress
 * WR queues and save off the queue index returned by the WR
 * module for future use. Allocate and save off mgmt reqs in the
 * mgmt_req_freelist for future use. Make sure their SM is initialized
 * to uninit state.
 * Returns: 0 - on success
 *          -ENOMEM - on error.
 */
static int
csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
{
	struct timer_list *timer = &mgmtm->mgmt_timer;

	init_timer(timer);
	timer->function = csio_mgmt_tmo_handler;
	timer->data = (unsigned long)mgmtm;

	INIT_LIST_HEAD(&mgmtm->active_q);
	INIT_LIST_HEAD(&mgmtm->cbfn_q);

	mgmtm->hw = hw;
	/*mgmtm->iq_idx = hw->fwevt_iq_idx;*/

	return 0;
}
/*
 * csio_mgmtm_exit - MGMT module exit entry point
 * @mgmtm - mgmt module
 *
 * This function is called during MGMT module uninit.
 * Stop timers, free ioreqs allocated.
 * Returns: None
 *
 */
static void
csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
{
	del_timer_sync(&mgmtm->mgmt_timer);
}
/*
 * csio_hw_start - Kicks off the HW State machine
 * @hw: Pointer to HW module.
 *
 * It is assumed that the initialization is a synchronous operation.
 * So when we return after posting the event, the HW SM should be in
 * the ready state, if there were no errors during init.
 */
int
csio_hw_start(struct csio_hw *hw)
{
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_CFG);
	spin_unlock_irq(&hw->lock);

	if (csio_is_hw_ready(hw))
		return 0;
	else
		return -EINVAL;
}
int
csio_hw_stop(struct csio_hw *hw)
{
	csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);

	if (csio_is_hw_removing(hw))
		return 0;
	else
		return -EINVAL;
}
/* Max reset retries */
#define CSIO_MAX_RESET_RETRIES	3

/*
 * csio_hw_reset - Reset the hardware
 * @hw: HW module.
 *
 * Caller should hold lock across this function.
 */
int
csio_hw_reset(struct csio_hw *hw)
{
	if (!csio_is_hw_master(hw))
		return -EPERM;

	if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
		csio_dbg(hw, "Max hw reset attempts reached..");
		return -EINVAL;
	}

	hw->rst_retries++;
	csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);

	if (csio_is_hw_ready(hw)) {
		hw->rst_retries = 0;
		hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
		return 0;
	} else
		return -EINVAL;
}
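/*
 * Reset attempts are counted in rst_retries and capped at
 * CSIO_MAX_RESET_RETRIES; a reset that brings the HW back to the ready
 * state clears the counter, so only consecutive failures exhaust it.
 */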
/*
 * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
 * @hw: HW module.
 */
static void
csio_hw_get_device_id(struct csio_hw *hw)
{
	/* Is the adapter device id cached already ?*/
	if (csio_is_dev_id_cached(hw))
		return;

	/* Get the PCI vendor & device id */
	pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
			     &hw->params.pci.vendor_id);
	pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
			     &hw->params.pci.device_id);

	csio_dev_id_cached(hw);
	hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);

} /* csio_hw_get_device_id */
/*
 * csio_hw_set_description - Set the model, description of the hw.
 * @hw: HW module.
 * @ven_id: PCI Vendor ID
 * @dev_id: PCI Device ID
 */
static void
csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
{
	uint32_t adap_type, prot_type;

	if (ven_id == CSIO_VENDOR_ID) {
		prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
		adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);

		if (prot_type == CSIO_T4_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t4_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t4_fcoe_adapters[adap_type].description,
			       32);
		} else if (prot_type == CSIO_T5_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t5_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t5_fcoe_adapters[adap_type].description,
			       32);
		} else {
			char tempName[32] = "Chelsio FCoE Controller";
			memcpy(hw->model_desc, tempName, 32);
		}
	}
} /* csio_hw_set_description */
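/*
 * The low bits of the PCI device id (CSIO_ASIC_DEVID_TYPE_MASK) index
 * straight into the csio_t4_fcoe_adapters[]/csio_t5_fcoe_adapters[]
 * description tables, while CSIO_ASIC_DEVID_PROTO_MASK selects which
 * table applies.
 */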
/*
 * csio_hw_init - Initialize HW module.
 * @hw: Pointer to HW module.
 *
 * Initialize the members of the HW module.
 */
int
csio_hw_init(struct csio_hw *hw)
{
	int rv = -EINVAL;
	uint32_t i;
	uint16_t ven_id, dev_id;
	struct csio_evt_msg *evt_entry;

	INIT_LIST_HEAD(&hw->sm.sm_list);
	csio_init_state(&hw->sm, csio_hws_uninit);
	spin_lock_init(&hw->lock);
	INIT_LIST_HEAD(&hw->sln_head);

	/* Get the PCI vendor & device id */
	csio_hw_get_device_id(hw);

	strcpy(hw->name, CSIO_HW_NAME);

	/* Initialize the HW chip ops with T4/T5 specific ops */
	hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;

	/* Set the model & its description */
	ven_id = hw->params.pci.vendor_id;
	dev_id = hw->params.pci.device_id;

	csio_hw_set_description(hw, ven_id, dev_id);

	/* Initialize default log level */
	hw->params.log_level = (uint32_t) csio_dbg_level;

	csio_set_fwevt_intr_idx(hw, -1);
	csio_set_nondata_intr_idx(hw, -1);

	/* Init all the modules: Mailbox, WorkRequest and Transport */
	if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
		goto err;

	rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
	if (rv)
		goto err_mbm_exit;

	rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
	if (rv)
		goto err_wrm_exit;

	rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
	if (rv)
		goto err_scsim_exit;

	/* Pre-allocate evtq and initialize them */
	INIT_LIST_HEAD(&hw->evt_active_q);
	INIT_LIST_HEAD(&hw->evt_free_q);
	for (i = 0; i < csio_evtq_sz; i++) {
		evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
		if (!evt_entry) {
			rv = -ENOMEM;
			csio_err(hw, "Failed to initialize eventq");
			goto err_evtq_cleanup;
		}

		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_INC_STATS(hw, n_evt_freeq);
	}

	hw->dev_num = dev_num;
	dev_num++;

	return 0;

err_evtq_cleanup:
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
err_scsim_exit:
	csio_scsim_exit(csio_hw_to_scsim(hw));
err_wrm_exit:
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
err_mbm_exit:
	csio_mbm_exit(csio_hw_to_mbm(hw));
err:
	return rv;
}
/*
 * csio_hw_exit - Un-initialize HW module.
 * @hw: Pointer to HW module.
 *
 */
void
csio_hw_exit(struct csio_hw *hw)
{
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
	csio_scsim_exit(csio_hw_to_scsim(hw));
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
	csio_mbm_exit(csio_hw_to_mbm(hw));
}