/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
int csio_force_master;
int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;
/* Default MSI param level */
static int csio_msi = 2;

/* FCoE function instances */
static int dev_num;

/* FCoE Adapter types and their descriptions */
static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
	{"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
	{"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
	{"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
	{"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},
	{"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},
	{"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},
	{"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},
	{"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},
	{"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},
	{"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},
	{"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},
	{"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},
	{"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
	{"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
	{"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
	{"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
	{"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
	{"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
	{"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
	{"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
	{"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
	{"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
};
static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
};
static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);
int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}
/*
 * csio_hw_wait_op_done_val - wait until an operation is completed
 * @hw: the HW module
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
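/*
 * Example (illustrative only): this poll helper is how the serial flash
 * routines further down wait for the controller.  A caller first kicks off
 * an operation and then waits for the busy bit to drop:
 *
 *	csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
 *	ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
 *				       10, NULL);
 *
 * @polarity is 0 here because completion is signalled by SF_BUSY
 * clearing rather than setting.
 */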
/*
 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @hw: the HW module
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR);
	val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA);
}
void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	csio_rd_reg32(hw, reg);
}
static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}
/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3
/*
 * csio_hw_seeprom_read - read a serial EEPROM location
 * @hw: the HW module
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability.  Note that this function must be called with a virtual
 * address.
 */
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
	uint16_t val = 0;
	int attempts = EEPROM_MAX_RD_POLL;
	uint32_t base = hw->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

	do {
		udelay(10);
		pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
		return -EINVAL;
	}

	pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);

	return 0;
}
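/*
 * Note: the PCI VPD capability behaves like a small mailbox.  Writing the
 * VPD address with the F flag clear starts a read; the device then sets
 * PCI_VPD_ADDR_F once the 32-bit word is available in PCI_VPD_DATA.
 * EEPROM_MAX_RD_POLL bounds how long we spin on that flag above.
 */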
/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * csio_hw_get_vpd_keyword_val - Locates an information field keyword in
 *				 the VPD
 * @v: Pointer to buffered vpd data structure
 * @kw: The keyword to search for
 *
 * Returns the value of the information field keyword or
 * -EINVAL otherwise.
 */
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int32_t i;
	int32_t offset, len;
	const uint8_t *buf = &v->id_tag;
	const uint8_t *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -EINVAL;

	for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -EINVAL;
}
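/*
 * VPD-R information fields are laid out as a 3-byte header followed by the
 * payload, which is why VPD_INFO_FLD_HDR_SIZE is 3:
 *
 *	bytes 0-1: two-character keyword (e.g. "SN")
 *	byte 2:    payload length
 *	byte 3..:  payload itself
 *
 * The value returned above is therefore the offset of the payload within
 * the buffered VPD image.
 */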
static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);
	if (*pos)
		return 0;

	return -1;
}
/*
 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 * @hw: HW module
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
	int i, ret, ec, sn, addr;
	uint8_t *vpd, csum;
	const struct t4_vpd_hdr *v;
	/* To get around compilation warning from strstrip */
	char *s;

	if (csio_is_valid_vpd(hw))
		return 0;

	ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
				  &hw->params.pci.vpd_cap_addr);
	if (ret)
		return -EINVAL;

	vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
	if (vpd == NULL)
		return -ENOMEM;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
		if (ret) {
			kfree(vpd);
			return ret;
		}
	}

	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);

	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}
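/*
 * The "RV" keyword holds a checksum byte chosen so that the sum of all VPD
 * bytes from the start of the image up to and including the RV value is
 * zero; the loop above verifies exactly that before the EC/SN fields are
 * trusted.
 */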
/*
 * csio_hw_sf1_read - read data from the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
		return -EBUSY;

	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;

	csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
	ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA);

	return ret;
}
/*
 * csio_hw_sf1_write - write data to the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
		return -EBUSY;

	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;

	csio_wr_reg32(hw, val, SF_DATA);
	csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);

	return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
					10, NULL);
}
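/*
 * Both SF helpers above drive the same SF_OP register: BYTECNT encodes the
 * transfer size (0 means one byte, hence byte_cnt - 1), OP_WR selects a
 * write, SF_CONT chains multi-cycle commands, and SF_LOCK reserves the
 * interface for PL access.  Multi-byte flash commands are issued as a
 * sequence of sf1_write/sf1_read calls with @cont set on all but the last.
 */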
/*
 * csio_hw_flash_wait_op - wait for a flash operation to complete
 * @hw: the HW module
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
	int ret;
	uint32_t status;

	while (1) {
		ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
		if (ret != 0)
			return ret;

		ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
		if (ret != 0)
			return ret;

		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
/*
 * csio_hw_read_flash - read words from serial flash
 * @hw: the HW module
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
		   uint32_t *data, int32_t byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}

	return 0;
}
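/*
 * The SF_RD_DATA_FAST command addresses the flash big-endian (hence the
 * swab32() above).  With @byte_oriented set, each word read back is
 * converted with htonl() so the caller sees the image as a raw big-endian
 * byte stream -- the firmware header parsing and the page verify in
 * csio_hw_write_flash() rely on this.
 */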
/*
 * csio_hw_write_flash - write up to a page of data to the serial flash
 * @hw: the HW module
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}

	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
	return ret;
}
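/*
 * Callers must keep each write within one 256-byte flash page
 * (offset + n <= SF_PAGE_SIZE above); csio_hw_fw_dload() below honours
 * this by streaming the firmware image one SF_PAGE_SIZE chunk at a time.
 */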
/*
 * csio_hw_flash_erase_sectors - erase a range of flash sectors
 * @hw: the HW module
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
	int ret = 0;

	while (start <= end) {

		ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
		if (ret != 0)
			goto out;

		ret = csio_hw_sf1_write(hw, 4, 0, 1,
					SF_ERASE_SECTOR | (start << 8));
		if (ret != 0)
			goto out;

		ret = csio_hw_flash_wait_op(hw, 14, 500);
		if (ret != 0)
			goto out;

		start++;
	}
out:
	if (ret)
		csio_err(hw, "erase of flash sector %d failed, error %d\n",
			 start, ret);
	csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
	return 0;
}
static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
}
/*
 * csio_hw_get_fw_version - read the firmware version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FW_IMG_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}
/*
 * csio_hw_get_tp_version - read the TP microcode version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, tp_microcode_ver), 1,
				  vers, 0);
}
/*
 * csio_hw_check_fw_version - check if the FW is compatible with
 *			      this driver
 * @hw: HW module
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if there's an exact match, a negative error if the version could not
 * be read or there's a major version mismatch.
 */
static int
csio_hw_check_fw_version(struct csio_hw *hw)
{
	int ret, major, minor, micro;

	ret = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (!ret)
		ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev);
	minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
	micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);

	if (major != FW_VERSION_MAJOR(hw)) {	/* major mismatch - fail */
		csio_err(hw, "card FW has major version %u, driver wants %u\n",
			 major, FW_VERSION_MAJOR(hw));
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
		return 0;        /* perfect match */

	/* Minor/micro version mismatch */
	return -EINVAL;
}
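/*
 * The 32-bit firmware revision word packs major/minor/micro/build fields,
 * which the FW_HDR_FW_VER_*_GET() accessors extract.  Any mismatch makes
 * this function return -EINVAL, which csio_hw_configure() later treats as
 * a cue to attempt a firmware flash upgrade.
 */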
/*
 * csio_hw_fw_dload - download firmware.
 * @hw: HW module.
 * @fw_data: firmware image to write.
 * @size: image size.
 *
 * Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FW_START_SEC, FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,
					  FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FW_IMG_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = csio_hw_write_flash(hw,
				  FW_IMG_START +
				  offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}
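/*
 * The image checksum verified above relies on the firmware build inserting
 * a compensation word so that the sum of all big-endian 32-bit words in
 * the image equals 0xffffffff.
 */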
static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	int ret;
	uint32_t info = 0;

	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
	csio_wr_reg32(hw, 0, SF_OP);    /* unlock SF */
	if (ret != 0)
		return ret;

	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		hw->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		hw->params.sf_nsec = 64;
	else
		return -EINVAL;
	hw->params.sf_size = 1 << info;

	return 0;
}
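/*
 * Flash ID decode (illustrative): the low byte of the SF_RD_ID response is
 * the manufacturer code (0x20 = Numonix) and bits 16+ give log2 of the
 * device size.  For example, info = 0x17 means a 2^0x17 = 8MB part split
 * into 1 << (0x17 - 16) = 128 sectors of 64KB each.
 */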
static void
csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
{
	uint16_t val;
	int pcie_cap;

	if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
		pci_read_config_word(hw->pdev,
				     pcie_cap + PCI_EXP_DEVCTL2, &val);
		val &= 0xfff0;
		val |= range;
		pci_write_config_word(hw->pdev,
				      pcie_cap + PCI_EXP_DEVCTL2, val);
	}
}
/*****************************************************************************/
/* HW State machine assists                                                  */
/*****************************************************************************/

static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
			   (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	hw->pfn = SOURCEPF_GET(reg);

	return 0;
}
/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD has to be polled for completion.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb	*mbp;
	int rv = 0;
	enum csio_dev_master master;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

	master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY;

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, master, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show up
		 * momentarily and we wouldn't want to fail pointlessly.  (This
		 * can happen when an OS loads lots of different drivers rapidly
		 * at the same time).  In this case, the Master PF returned by
		 * the firmware will be PCIE_FW_MASTER_MASK so the test below
		 * will work ...
		 */

		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW);
			if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (pcie_fw & PCIE_FW_ERR) {
				*state = CSIO_DEV_STATE_ERR;
				rv = -ETIMEDOUT;
			} else if (pcie_fw & PCIE_FW_INIT)
				*state = CSIO_DEV_STATE_INIT;

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_MASK &&
			    (pcie_fw & PCIE_FW_MASTER_VLD))
				mpfn = PCIE_FW_MASTER_GET(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}
/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 *
 */
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
/*
 * csio_do_reset - Perform the device reset.
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If fw_rst is set, issues FW reset mbox cmd otherwise
 * does PIO reset.
 * Performs reset of the function.
 */
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	if (!fw_rst) {
		/* PIO reset */
		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		mdelay(2000);
		return 0;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
		      PIORSTMODE | PIORST, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of RESET command failed.\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
	uint16_t caps;

	caps = ntohs(rsp->fcoecaps);

	if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
		csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
		return -EINVAL;
	}

	if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
		csio_err(hw, "No FCoE Control Offload capability\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state.  The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * PCIE_FW_MASTER_MASK).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing.  The only way out of this state is to RESTART the firmware.
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_MASK) {
		struct csio_mb	*mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1),
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST);
		csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}
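/*
 * csio_hw_fw_halt() and csio_hw_fw_restart() below are intended to be used
 * as a bracketing pair around operations such as a firmware download: HALT
 * parks the uP with PCIE_FW_HALT set, and RESTART either clears UPCRST
 * (new firmware) or performs a full chip RESET (old firmware that does not
 * understand the HALT flag).
 */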
/*
 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 * @hw: the HW module
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by csio_hw_fw_halt().  On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 *    the chip's microprocessor out of RESET.  This will cause the
 *    firmware to start up from its start vector.  And then we'll loop
 *    until the firmware indicates it's started again (PCIE_FW.HALT
 *    reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 *    the chip since older firmware won't recognize the PCIE_FW.HALT
 *    flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_MASK) {
			csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		msleep(2000);
	} else {
		int ms;

		csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
/*
 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image.  Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state.  On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}
/*
 * csio_hw_fw_config_file - setup an adapter via a Configuration File
 * @hw: the HW module
 * @mtype: the memory type where the Configuration File is located
 * @maddr: the memory address where the Configuration File is located
 * @finiver: return value for CF [fini] version
 * @finicsum: return value for CF [fini] checksum
 * @cfcsum: return value for CF computed checksum
 *
 * Issue a command to get the firmware to process the Configuration
 * File located at the specified mtype/maddress.  If the Configuration
 * File is processed successfully and return value pointers are
 * provided, the Configuration File "[fini]" section version and
 * checksum values will be returned along with the computed checksum.
 * It's up to the caller to decide how it wants to respond to the
 * checksums not matching, but it is recommended that a prominent warning
 * be emitted in order to help people rapidly identify changed or
 * corrupted Configuration Files.
 *
 * Also note that it's possible to modify things like "niccaps",
 * "toecaps", etc. between processing the Configuration File and telling
 * the firmware to use the new configuration.  Callers which want to
 * do this will need to "hand-roll" their own CAPS_CONFIG commands for
 * Configuration Files if they want to do this.
 */
static int
csio_hw_fw_config_file(struct csio_hw *hw,
		       unsigned int mtype, unsigned int maddr,
		       uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum)
{
	struct csio_mb	*mbp;
	struct fw_caps_config_cmd *caps_cmd;
	int rv = -EINVAL;
	enum fw_retval ret;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}
	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
		goto out;
	}

	ret = csio_mb_fw_retval(mbp);
	if (ret != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", ret);
		goto out;
	}

	if (finiver)
		*finiver = ntohl(caps_cmd->finiver);
	if (finicsum)
		*finicsum = ntohl(caps_cmd->finicsum);
	if (cfcsum)
		*cfcsum = ntohl(caps_cmd->cfcsum);

	/* Validate device capabilities */
	if (csio_hw_validate_caps(hw, mbp)) {
		rv = -ENOENT;
		goto out;
	}

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_WRITE);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
		goto out;
	}

	ret = csio_mb_fw_retval(mbp);
	if (ret != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", ret);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm	= csio_hw_to_wrm(hw);
	struct csio_mb	*mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports	= hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	if (csio_hw_validate_caps(hw, mbp))
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
static int
csio_config_global_rss(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_rss_glb_config(hw, mbp, CSIO_MB_DEFAULT_TMO,
			    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
			    FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
			    FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
			    FW_RSS_GLB_CONFIG_CMD_TNLALLLKP,
			    NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_RSS_GLB_CONFIG_CMD failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_RSS_GLB_CONFIG_CMD returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_config_pfvf - Configure Physical/Virtual functions settings.
 * @hw: HW module
 *
 */
static int
csio_config_pfvf(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * For now, allow all PFs to access to all ports using a pmask
	 * value of 0xF (M_FW_PFVF_CMD_PMASK). Once we have VFs, we will
	 * need to provide access based on some rule.
	 */
	csio_mb_pfvf(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, CSIO_NEQ,
		     CSIO_NETH_CTRL, CSIO_NIQ_FLINT, 0, 0, CSIO_NVI, CSIO_CMASK,
		     CSIO_PMASK, CSIO_NEXACTF, CSIO_R_CAPS, CSIO_WX_CAPS, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PFVF_CMD failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PFVF_CMD returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_enable_ports - Bring up all available ports.
 * @hw: HW module.
 *
 */
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb  *mbp;
	enum fw_retval retval;
	uint8_t portid;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, 0, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval,
					      &hw->pport[i].pcap);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		/* Write back PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
			     (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

	} /* For all ports */

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_get_fcoe_resinfo - Read fcoe fw resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb  *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource information */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	*param = _param[0];

	return 0;
}
static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;

	ret = request_firmware(&cf, CSIO_CF_FNAME(hw), dev);
	if (ret < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 CSIO_CF_FNAME(hw), ret);
		return -ENOENT;
	}

	if (cf->size % 4 != 0)
		value_to_add = 4 - (cf->size % 4);

	cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n",
			  CSIO_CF_FNAME(hw));
		snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}
/*
 * HW initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration -- either using the configuration
 * file stored in flash on the adapter or using a filesystem-local file
 * if available.
 *
 * If we don't have configuration file support in the firmware, then we'll
 * have to set things up the old fashioned way with hard-coded register
 * writes and firmware commands ...
 */

/*
 * Attempt to initialize the HW via a Firmware Configuration File.
 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	unsigned int mtype, maddr;
	int rv;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	int using_flash;
	char path[64];

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file in host,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the HW flash ...
	 */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv != 0) {
		if (rv == -ENOENT) {
			/*
			 * config file was not found. Use default
			 * config file from flash.
			 */
			mtype = FW_MEMTYPE_CF_FLASH;
			maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
			using_flash = 1;
		} else {
			/*
			 * we revert back to the hardwired config if
			 * flashing failed.
			 */
			goto bye;
		}
	} else {
		mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
		using_flash = 0;
	}

	hw->cfg_store = (uint8_t)mtype;

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.
	 */
	rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver,
				    &finicsum, &cfcsum);
	if (rv != 0)
		goto bye;

	hw->cfg_finiver		= finiver;
	hw->cfg_finicsum	= finicsum;
	hw->cfg_cfcsum		= cfcsum;
	hw->cfg_csum_status	= true;

	if (finicsum != cfcsum) {
		csio_warn(hw,
			  "Config File checksum mismatch: csum=%#x, computed=%#x\n",
			  finicsum, cfcsum);
		hw->cfg_csum_status = false;
	}

	/*
	 * Note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw,
		  "Firmware Configuration File %s, version %#x, computed checksum %#x\n",
		  (using_flash ? "in device FLASH" : path), finiver, cfcsum);

	return 0;

	/*
	 * Something bad happened.  Return the error ...
	 */
bye:
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_dbg(hw, "Configuration file error %d\n", rv);
	return rv;
}
/*
 * Attempt to initialize the adapter via hard-coded, driver supplied
 * parameters ...
 */
static int
csio_hw_no_fwconfig(struct csio_hw *hw, int reset)
{
	int rv;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto out;
	}

	/* Get and set device capabilities */
	rv = csio_config_device_caps(hw);
	if (rv != 0)
		goto out;

	/* Config Global RSS command */
	rv = csio_config_global_rss(hw);
	if (rv != 0)
		goto out;

	/* Configure PF/VF capabilities of device */
	rv = csio_config_pfvf(hw);
	if (rv != 0)
		goto out;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto out;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

out:
	return rv;
}
/*
 * Returns -EINVAL if attempts to flash the firmware failed,
 * 0 on success.  If flashing was not attempted because the card
 * already had the latest firmware, -ECANCELED is returned.
 */
static int
csio_hw_flash_fw(struct csio_hw *hw)
{
	int ret = -ECANCELED;
	const struct firmware *fw;
	const struct fw_hdr *hdr;
	u32 fw_ver;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;

	if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
		csio_err(hw, "could not find firmware image %s, err: %d\n",
			 CSIO_FW_FNAME(hw), ret);
		return -EINVAL;
	}

	hdr = (const struct fw_hdr *)fw->data;
	fw_ver = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw)) {
		release_firmware(fw);
		return -EINVAL;      /* wrong major version, won't do */
	}

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
	    fw_ver > hw->fwrev) {
		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
					 /*force=*/false);
		if (!ret)
			csio_info(hw,
				  "firmware upgraded to version %pI4 from %s\n",
				  &hdr->fw_ver, CSIO_FW_FNAME(hw));
		else
			csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
	}

	release_firmware(fw);

	return ret;
}
/*
 * csio_hw_configure - Configure HW
 * @hw: HW module
 *
 */
static void
csio_hw_configure(struct csio_hw *hw)
{
	int reset = 1;
	int rv;
	u32 param[1];

	rv = csio_hw_dev_ready(hw);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* HW version */
	hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);

	/* Needed for FW download */
	rv = csio_hw_get_flash_params(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Set pci completion timeout value to 4 seconds. */
	csio_set_pcie_completion_timeout(hw, 0xd);

	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);

	rv = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (rv != 0)
		goto out;

	csio_hw_print_fw_version(hw, "Firmware revision");

	rv = csio_do_hello(hw, &hw->fw_state);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Read vpd */
	rv = csio_hw_get_vpd_params(hw, &hw->vpd);
	if (rv != 0)
		goto out;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_hw_check_fw_version(hw);
		if (rv == -EINVAL) {

			/* Do firmware update */
			spin_unlock_irq(&hw->lock);
			rv = csio_hw_flash_fw(hw);
			spin_lock_irq(&hw->lock);

			if (rv == 0) {
				reset = 0;
				/*
				 * Note that the chip was reset as part of the
				 * firmware upgrade so we don't reset it again
				 * below and grab the new firmware version.
				 */
				rv = csio_hw_check_fw_version(hw);
			}
		}
		/*
		 * If the firmware doesn't support Configuration
		 * Files, use the old Driver-based, hard-wired
		 * initialization.  Otherwise, try using the
		 * Configuration File support and fall back to the
		 * Driver-based initialization if there's no
		 * Configuration File found.
		 */
		if (csio_hw_check_fwconfig(hw, param) == 0) {
			rv = csio_hw_use_fwconfig(hw, reset, param);
			if (rv == -ENOENT)
				goto out;
			if (rv != 0) {
				csio_info(hw,
					  "No Configuration File present "
					  "on adapter. Using hard-wired "
					  "configuration parameters.\n");
				rv = csio_hw_no_fwconfig(hw, reset);
			}
		} else {
			rv = csio_hw_no_fwconfig(hw, reset);
		}

		if (rv != 0)
			goto out;

	} else {
		if (hw->fw_state == CSIO_DEV_STATE_INIT) {

			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

			/* device parameters */
			rv = csio_get_device_params(hw);
			if (rv != 0)
				goto out;

			/* Get device capabilities */
			rv = csio_config_device_caps(hw);
			if (rv != 0)
				goto out;

			/* Configure SGE */
			csio_wr_sge_init(hw);

			/* Post event to notify completion of configuration */
			csio_post_event(&hw->sm, CSIO_HWE_INIT);
		}
	} /* if not master */

out:
	return;
}
/*
 * csio_hw_initialize - Initialize HW
 * @hw: HW module
 *
 */
static void
csio_hw_initialize(struct csio_hw *hw)
{
	struct csio_mb	*mbp;
	enum fw_retval retval;
	int rv;
	int i;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp)
			goto out;

		csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
			goto free_and_out;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
				 retval);
			goto free_and_out;
		}

		mempool_free(mbp, hw->mb_mempool);
	}

	rv = csio_get_fcoe_resinfo(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
		goto out;
	}

	spin_unlock_irq(&hw->lock);
	rv = csio_config_queues(hw);
	spin_lock_irq(&hw->lock);

	if (rv != 0) {
		csio_err(hw, "Config of queues failed!: %d\n", rv);
		goto out;
	}

	for (i = 0; i < hw->num_pports; i++)
		hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_enable_ports(hw);
		if (rv != 0) {
			csio_err(hw, "Failed to enable ports: %d\n", rv);
			goto out;
		}
	}

	csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
	return;

free_and_out:
	mempool_free(mbp, hw->mb_mempool);
out:
	return;
}
#define PF_INTR_MASK (PFSW | PFCIM)

/*
 * csio_hw_intr_enable - Enable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Enable interrupts in HW registers.
 */
static void
csio_hw_intr_enable(struct csio_hw *hw)
{
	uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
	uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
	uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);

	/*
	 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
	 * by FW, so do nothing for INTX.
	 */
	if (hw->intr_mode == CSIO_IM_MSIX)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
				   AIVEC(AIVEC_MASK), vec);
	else if (hw->intr_mode == CSIO_IM_MSI)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
				   AIVEC(AIVEC_MASK), 0);

	csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));

	/* Turn on MB interrupts - this will internally flush PIO as well */
	csio_mb_intr_enable(hw);

	/* These are common registers - only a master can modify them */
	if (csio_is_hw_master(hw)) {
		/*
		 * Disable the Serial FLASH interrupt, if enabled!
		 */
		pl &= (~SF);
		csio_wr_reg32(hw, pl, PL_INT_ENABLE);

		csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE |
			      EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC |
			      ERR_CPL_OPCODE_0 | ERR_DROPPED_DB |
			      ERR_DATA_CPL_ON_HIGH_QID1 |
			      ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
			      ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
			      ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
			      ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR,
			      SGE_INT_ENABLE3);
		csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
	}

	hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
}
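/*
 * PF_INTR_MASK above enables the two per-PF interrupt sources this driver
 * cares about: PFSW (software-generated PF interrupts) and PFCIM (CIM
 * mailbox activity).  The SGE error bits, by contrast, are written only by
 * the master PF because (as the comment above notes) those are chip-global
 * registers shared by all functions.
 */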
/*
 * csio_hw_intr_disable - Disable HW interrupts
 * @hw: Pointer to HW module.
 *
 * Turn off Mailbox and PCI_PF_CFG interrupts.
 */
void
csio_hw_intr_disable(struct csio_hw *hw)
{
	uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));

	if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
		return;

	hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;

	csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
	if (csio_is_hw_master(hw))
		csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);

	/* Turn off MB interrupts */
	csio_mb_intr_disable(hw);
}

void
csio_hw_fatal_err(struct csio_hw *hw)
{
	csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
	csio_hw_intr_disable(hw);

	/* Do not reset HW, we may need FW state for debugging */
	csio_fatal(hw, "HW Fatal error encountered!\n");
}

/*****************************************************************************/
/* START: HW SM                                                              */
/*****************************************************************************/
/*
 * csio_hws_uninit - Uninit state
 * @hw: HW module
 * @evt: Event
 */
static void
csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_CFG:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
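
/*
 * Note: every csio_hws_* state handler below follows the shape of
 * csio_hws_uninit() above -- remember the previous event, count the event
 * in n_evt_sm[], then dispatch on it in a switch, counting anything
 * unexpected in n_evt_unexp. Only the state-specific cases differ.
 */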

/*
 * csio_hws_configuring - Configuring state
 * @hw: HW module
 * @evt: Event
 */
static void
csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT:
		csio_set_state(&hw->sm, csio_hws_initializing);
		csio_hw_initialize(hw);
		break;

	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);
		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_initializing - Initializing state
 * @hw: HW module
 * @evt: Event
 */
static void
csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);

		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);

		/* Enable interrupts */
		csio_hw_intr_enable(hw);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_ready - Ready state
 * @hw: HW module
 * @evt: Event
 */
static void
csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
{
	/* Remember the event */
	hw->evtflag = evt;

	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
	case CSIO_HWE_FW_DLOAD:
	case CSIO_HWE_SUSPEND:
	case CSIO_HWE_PCI_REMOVE:
	case CSIO_HWE_PCIERR_DETECTED:
		csio_set_state(&hw->sm, csio_hws_quiescing);
		/* cleanup all outstanding cmds */
		if (evt == CSIO_HWE_HBA_RESET ||
		    evt == CSIO_HWE_PCIERR_DETECTED)
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
		else
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);

		csio_hw_intr_disable(hw);
		csio_hw_mbm_cleanup(hw);
		csio_evtq_stop(hw);
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
		csio_evtq_flush(hw);
		csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
		csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_quiescing - Quiescing state
 * @hw: HW module
 * @evt: Event
 */
static void
csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_QUIESCED:
		switch (hw->evtflag) {
		case CSIO_HWE_FW_DLOAD:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Download firmware */
			/* Fall through */

		case CSIO_HWE_HBA_RESET:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Start reset of the HBA */
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
			csio_wr_destroy_queues(hw, false);
			csio_do_reset(hw, false);
			csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
			break;

		case CSIO_HWE_PCI_REMOVE:
			csio_set_state(&hw->sm, csio_hws_removing);
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
			csio_wr_destroy_queues(hw, true);
			/* Now send the bye command */
			csio_do_bye(hw);
			break;

		case CSIO_HWE_SUSPEND:
			csio_set_state(&hw->sm, csio_hws_quiesced);
			break;

		case CSIO_HWE_PCIERR_DETECTED:
			csio_set_state(&hw->sm, csio_hws_pcierr);
			csio_wr_destroy_queues(hw, false);
			break;

		default:
			CSIO_INC_STATS(hw, n_evt_unexp);
			break;
		}
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_quiesced - Quiesced state
 * @hw: HW module
 * @evt: Event
 */
static void
csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_RESUME:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_resetting - HW Resetting state
 * @hw: HW module
 * @evt: Event
 */
static void
csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET_DONE:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_removing - PCI Hotplug removing state
 * @hw: HW module
 * @evt: Event
 */
static void
csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
		if (!csio_is_hw_master(hw))
			break;
		/*
		 * The BYE should have already been issued, so we can't
		 * use the mailbox interface. Hence we use the PL_RST
		 * register directly.
		 */
		csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		mdelay(2000);
		break;

	/* Should never receive any new events */
	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*
 * csio_hws_pcierr - PCI Error state
 * @hw: HW module
 * @evt: Event
 */
static void
csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_PCIERR_SLOT_RESET:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}

/*****************************************************************************/
/* END: HW SM                                                                */
/*****************************************************************************/

/*
 * csio_handle_intr_status - table driven interrupt handler
 * @hw: HW module
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred. The actions include
 * optionally emitting a warning or alert message. The table is terminated
 * by an entry specifying mask 0. Returns the number of fatal interrupt
 * conditions.
 */
static int
csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
			const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = csio_rd_reg32(hw, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			csio_fatal(hw, "Fatal %s (0x%x)\n",
				   acts->msg, status & acts->mask);
		} else if (acts->msg)
			csio_info(hw, "%s (0x%x)\n",
				  acts->msg, status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		csio_wr_reg32(hw, status, reg);
	return fatal;
}
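
/*
 * Usage sketch (illustrative only, not driver code): a block's interrupt
 * handler builds a zero-mask-terminated intr_info table and escalates any
 * fatal hit. The EXAMPLE_* names are placeholders, not real registers:
 *
 *	static struct intr_info example_intr_info[] = {
 *		{ EXAMPLE_PARERR, "example parity error", -1, 1 },
 *		{ 0, NULL, 0, 0 }
 *	};
 *
 *	if (csio_handle_intr_status(hw, EXAMPLE_INT_CAUSE,
 *				    example_intr_info))
 *		csio_hw_fatal_err(hw);
 */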

/*
 * TP interrupt handler.
 */
static void csio_tp_intr_handler(struct csio_hw *hw)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * SGE interrupt handler.
 */
static void csio_sge_intr_handler(struct csio_hw *hw)
{
	uint64_t v;

	static struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0, NULL, 0, 0 }
	};

	v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) |
	    ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32);
	if (v) {
		csio_fatal(hw, "SGE parity error (%#llx)\n",
			   (unsigned long long)v);
		csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
			      SGE_INT_CAUSE1);
		csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2);
	}

	v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info);

	if (csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info) ||
	    v != 0)
		csio_hw_fatal_err(hw);
}

#define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\
		      OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR)
#define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\
		      IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR)

/*
 * CIM interrupt handler.
 */
static void csio_cim_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE,
				      cim_intr_info) +
	      csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE,
				      cim_upintr_info);
	if (fat)
		csio_hw_fatal_err(hw);
}

/*
 * ULP RX interrupt handler.
 */
static void csio_ulprx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * ULP TX interrupt handler.
 */
static void csio_ulptx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * PM TX interrupt handler.
 */
static void csio_pmtx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
		  1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * PM RX interrupt handler.
 */
static void csio_pmrx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
		  1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * CPL switch interrupt handler.
 */
static void csio_cplsw_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * LE interrupt handler.
 */
static void csio_le_intr_handler(struct csio_hw *hw)
{
	static struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * MPS interrupt handler.
 */
static void csio_mps_intr_handler(struct csio_hw *hw)
{
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
		{ BUBBLE, "MPS Tx underflow", -1, 1 },
		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	int fat;

	fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
				      mps_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
				      mps_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
				      mps_trc_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
				      mps_stat_sram_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				      mps_stat_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				      mps_stat_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
				      mps_cls_intr_info);

	csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
	csio_rd_reg32(hw, MPS_INT_CAUSE);	/* flush */
	if (fat)
		csio_hw_fatal_err(hw);
}

#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr));

		csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr);
		csio_warn(hw, "%u %s correctable ECC data error%s\n",
			  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);

	csio_wr_reg32(hw, v, addr);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		csio_hw_fatal_err(hw);
}

/*
 * MA interrupt handler.
 */
static void csio_ma_intr_handler(struct csio_hw *hw)
{
	uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		csio_fatal(hw, "MA parity error, parity status %#x\n",
			   csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS);
		csio_fatal(hw,
		"MA address wrap-around error by client %u to address %#x\n",
		MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	csio_wr_reg32(hw, status, MA_INT_CAUSE);
	csio_hw_fatal_err(hw);
}

/*
 * SMB interrupt handler.
 */
static void csio_smb_intr_handler(struct csio_hw *hw)
{
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * NC-SI interrupt handler.
 */
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * XGMAC interrupt handler.
 */
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
	uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));

	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR)
		csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
	if (v & RXFIFO_PRTY_ERR)
		csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
	csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
	csio_hw_fatal_err(hw);
}

/*
 * PL interrupt handler.
 */
static void csio_pl_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * csio_hw_slow_intr_handler - control path interrupt handler
 * @hw: HW module.
 *
 * Interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 */
int
csio_hw_slow_intr_handler(struct csio_hw *hw)
{
	uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);

	if (!(cause & CSIO_GLBL_INTR_MASK)) {
		CSIO_INC_STATS(hw, n_plint_unexp);
		return 0;
	}

	csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);

	CSIO_INC_STATS(hw, n_plint_cnt);

	if (cause & CIM)
		csio_cim_intr_handler(hw);

	if (cause & MPS)
		csio_mps_intr_handler(hw);

	if (cause & NCSI)
		csio_ncsi_intr_handler(hw);

	if (cause & PL)
		csio_pl_intr_handler(hw);

	if (cause & SMB)
		csio_smb_intr_handler(hw);

	if (cause & XGMAC0)
		csio_xgmac_intr_handler(hw, 0);

	if (cause & XGMAC1)
		csio_xgmac_intr_handler(hw, 1);

	if (cause & XGMAC_KR0)
		csio_xgmac_intr_handler(hw, 2);

	if (cause & XGMAC_KR1)
		csio_xgmac_intr_handler(hw, 3);

	if (cause & PCIE)
		hw->chip_ops->chip_pcie_intr_handler(hw);

	if (cause & MC)
		csio_mem_intr_handler(hw, MEM_MC);

	if (cause & EDC0)
		csio_mem_intr_handler(hw, MEM_EDC0);

	if (cause & EDC1)
		csio_mem_intr_handler(hw, MEM_EDC1);

	if (cause & LE)
		csio_le_intr_handler(hw);

	if (cause & TP)
		csio_tp_intr_handler(hw);

	if (cause & MA)
		csio_ma_intr_handler(hw);

	if (cause & PM_TX)
		csio_pmtx_intr_handler(hw);

	if (cause & PM_RX)
		csio_pmrx_intr_handler(hw);

	if (cause & ULP_RX)
		csio_ulprx_intr_handler(hw);

	if (cause & CPL_SWITCH)
		csio_cplsw_intr_handler(hw);

	if (cause & SGE)
		csio_sge_intr_handler(hw);

	if (cause & ULP_TX)
		csio_ulptx_intr_handler(hw);

	/* Clear the interrupts just processed for which we are the master. */
	csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
	csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */

	return 1;
}
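
/*
 * Illustrative flow (an assumption about the caller, not code from this
 * file): an ISR would typically try this slow (error) path first and
 * treat a zero return as "no global interrupt pending":
 *
 *	if (csio_hw_slow_intr_handler(hw))
 *		return IRQ_HANDLED;
 */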

/*****************************************************************************
 * HW <--> mailbox interfacing routines.
 ****************************************************************************/
/*
 * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
 *
 * @data: Private data pointer.
 *
 * Called from worker thread context.
 */
static void
csio_mberr_worker(void *data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mbm *mbm = &hw->mbm;
	LIST_HEAD(cbfn_q);
	struct csio_mb *mbp_next;
	int rv;

	del_timer_sync(&mbm->timer);

	spin_lock_irq(&hw->lock);
	if (list_empty(&mbm->cbfn_q)) {
		spin_unlock_irq(&hw->lock);
		return;
	}

	list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
	mbm->stats.n_cbfnq = 0;

	/* Try to start waiting mailboxes */
	if (!list_empty(&mbm->req_q)) {
		mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
		list_del_init(&mbp_next->list);

		rv = csio_mb_issue(hw, mbp_next);
		if (rv != 0)
			list_add_tail(&mbp_next->list, &mbm->req_q);
		else
			CSIO_DEC_STATS(mbm, n_activeq);
	}
	spin_unlock_irq(&hw->lock);

	/* Now callback completions */
	csio_mb_completions(hw, &cbfn_q);
}

/*
 * csio_hw_mb_timer - Top-level Mailbox timeout handler.
 *
 * @data: private data pointer
 *
 */
static void
csio_hw_mb_timer(uintptr_t data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mb *mbp = NULL;

	spin_lock_irq(&hw->lock);
	mbp = csio_mb_tmo_handler(hw);
	spin_unlock_irq(&hw->lock);

	/* Call back the function for the timed-out Mailbox */
	if (mbp)
		mbp->mb_cbfn(hw, mbp);
}

/*
 * csio_hw_mbm_cleanup - Cleanup Mailbox module.
 * @hw: HW module.
 *
 * Called with lock held, should exit with lock held.
 * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
 * into a local queue. Drops lock and calls the completions. Holds
 * lock and returns.
 */
static void
csio_hw_mbm_cleanup(struct csio_hw *hw)
{
	LIST_HEAD(cbfn_q);

	csio_mb_cancel_all(hw, &cbfn_q);

	spin_unlock_irq(&hw->lock);
	csio_mb_completions(hw, &cbfn_q);
	spin_lock_irq(&hw->lock);
}

/*****************************************************************************
 * Event handling
 ****************************************************************************/
int
csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		 uint16_t len)
{
	struct csio_evt_msg *evt_entry = NULL;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	if (hw->flags & CSIO_HWF_FWEVT_STOP)
		return -EINVAL;

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		return -ENOMEM;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;
	memcpy((void *)evt_entry->data, evt_msg, len);
	list_add_tail(&evt_entry->list, &hw->evt_active_q);

	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);

	return 0;
}
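
/*
 * Usage sketch (illustrative only): callers pass a small message by value;
 * e.g. a device-loss notification copies an rnode pointer into the event
 * data, mirroring how csio_evtq_worker() reads it back with memcpy():
 *
 *	struct csio_rnode *rn = ...;
 *
 *	if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn)))
 *		CSIO_INC_STATS(hw, n_evt_drop);
 */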

static int
csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		      uint16_t len, bool msg_sg)
{
	struct csio_evt_msg *evt_entry = NULL;
	struct csio_fl_dma_buf *fl_sg;
	uint32_t off = 0;
	unsigned long flags;
	int n, ret = 0;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&hw->lock, flags);
	if (hw->flags & CSIO_HWF_FWEVT_STOP) {
		ret = -EINVAL;
		goto out;
	}

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		ret = -ENOMEM;
		goto out;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* copy event msg and queue the event */
	evt_entry->type = type;

	/* If Payload in SG list */
	if (msg_sg) {
		fl_sg = (struct csio_fl_dma_buf *) evt_msg;
		for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
			memcpy((void *)((uintptr_t)evt_entry->data + off),
			       fl_sg->flbufs[n].vaddr,
			       fl_sg->flbufs[n].len);
			off += fl_sg->flbufs[n].len;
		}
	} else
		memcpy((void *)evt_entry->data, evt_msg, len);

	list_add_tail(&evt_entry->list, &hw->evt_active_q);
	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);
out:
	spin_unlock_irqrestore(&hw->lock, flags);
	return ret;
}

static void
csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
{
	if (evt_entry) {
		spin_lock_irq(&hw->lock);
		list_del_init(&evt_entry->list);
		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_DEC_STATS(hw, n_evt_activeq);
		CSIO_INC_STATS(hw, n_evt_freeq);
		spin_unlock_irq(&hw->lock);
	}
}

void
csio_evtq_flush(struct csio_hw *hw)
{
	uint32_t count;

	count = 30;
	while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
}

void
csio_evtq_stop(struct csio_hw *hw)
{
	hw->flags |= CSIO_HWF_FWEVT_STOP;
}

void
csio_evtq_start(struct csio_hw *hw)
{
	hw->flags &= ~CSIO_HWF_FWEVT_STOP;
}

static void
csio_evtq_cleanup(struct csio_hw *hw)
{
	struct list_head *evt_entry, *next_entry;

	/* Release outstanding events from activeq to freeq */
	if (!list_empty(&hw->evt_active_q))
		list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);

	hw->stats.n_evt_activeq = 0;
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;

	/* Freeup event entry */
	list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
		kfree(evt_entry);
		CSIO_DEC_STATS(hw, n_evt_freeq);
	}

	hw->stats.n_evt_freeq = 0;
}

static void
csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
			  struct csio_fl_dma_buf *flb, void *priv)
{
	__u8 op;
	void *msg = NULL;
	uint32_t msg_len = 0;
	bool msg_sg = 0;

	op = ((struct rss_header *) wr)->opcode;
	if (op == CPL_FW6_PLD) {
		CSIO_INC_STATS(hw, n_cpl_fw6_pld);
		if (!flb || !flb->totlen) {
			CSIO_INC_STATS(hw, n_cpl_unexp);
			return;
		}

		msg = (void *) flb;
		msg_len = flb->totlen;
		msg_sg = 1;
	} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {

		CSIO_INC_STATS(hw, n_cpl_fw6_msg);
		/* skip RSS header */
		msg = (void *)((uintptr_t)wr + sizeof(__be64));
		msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
			  sizeof(struct cpl_fw4_msg);
	} else {
		csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
		CSIO_INC_STATS(hw, n_cpl_unexp);
		return;
	}

	/*
	 * Enqueue event to EventQ. Events processing happens
	 * in Event worker thread context
	 */
	if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
				  (uint16_t)msg_len, msg_sg))
		CSIO_INC_STATS(hw, n_evt_drop);
}

static void
csio_evtq_worker(struct work_struct *work)
{
	struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
	struct list_head *evt_entry, *next_entry;
	LIST_HEAD(evt_q);
	struct csio_evt_msg *evt_msg;
	struct cpl_fw6_msg *msg;
	struct csio_rnode *rn;
	int rv = 0;
	uint8_t evtq_stop = 0;

	csio_dbg(hw, "event worker thread active evts#%d\n",
		 hw->stats.n_evt_activeq);

	spin_lock_irq(&hw->lock);
	while (!list_empty(&hw->evt_active_q)) {
		list_splice_tail_init(&hw->evt_active_q, &evt_q);
		spin_unlock_irq(&hw->lock);

		list_for_each_safe(evt_entry, next_entry, &evt_q) {
			evt_msg = (struct csio_evt_msg *) evt_entry;

			/* Drop events if queue is STOPPED */
			spin_lock_irq(&hw->lock);
			if (hw->flags & CSIO_HWF_FWEVT_STOP)
				evtq_stop = 1;
			spin_unlock_irq(&hw->lock);
			if (evtq_stop) {
				CSIO_INC_STATS(hw, n_evt_drop);
				goto free_evt;
			}

			switch (evt_msg->type) {
			case CSIO_EVT_FW:
				msg = (struct cpl_fw6_msg *)(evt_msg->data);

				if ((msg->opcode == CPL_FW6_MSG ||
				     msg->opcode == CPL_FW4_MSG) &&
				    !msg->type) {
					rv = csio_mb_fwevt_handler(hw,
								   msg->data);
					if (rv == 0)
						break;
					/* Handle any remaining fw events */
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else if (msg->opcode == CPL_FW6_PLD) {

					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else {
					csio_warn(hw,
					     "Unhandled FW msg op %x type %x\n",
						  msg->opcode, msg->type);
					CSIO_INC_STATS(hw, n_evt_drop);
				}
				break;

			case CSIO_EVT_MBX:
				csio_mberr_worker(hw);
				break;

			case CSIO_EVT_DEV_LOSS:
				memcpy(&rn, evt_msg->data, sizeof(rn));
				csio_rnode_devloss_handler(rn);
				break;

			default:
				csio_warn(hw, "Unhandled event %x on evtq\n",
					  evt_msg->type);
				CSIO_INC_STATS(hw, n_evt_unexp);
				break;
			}

free_evt:
			csio_free_evt(hw, evt_msg);
		}

		spin_lock_irq(&hw->lock);
	}

	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
	spin_unlock_irq(&hw->lock);
}

int
csio_fwevtq_handler(struct csio_hw *hw)
{
	int rv;

	if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
		CSIO_INC_STATS(hw, n_int_stray);
		return -EINVAL;
	}

	rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
				    csio_process_fwevtq_entry, NULL);
	return rv;
}
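
/*
 * Illustrative flow (an assumption about the caller): the interrupt path
 * drains the FW event queue and defers processing to the worker:
 *
 *	csio_fwevtq_handler(hw);	 (enqueue events under hw->lock)
 *	schedule_work(&hw->evtq_work);	 (csio_evtq_worker drains them)
 */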

/****************************************************************************
 * Entry points
 ****************************************************************************/

/* Management module */
/*
 * csio_mgmt_req_lookup - Check if the given IO req exists in the Active Q.
 * mgmt - mgmt module
 * @io_req - io request
 *
 * Return - 0: if the given IO Req exists in the active Q.
 *          -EINVAL: if the lookup fails.
 */
int
csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
{
	struct list_head *tmp;

	/* Lookup ioreq in the ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		if (io_req == (struct csio_ioreq *)tmp)
			return 0;
	}
	return -EINVAL;
}

#define	ECM_MIN_TMO	1000	/* Minimum timeout value for req */

/*
 * csio_mgmt_tmo_handler - MGMT IO timeout handler.
 * @data - Event data.
 *
 * Return - none.
 */
static void
csio_mgmt_tmo_handler(uintptr_t data)
{
	struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
	struct list_head *tmp;
	struct csio_ioreq *io_req;

	csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");

	spin_lock_irq(&mgmtm->hw->lock);

	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);

		if (!io_req->tmo) {
			/* Dequeue the request from retry Q. */
			tmp = csio_list_prev(tmp);
			list_del_init(&io_req->sm.sm_list);
			if (io_req->io_cbfn) {
				/* io_req will be freed by completion handler */
				io_req->wr_status = -ETIMEDOUT;
				io_req->io_cbfn(mgmtm->hw, io_req);
			} else {
				CSIO_DB_ASSERT(0);
			}
		}
	}

	/* If retry queue is not empty, re-arm timer */
	if (!list_empty(&mgmtm->active_q))
		mod_timer(&mgmtm->mgmt_timer,
			  jiffies + msecs_to_jiffies(ECM_MIN_TMO));
	spin_unlock_irq(&mgmtm->hw->lock);
}

static void
csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
{
	struct csio_hw *hw = mgmtm->hw;
	struct csio_ioreq *io_req;
	struct list_head *tmp;
	uint32_t count;

	count = 30;
	/* Wait for all outstanding req to complete gracefully */
	while ((!list_empty(&mgmtm->active_q)) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	/* release outstanding req from ACTIVEQ */
	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		tmp = csio_list_prev(tmp);
		list_del_init(&io_req->sm.sm_list);
		mgmtm->stats.n_active--;
		if (io_req->io_cbfn) {
			/* io_req will be freed by completion handler */
			io_req->wr_status = -ETIMEDOUT;
			io_req->io_cbfn(mgmtm->hw, io_req);
		}
	}
}

/*
 * csio_mgmtm_init - Mgmt module init entry point
 * @mgmtm - mgmt module
 * @hw - HW module
 *
 * Initialize mgmt timer, resource wait queue, active queue,
 * completion q. Allocate Egress and Ingress
 * WR queues and save off the queue index returned by the WR
 * module for future use. Allocate and save off mgmt reqs in the
 * mgmt_req_freelist for future use. Make sure their SM is initialized
 * to uninit state.
 *
 * Returns: 0 - on success
 *          -ENOMEM - on error.
 */
static int
csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
{
	struct timer_list *timer = &mgmtm->mgmt_timer;

	init_timer(timer);
	timer->function = csio_mgmt_tmo_handler;
	timer->data = (unsigned long)mgmtm;

	INIT_LIST_HEAD(&mgmtm->active_q);
	INIT_LIST_HEAD(&mgmtm->cbfn_q);

	mgmtm->hw = hw;
	/*mgmtm->iq_idx = hw->fwevt_iq_idx;*/

	return 0;
}
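
/*
 * Note (illustrative, assuming the classic pre-4.15 timer API used above):
 * the three open-coded setup steps in csio_mgmtm_init() are equivalent to
 *
 *	setup_timer(&mgmtm->mgmt_timer, csio_mgmt_tmo_handler,
 *		    (unsigned long)mgmtm);
 */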

/*
 * csio_mgmtm_exit - MGMT module exit entry point
 * @mgmtm - mgmt module
 *
 * This function is called during MGMT module uninit.
 * Stop timers, free ioreqs allocated.
 * Returns: None
 */
static void
csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
{
	del_timer_sync(&mgmtm->mgmt_timer);
}

/*
 * csio_hw_start - Kicks off the HW State machine
 * @hw: Pointer to HW module.
 *
 * It is assumed that the initialization is a synchronous operation.
 * So when we return after posting the event, the HW SM should be in
 * the ready state, if there were no errors during init.
 */
int
csio_hw_start(struct csio_hw *hw)
{
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_CFG);
	spin_unlock_irq(&hw->lock);

	if (csio_is_hw_ready(hw))
		return 0;
	else
		return -EINVAL;
}

int
csio_hw_stop(struct csio_hw *hw)
{
	csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);

	if (csio_is_hw_removing(hw))
		return 0;
	else
		return -EINVAL;
}

/* Max reset retries */
#define CSIO_MAX_RESET_RETRIES	3

/*
 * csio_hw_reset - Reset the hardware
 * @hw: HW module.
 *
 * Caller should hold lock across this function.
 */
int
csio_hw_reset(struct csio_hw *hw)
{
	if (!csio_is_hw_master(hw))
		return -EPERM;

	if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
		csio_dbg(hw, "Max hw reset attempts reached..");
		return -EINVAL;
	}

	hw->rst_retries++;
	csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);

	if (csio_is_hw_ready(hw)) {
		hw->rst_retries = 0;
		hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
		return 0;
	} else
		return -EINVAL;
}

/*
 * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
 * @hw: HW module.
 */
static void
csio_hw_get_device_id(struct csio_hw *hw)
{
	/* Is the adapter device id cached already ?*/
	if (csio_is_dev_id_cached(hw))
		return;

	/* Get the PCI vendor & device id */
	pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
			     &hw->params.pci.vendor_id);
	pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
			     &hw->params.pci.device_id);

	csio_dev_id_cached(hw);
	hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);

} /* csio_hw_get_device_id */

/*
 * csio_hw_set_description - Set the model, description of the hw.
 * @hw: HW module.
 * @ven_id: PCI Vendor ID
 * @dev_id: PCI Device ID
 */
static void
csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
{
	uint32_t adap_type, prot_type;

	if (ven_id == CSIO_VENDOR_ID) {
		prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
		adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);

		if (prot_type == CSIO_T4_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t4_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t4_fcoe_adapters[adap_type].description,
			       32);
		} else if (prot_type == CSIO_T5_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t5_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t5_fcoe_adapters[adap_type].description,
			       32);
		} else {
			char tempName[32] = "Chelsio FCoE Controller";
			memcpy(hw->model_desc, tempName, 32);
		}
	}
} /* csio_hw_set_description */

/*
 * csio_hw_init - Initialize HW module.
 * @hw: Pointer to HW module.
 *
 * Initialize the members of the HW module.
 */
int
csio_hw_init(struct csio_hw *hw)
{
	int rv = -EINVAL;
	uint32_t i;
	uint16_t ven_id, dev_id;
	struct csio_evt_msg *evt_entry;

	INIT_LIST_HEAD(&hw->sm.sm_list);
	csio_init_state(&hw->sm, csio_hws_uninit);
	spin_lock_init(&hw->lock);
	INIT_LIST_HEAD(&hw->sln_head);

	/* Get the PCI vendor & device id */
	csio_hw_get_device_id(hw);

	strcpy(hw->name, CSIO_HW_NAME);

	/* Initialize the HW chip ops with T4/T5 specific ops */
	hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;

	/* Set the model & its description */

	ven_id = hw->params.pci.vendor_id;
	dev_id = hw->params.pci.device_id;

	csio_hw_set_description(hw, ven_id, dev_id);

	/* Initialize default log level */
	hw->params.log_level = (uint32_t) csio_dbg_level;

	csio_set_fwevt_intr_idx(hw, -1);
	csio_set_nondata_intr_idx(hw, -1);

	/* Init all the modules: Mailbox, WorkRequest and Transport */
	if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
		goto err;

	rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
	if (rv)
		goto err_mbm_exit;

	rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
	if (rv)
		goto err_wrm_exit;

	rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
	if (rv)
		goto err_scsim_exit;

	/* Pre-allocate evtq and initialize them */
	INIT_LIST_HEAD(&hw->evt_active_q);
	INIT_LIST_HEAD(&hw->evt_free_q);
	for (i = 0; i < csio_evtq_sz; i++) {

		evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
		if (!evt_entry) {
			csio_err(hw, "Failed to initialize eventq");
			goto err_evtq_cleanup;
		}

		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_INC_STATS(hw, n_evt_freeq);
	}

	hw->dev_num = dev_num;
	dev_num++;

	return 0;

err_evtq_cleanup:
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
err_scsim_exit:
	csio_scsim_exit(csio_hw_to_scsim(hw));
err_wrm_exit:
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
err_mbm_exit:
	csio_mbm_exit(csio_hw_to_mbm(hw));
err:
	return rv;
}

/*
 * csio_hw_exit - Un-initialize HW module.
 * @hw: Pointer to HW module.
 *
 */
void
csio_hw_exit(struct csio_hw *hw)
{
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
	csio_scsim_exit(csio_hw_to_scsim(hw));
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
	csio_mbm_exit(csio_hw_to_mbm(hw));
}