/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"
/**
 *	t3_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
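/*
 * Example (illustrative only, not part of the driver): polling a
 * hypothetical status register until its busy bit deasserts, keeping the
 * final register value.  A_EXAMPLE_STATUS and F_EXAMPLE_BUSY are made-up
 * names standing in for a real register/field pair.
 *
 *	u32 status;
 *	int ret = t3_wait_op_done_val(adapter, A_EXAMPLE_STATUS,
 *				      F_EXAMPLE_BUSY, 0, 10, 5, &status);
 *	if (ret)	// -EAGAIN: bit still set after 10 polls
 *		return ret;
 *	// status holds the register value observed at completion
 */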
/**
 *	t3_write_regs - write a bunch of registers
 *	@adapter: the adapter to program
 *	@p: an array of register address/register value pairs
 *	@n: the number of address/value pairs
 *	@offset: register address offset
 *
 *	Takes an array of register address/register value pairs and writes each
 *	value to the corresponding register.  Register addresses are adjusted
 *	by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}
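/*
 * Example (illustrative, with a made-up table): programming a group of
 * registers in one call rather than via individual t3_write_reg() calls,
 * the same pattern t3_intr_enable() uses later in this file.
 *
 *	static const struct addr_val_pair example_avp[] = {
 *		{A_EXAMPLE_REG0, 0x1},
 *		{A_EXAMPLE_REG1, 0xff},
 *	};
 *	t3_write_regs(adapter, example_avp, ARRAY_SIZE(example_avp), 0);
 */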
/**
 *	t3_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
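/*
 * Example (illustrative): t3_set_reg_field() is a read-modify-write, so only
 * the bits selected by the mask change.  To set a hypothetical 4-bit field
 * occupying bits 7:4 of A_EXAMPLE_REG (a made-up register) to 5:
 *
 *	t3_set_reg_field(adapter, A_EXAMPLE_REG, 0xf0, 5 << 4);
 */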
/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@start_idx: index of first indirect register to read
 *	@nregs: how many indirect registers to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}
/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
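/*
 * Illustration of the backdoor assembly above: with a 32-bit MC7 interface
 * (width 1, step 32) each 64-bit word is built from two reads, i = 1
 * contributing bits 63:32 and i = 0 bits 31:0.  Narrower interfaces (width
 * 2 or 3) take 4 or 8 reads, each first shifted down by shift[width] to
 * align the data lanes and then placed at bit position step[width] * i.
 */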
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
	    V_CLKDIV(clkdiv);

	if (!(ai->caps & SUPPORTED_10000baseT_Full))
		val |= V_ST(1);
	t3_write_reg(adap, A_MI1_CFG, val);
}
#define MDIO_ATTEMPTS 10

/*
 * MI1 read/write operations for direct-addressed PHYs.
 */
static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
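/*
 * Sketch of the MI1 direct (clause 22) access sequence implemented above,
 * assuming V_MDI_OP(2) encodes a read and V_MDI_OP(1) a write, as the code
 * implies: load A_MI1_ADDR with the PHY/register address (plus A_MI1_DATA
 * for writes), kick A_MI1_OP, then poll F_BUSY via t3_wait_op_done() before
 * touching A_MI1_DATA.  The mdio_lock serializes the register triplet
 * between concurrent MDIO users.
 */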
static const struct mdio_ops mi1_mdio_ops = {
	mi1_read,
	mi1_write
};
/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
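/*
 * Example (illustrative): clear the power-down bit and set the reset bit of
 * the BMCR in a single read-modify-write, exactly as t3_phy_reset() below
 * does:
 *
 *	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
 */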
/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}
/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}
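/*
 * Example (illustrative): advertise 1000BASE-T full duplex plus symmetric
 * pause; the ADVERTISED_* constants come from <linux/ethtool.h>:
 *
 *	err = t3_phy_advertise(phy, ADVERTISED_1000baseT_Full |
 *			       ADVERTISED_Pause);
 */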
/**
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested PHY speed
 *	@duplex: requested PHY duplex
 *
 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}
static const struct adapter_info t3_adap_info[] = {
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};
/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}
#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
static const struct port_type_info port_types[] = {
	{NULL},
	{t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-XR"},
	{t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{NULL, CAPS_10G, "10GBASE-KX4"},
	{t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-SR"},
	{NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
};
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
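/*
 * For reference, VPD_ENTRY(cclk, 6) above expands to the three fields
 *
 *	u8 cclk_kword[2]; u8 cclk_len; u8 cclk_data[6];
 *
 * which is why get_vpd_params() below can parse vpd.cclk_data directly.
 */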
#define EEPROM_MAX_POLL   4
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00
/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       cpu_to_le32(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}
/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}
/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
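/*
 * Example (illustrative): the @cont flag chains SF1 operations into a single
 * flash transaction.  A one-byte status poll is issued as
 *
 *	sf1_write(adapter, 1, 1, SF_RD_STATUS);	// opcode, keep chip selected
 *	sf1_read(adapter, 1, 0, &status);	// data byte, end transaction
 *
 * which is exactly the pairing flash_wait_op() below relies on.
 */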
/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
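/*
 * Note on the 256-byte limit above: serial flash pages are 256 bytes and
 * SF_PROG_PAGE wraps within a page, so callers such as t3_load_fw() below
 * must split larger images into page-sized chunks before calling this
 * function.
 */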
/**
 *	t3_get_tp_version - read the tp sram version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the protocol sram version from sram.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}
/**
 *	t3_check_tpsram_version - read the tp sram version
 *	@adapter: the adapter
 *	@must_load: set to 1 if loading a new microcode image is required
 *
 *	Reads the protocol sram version from flash.
 */
int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	*must_load = 1;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	if (major != TP_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver needs version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	else {
		*must_load = 0;
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}
/**
 *	t3_check_tpsram - check if provided protocol SRAM
 *	is compatible with this driver
 *	@adapter: the adapter
 *	@tp_sram: the firmware image to write
 *	@size: image size
 *
 *	Checks if an adapter's tp sram is compatible with the driver.
 *	Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}
	return 0;
}
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}
/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *	@must_load: set to 1 if loading a new FW image is required
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	*must_load = 1;
	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	if (major != FW_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong FW version(%u.%u), "
		       "driver needs version %u.%u\n", major, minor,
		       FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		*must_load = 0;
		CH_WARN(adapter, "found wrong FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	}
	return -EINVAL;
}
/**
 *	t3_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}
/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
#define CIM_CTL_BASE 0x2000
/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			phy->ops->reset(phy, 0);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}
/**
 *	t3_set_vlan_accel - control HW VLAN extraction
 *	@adapter: the adapter
 *	@ports: bitmap of adapter ports to operate on
 *	@on: enable (1) or disable (0) HW VLAN extraction
 *
 *	Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};
/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions
 *	@stats: statistics counters tracking interrupt occurrences
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally printing a warning or alert message, and optionally
 *	incrementing a stat counter.  The table is terminated by an entry
 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
#define SGE_INTR_MASK (F_RSPQDISABLED)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			V_BISTERR(M_BISTERR) | F_PEXERR)
#define ULPRX_INTR_MASK F_PARERR
#define ULPTX_INTR_MASK 0
#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  tp_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERR, "ULP RX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		/* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);
	return cause != 0;
}
/*
 * Interrupt handler for PHY events.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
	u32 mask, gpi = adapter_info(adapter)->gpio_intr;
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);

		mask = gpi - (gpi & (gpi - 1));
		gpi -= mask;

		if (!(p->port_type->caps & SUPPORTED_IRQ))
			continue;

		if (cause & mask) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
		}
	}

	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}
/*
 * T3 slow path (non-data) interrupt handler.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
	return 1;
}
/**
 *	t3_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable interrupts by setting the interrupt enable registers of the
 *	various HW modules and then enabling the top-level interrupt
 *	concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_TP_INT_ENABLE, 0x3bfffff},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);

	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
		     adapter_info(adapter)->gpio_intr);
	t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
		     adapter_info(adapter)->gpio_intr);
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}
/**
 *	t3_intr_disable - disable a card's interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	adapter->slow_intr_mask = 0;
}
/**
 *	t3_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts.
 */
void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
	    t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}
/**
 *	t3_port_intr_enable - enable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be enabled
 *
 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_enable(phy);
}
/**
 *	t3_port_intr_disable - disable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be disabled
 *
 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_disable(phy);
}
/**
 *	t3_port_intr_clear - clear port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts to clear
 *
 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	phy->ops->intr_clear(phy);
}
#define SG_CONTEXT_CMD_ATTEMPTS 100
/**
 *	t3_sge_write_context - write an SGE context
 *	@adapter: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Program an SGE context with the values already loaded in the
 *	CONTEXT_DATA? registers.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
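/*
 * The SG_CONTEXT_DATA0..3 registers act as a 128-bit staging buffer: each
 * t3_sge_init_*cntxt() function below loads them and then calls
 * t3_sge_write_context() to commit the staged image to context @id with all
 * mask bits enabled.
 */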
/**
 *	t3_sge_init_ecntxt - initialize an SGE egress context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@type: the egress context type
 *	@respq: associated response queue
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@token: uP token
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE egress context and make it ready for use.  If the
 *	platform allows concurrent context operations, the caller is
 *	responsible for appropriate locking.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
/**
 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@bsize: size of each buffer for this queue
 *	@cong_thres: threshold to signal congestion to upstream producers
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE free list context and make it ready for use.  The
 *	caller is responsible for ensuring only one context operation occurs
 *	at a time.
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
			unsigned int cidx)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}
/**
 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@fl_thres: threshold for selecting the normal or jumbo free list
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE response queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
/**
 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@rspq: response queue for async notifications
 *	@ovfl_mode: CQ overflow mode
 *	@credits: completion queue credits
 *	@credit_thres: the credit threshold
 *
 *	Initialize an SGE completion queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}
/**
 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
 *	@adapter: the adapter
 *	@id: the egress context id
 *	@enable: enable (1) or disable (0) the context
 *
 *	Enable or disable an SGE egress context.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_disable_fl - disable an SGE free-buffer list
 *	@adapter: the adapter
 *	@id: the free list context id
 *
 *	Disable an SGE free-buffer list.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_disable_rspcntxt - disable an SGE response queue
 *	@adapter: the adapter
 *	@id: the response queue context id
 *
 *	Disable an SGE response queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
 *	@adapter: the adapter
 *	@id: the completion queue context id
 *
 *	Disable an SGE completion queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
 *	@adapter: the adapter
 *	@id: the context id
 *	@op: the operation to perform
 *	@credits: credits to return to the CQ
 *
 *	Perform the selected operation on an SGE completion queue context.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

	if (op >= 2 && op < 7) {
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}
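/*
 * Usage sketch: opcodes 2 through 6 return the current CQ index, which on
 * pre-rev1 silicon requires the explicit context read-back performed above.
 * A hypothetical credit-return call, with 4 standing in for one of the
 * index-returning opcodes:
 *
 *	int cidx = t3_sge_cqcntxt_op(adapter, cq_id, 4, credits);
 */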
/**
 *	t3_sge_read_context - read an SGE context
 *	@type: the context type
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE egress context.  The caller is responsible for ensuring
 *	only one context operation occurs at a time.
 */
static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
			       unsigned int id, u32 data[4])
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
			    SG_CONTEXT_CMD_ATTEMPTS, 1))
		return -EIO;
	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
	return 0;
}
/**
 *	t3_sge_read_ecntxt - read an SGE egress context
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE egress context.  The caller is responsible for ensuring
 *	only one context operation occurs at a time.
 */
int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= 65536)
		return -EINVAL;
	return t3_sge_read_context(F_EGRESS, adapter, id, data);
}
/**
 *	t3_sge_read_cq - read an SGE CQ context
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE CQ context.  The caller is responsible for ensuring
 *	only one context operation occurs at a time.
 */
int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= 65536)
		return -EINVAL;
	return t3_sge_read_context(F_CQ, adapter, id, data);
}
/**
 *	t3_sge_read_fl - read an SGE free-list context
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE free-list context.  The caller is responsible for ensuring
 *	only one context operation occurs at a time.
 */
int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= SGE_QSETS * 2)
		return -EINVAL;
	return t3_sge_read_context(F_FREELIST, adapter, id, data);
}
/**
 *	t3_sge_read_rspq - read an SGE response queue context
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE response queue context.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= SGE_QSETS)
		return -EINVAL;
	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
}
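/*
 * Debug sketch (hypothetical, not part of the driver): dump all four
 * words of a response queue context.
 *
 *	u32 data[4];
 *
 *	if (!t3_sge_read_rspq(adapter, id, data))
 *		printk(KERN_DEBUG "rspq %u: %08x %08x %08x %08x\n",
 *		       id, data[0], data[1], data[2], data[3]);
 */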
/**
 *	t3_config_rss - configure Rx packet steering
 *	@adapter: the adapter
 *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
 *	@cpus: values for the CPU lookup table (0xff terminated)
 *	@rspq: values for the response queue lookup table (0xffff terminated)
 *
 *	Programs the receive packet steering logic.  @cpus and @rspq provide
 *	the values for the CPU and response queue lookup tables.  If they
 *	provide fewer values than the size of the tables the supplied values
 *	are used repeatedly until the tables are fully populated.
 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 * cpus, const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			u32 val = i << 16;

			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				if (cpus[cpu_idx] == 0xff)
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}

	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			if (rspq[q_idx] == 0xffff)
				q_idx = 0;
		}

	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
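/*
 * Usage sketch (hypothetical values): spread Rx over four CPUs and two
 * response queues.  Note the 0xff / 0xffff terminators described above;
 * the supplied entries are replayed until the RSS_TABLE_SIZE-entry
 * tables are fully populated.
 *
 *	static const u8 cpus[] = { 0, 1, 2, 3, 0xff };
 *	static const u16 rspq_map[] = { 0, 1, 0xffff };
 *
 *	t3_config_rss(adapter, rss_cfg, cpus, rspq_map);
 */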
/**
 *	t3_read_rss - read the contents of the RSS tables
 *	@adapter: the adapter
 *	@lkup: holds the contents of the RSS lookup table
 *	@map: holds the contents of the RSS map table
 *
 *	Reads the contents of the receive packet steering tables.
 */
int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
{
	int i;
	u32 val;

	if (lkup)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*lkup++ = val;
			*lkup++ = (val >> 8);
		}

	if (map)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*map++ = val;
		}
	return 0;
}
/**
 *	t3_tp_set_offload_mode - put TP in NIC/offload mode
 *	@adap: the adapter
 *	@enable: 1 to select offload mode, 0 for regular NIC
 *
 *	Switches TP to NIC/offload mode.
 */
void t3_tp_set_offload_mode(struct adapter *adap, int enable)
{
	if (is_offload(adap) || !enable)
		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
				 V_NICMODE(!enable));
}
/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	unsigned int n = mem_size / pg_size;

	return n - n % 24;
}
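/*
 * Worked example: a 100MB channel with 64KB pages yields 1600 raw pages;
 * rounding down to a multiple of 24 gives
 * pm_num_pages(100 << 20, 64 << 10) == 1584.
 */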
#define mem_region(adap, start, size, reg) \
	t3_write_reg((adap), A_ ## reg, (start)); \
	start += size
/**
 *	partition_mem - partition memory and configure TP memory settings
 *	@adap: the adapter
 *	@p: the TP parameters
 *
 *	Partitions context and payload memory and configures TP's memory
 *	registers.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and make multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}
static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}
/* Desired TP timer resolution in usec */
#define TP_TMR_RES 50

/* TCP timer values in ms */
#define TP_DACK_TIMER 50
#define TP_RTO_MIN 250
/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency in Hz
 *
 *	Set TP's timing parameters, such as the various timer resolutions and
 *	the TCP timer values.
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
	unsigned int tps = core_clk >> tre;

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
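/*
 * Worked example (assuming a 200 MHz core clock):
 * core_clk / (1000000 / TP_TMR_RES) = 200000000 / 20000 = 10000, and
 * fls(10000) - 1 = 13, so the timer resolution is 2^13 core ticks and
 * tps = 200000000 >> 13 = 24414 ticks per second, i.e. a tick of about
 * 41us against the 50us target.
 */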
/**
 *	t3_tp_set_coalescing_size - set receive coalescing size
 *	@adap: the adapter
 *	@size: the receive coalescing size
 *	@psh: whether a set PSH bit should deliver coalesced data
 *
 *	Set the receive coalescing size and PSH bit handling.
 */
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}
/**
 *	t3_tp_set_max_rxsize - set the max receive size
 *	@adap: the adapter
 *	@size: the max receive size
 *
 *	Set TP's max receive size.  This is the limit that applies when
 *	receive coalescing is disabled.
 */
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
static void __devinit init_mtus(unsigned short mtus[])
{
	/*
	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
	 * it can accommodate max size TCP/IP headers when SACK and timestamps
	 * are enabled and still have at least 8 bytes of payload.
	 */
	mtus[0] = 88;
	mtus[1] = 256;
	mtus[2] = 512;
	mtus[3] = 576;
	mtus[4] = 808;
	mtus[5] = 1024;
	mtus[6] = 1280;
	mtus[7] = 1492;
	mtus[8] = 1500;
	mtus[9] = 2002;
	mtus[10] = 2048;
	mtus[11] = 4096;
	mtus[12] = 4352;
	mtus[13] = 8192;
	mtus[14] = 9000;
	mtus[15] = 9600;
}
/*
 * Initial congestion control parameters.
 */
static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = b[11] = b[12] = 1;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = b[30] = b[31] = 6;
}
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
 *	Update the high-speed congestion control table with the supplied alpha,
 *	beta, and MTU values.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
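/*
 * Worked example: for an MTU of 1500, fls(1500) = 11 and bit 9 of 1500 is
 * clear, so log2 is rounded down to 10 before the table write.  For a
 * window with alpha 2 and avg_pkts 80 the additive increment becomes
 * max(((1500 - 40) * 2) / 80, CC_MIN_INCR) = 36.
 */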
/**
 *	t3_read_hw_mtus - returns the values in the HW MTU table
 *	@adap: the adapter
 *	@mtus: where to store the HW MTU values
 *
 *	Reads the HW MTU table.
 */
void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
{
	int i;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int val;

		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
		val = t3_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = val & 0x3fff;
	}
}
/**
 *	t3_get_cong_cntl_tab - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the alpha values
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.
 */
void t3_get_cong_cntl_tab(struct adapter *adap,
			  unsigned short incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			t3_write_reg(adap, A_TP_CCTRL_TABLE,
				     0xffff0000 | (mtu << 5) | w);
			incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
				       0x1fff;
		}
}
/**
 *	t3_tp_get_mib_stats - read TP's MIB counters
 *	@adap: the adapter
 *	@tps: holds the returned counter values
 *
 *	Returns the values of TP's MIB counters.
 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}
#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	(start) += (len)

#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)
static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
/**
 *	t3_set_proto_sram - set the contents of the protocol sram
 *	@adap: the adapter
 *	@data: the protocol image
 *
 *	Write the contents of the protocol SRAM.
 */
int t3_set_proto_sram(struct adapter *adap, u8 *data)
{
	int i;
	u32 *buf = (u32 *)data;

	for (i = 0; i < PROTO_SRAM_LINES; i++) {
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));

		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
			return -EIO;
	}
	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);

	return 0;
}
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);
}
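/*
 * Usage sketch (hypothetical values; mask encodings per the hardware
 * spec): enable the TX trace filter (filter_index 0) for TCP traffic to
 * port 80.
 *
 *	struct trace_params tp = { 0 };
 *
 *	tp.dport = 80;
 *	tp.dport_mask = 0xffff;
 *	tp.proto = IPPROTO_TCP;
 *	tp.proto_mask = 0xff;
 *	t3_config_trace_filter(adapter, &tp, 0, 0, 1);
 */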
/**
 *	t3_config_sched - configure a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: target rate in Kbps
 *	@sched: the scheduler index
 *
 *	Configure a HW scheduler for the target rate.
 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* -> bytes */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;
			bpt = (kbps + tps / 2) / tps;
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}
int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
{
	if (port_mask & ~((1 << adap->params.nports) - 1))
		return -EINVAL;
	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
			 port_mask << S_PORT0ACTIVE);
	return 0;
}
/*
 * Perform the bits of HW initialization that are dependent on the number
 * of available ports.
 */
static void init_hw_for_avail_ports(struct adapter *adap, int nports)
{
	int i;

	if (nports == 1) {
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
			     F_PORT0ACTIVE | F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
	} else {
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}
static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
struct mc7_timing_params {
	unsigned char ActToPreDly;
	unsigned char ActToRdWrDly;
	unsigned char PreCyc;
	unsigned char RefCyc[5];
	unsigned char BkCyc;
	unsigned char WrToRdDly;
	unsigned char RdToWrDly;
};
/*
 * Write a value to a register and check that the write completed.  These
 * writes normally complete in a cycle or two, so one read should suffice.
 * The very first read exists to flush the posted write to the device.
 */
static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
{
	t3_write_reg(adapter, addr, val);
	t3_read_reg(adapter, addr);	/* flush */
	if (!(t3_read_reg(adapter, addr) & F_BUSY))
		return 0;
	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
	return -EIO;
}
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
				 F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */

	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}
static void config_pcie(struct adapter *adap)
{
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
}
/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only the
 * top 8 bits are available for use, the rest must be 0.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts = 100;
	const struct vpd_params *vpd = &adapter->params.vpd;

	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	init_hw_for_avail_ports(adapter, adapter->params.nports);
	t3_sge_init(adapter, &adapter->params.sge);

	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}
/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.
 */
static void __devinit get_pci_mode(struct adapter *adapter,
				   struct pci_params *p)
{
	static unsigned short speed_map[] = { 33, 66, 100, 133 };
	u32 pci_mode, pcie_cap;

	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		u16 val;

		p->variant = PCI_VARIANT_PCIE;
		p->pcie_cap_addr = pcie_cap;
		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
				     &val);
		p->width = (val >> 4) & 0x3f;
		return;
	}

	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}
/**
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@caps: the link's capabilities
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/duplex/flow-control/autonegotiation
 *	settings.
 */
static void __devinit init_link_config(struct link_config *lc,
				       unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = lc->speed = SPEED_INVALID;
	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising = lc->supported;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}
/**
 *	mc7_calc_size - calculate MC7 memory size
 *	@cfg: the MC7 configuration
 *
 *	Calculates the size of an MC7 memory in bytes from the value of its
 *	configuration register.
 */
static unsigned int __devinit mc7_calc_size(u32 cfg)
{
	unsigned int width = G_WIDTH(cfg);
	unsigned int banks = !!(cfg & F_BKS) + 1;
	unsigned int org = !!(cfg & F_ORG) + 1;
	unsigned int density = G_DEN(cfg);
	unsigned int MBs = ((256 << density) * banks) / (org << width);

	return MBs << 20;
}
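/*
 * Worked example (field encodings per the MC7_CFG register): with density
 * code 1, two banks, single-rank organization and width code 1,
 * MBs = ((256 << 1) * 2) / (1 << 1) = 512, i.e. a 512MB part.
 */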
static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
			       unsigned int base_addr, const char *name)
{
	u32 cfg;

	mc7->adapter = adapter;
	mc7->name = name;
	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
	mc7->width = G_WIDTH(cfg);
}
void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	mac->adapter = adapter;
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);

	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}
/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't.
 */
static int t3_reset_adapter(struct adapter *adapter)
{
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Delay. Give Some time to device to reset fully.
	 * XXX The delay time should be modified.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}
/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 */
int __devinit t3_prep_adapter(struct adapter *adapter,
			      const struct adapter_info *ai, int reset)
{
	int ret;
	unsigned int i, j = 0;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports;
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	adapter->params.linkpoll_period = 0;
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = ai->nports;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[j])
			++j;

		p->port_type = &port_types[adapter->params.vpd.port_type[j]];
		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				       ai->mdio_ops);
		mac_prep(&p->mac, adapter, j);
		++j;

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr,
		       ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr,
		       ETH_ALEN);
		init_link_config(&p->link_config, p->port_type->caps);
		p->phy.ops->power_down(&p->phy, 1);
		if (!(p->port_type->caps & SUPPORTED_IRQ))
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}