1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: cxgb_t3_hw.c,v 1.4 2007/12/11 11:25:49 lukem Exp $");
35 __FBSDID("$FreeBSD: src/sys/dev/cxgb/common/cxgb_t3_hw.c,v 1.9 2007/09/09 03:51:25 kmacy Exp $");
40 #include <cxgb_include.h>
43 #include <dev/cxgb/cxgb_include.h>
46 #include "cxgb_include.h"
51 #define msleep t3_os_sleep
54 * t3_wait_op_done_val - wait until an operation is completed
55 * @adapter: the adapter performing the operation
56 * @reg: the register to check for completion
57 * @mask: a single-bit field within @reg that indicates completion
58 * @polarity: the value of the field when the operation is completed
59 * @attempts: number of check iterations
60 * @delay: delay in usecs between iterations
61 * @valp: where to store the value of the register at completion time
63 * Wait until an operation is completed by checking a bit in a register
64 * up to @attempts times. If @valp is not NULL the value of the register
65 * at the time it indicated completion is stored there. Returns 0 if the
66 * operation completes and -EAGAIN otherwise.
68 int t3_wait_op_done_val(adapter_t
*adapter
, int reg
, u32 mask
, int polarity
,
69 int attempts
, int delay
, u32
*valp
)
72 u32 val
= t3_read_reg(adapter
, reg
);
74 if (!!(val
& mask
) == polarity
) {
87 * t3_write_regs - write a bunch of registers
88 * @adapter: the adapter to program
89 * @p: an array of register address/register value pairs
90 * @n: the number of address/value pairs
91 * @offset: register address offset
93 * Takes an array of register address/register value pairs and writes each
94 * value to the corresponding register. Register addresses are adjusted
95 * by the supplied offset.
97 void t3_write_regs(adapter_t
*adapter
, const struct addr_val_pair
*p
, int n
,
101 t3_write_reg(adapter
, p
->reg_addr
+ offset
, p
->val
);
107 * t3_set_reg_field - set a register field to a value
108 * @adapter: the adapter to program
109 * @addr: the register address
110 * @mask: specifies the portion of the register to modify
111 * @val: the new value for the register field
113 * Sets a register field specified by the supplied mask to the
116 void t3_set_reg_field(adapter_t
*adapter
, unsigned int addr
, u32 mask
, u32 val
)
118 u32 v
= t3_read_reg(adapter
, addr
) & ~mask
;
120 t3_write_reg(adapter
, addr
, v
| val
);
121 (void) t3_read_reg(adapter
, addr
); /* flush */
125 * t3_read_indirect - read indirectly addressed registers
127 * @addr_reg: register holding the indirect address
128 * @data_reg: register holding the value of the indirect register
129 * @vals: where the read register values are stored
130 * @start_idx: index of first indirect register to read
131 * @nregs: how many indirect registers to read
133 * Reads registers that are accessed indirectly through an address/data
136 static void t3_read_indirect(adapter_t
*adap
, unsigned int addr_reg
,
137 unsigned int data_reg
, u32
*vals
, unsigned int nregs
,
138 unsigned int start_idx
)
141 t3_write_reg(adap
, addr_reg
, start_idx
);
142 *vals
++ = t3_read_reg(adap
, data_reg
);
148 * t3_mc7_bd_read - read from MC7 through backdoor accesses
149 * @mc7: identifies MC7 to read from
150 * @start: index of first 64-bit word to read
151 * @n: number of 64-bit words to read
152 * @buf: where to store the read result
154 * Read n 64-bit words from MC7 starting at word start, using backdoor
157 int t3_mc7_bd_read(struct mc7
*mc7
, unsigned int start
, unsigned int n
,
160 static int shift
[] = { 0, 0, 16, 24 };
161 static int step
[] = { 0, 32, 16, 8 };
163 unsigned int size64
= mc7
->size
/ 8; /* # of 64-bit words */
164 adapter_t
*adap
= mc7
->adapter
;
166 if (start
>= size64
|| start
+ n
> size64
)
169 start
*= (8 << mc7
->width
);
174 for (i
= (1 << mc7
->width
) - 1; i
>= 0; --i
) {
178 t3_write_reg(adap
, mc7
->offset
+ A_MC7_BD_ADDR
,
180 t3_write_reg(adap
, mc7
->offset
+ A_MC7_BD_OP
, 0);
181 val
= t3_read_reg(adap
, mc7
->offset
+ A_MC7_BD_OP
);
182 while ((val
& F_BUSY
) && attempts
--)
183 val
= t3_read_reg(adap
,
184 mc7
->offset
+ A_MC7_BD_OP
);
188 val
= t3_read_reg(adap
, mc7
->offset
+ A_MC7_BD_DATA1
);
189 if (mc7
->width
== 0) {
190 val64
= t3_read_reg(adap
,
191 mc7
->offset
+ A_MC7_BD_DATA0
);
192 val64
|= (u64
)val
<< 32;
195 val
>>= shift
[mc7
->width
];
196 val64
|= (u64
)val
<< (step
[mc7
->width
] * i
);
208 static void mi1_init(adapter_t
*adap
, const struct adapter_info
*ai
)
210 u32 clkdiv
= adap
->params
.vpd
.cclk
/ (2 * adap
->params
.vpd
.mdc
) - 1;
211 u32 val
= F_PREEN
| V_MDIINV(ai
->mdiinv
) | V_MDIEN(ai
->mdien
) |
214 if (!(ai
->caps
& SUPPORTED_10000baseT_Full
))
216 t3_write_reg(adap
, A_MI1_CFG
, val
);
219 #define MDIO_ATTEMPTS 20
222 * MI1 read/write operations for direct-addressed PHYs.
224 static int mi1_read(adapter_t
*adapter
, int phy_addr
, int mmd_addr
,
225 int reg_addr
, unsigned int *valp
)
228 u32 addr
= V_REGADDR(reg_addr
) | V_PHYADDR(phy_addr
);
234 t3_write_reg(adapter
, A_MI1_ADDR
, addr
);
235 t3_write_reg(adapter
, A_MI1_OP
, V_MDI_OP(2));
236 ret
= t3_wait_op_done(adapter
, A_MI1_OP
, F_BUSY
, 0, MDIO_ATTEMPTS
, 10);
238 *valp
= t3_read_reg(adapter
, A_MI1_DATA
);
239 MDIO_UNLOCK(adapter
);
243 static int mi1_write(adapter_t
*adapter
, int phy_addr
, int mmd_addr
,
244 int reg_addr
, unsigned int val
)
247 u32 addr
= V_REGADDR(reg_addr
) | V_PHYADDR(phy_addr
);
253 t3_write_reg(adapter
, A_MI1_ADDR
, addr
);
254 t3_write_reg(adapter
, A_MI1_DATA
, val
);
255 t3_write_reg(adapter
, A_MI1_OP
, V_MDI_OP(1));
256 ret
= t3_wait_op_done(adapter
, A_MI1_OP
, F_BUSY
, 0, MDIO_ATTEMPTS
, 10);
257 MDIO_UNLOCK(adapter
);
261 static struct mdio_ops mi1_mdio_ops
= {
267 * MI1 read/write operations for indirect-addressed PHYs.
269 static int mi1_ext_read(adapter_t
*adapter
, int phy_addr
, int mmd_addr
,
270 int reg_addr
, unsigned int *valp
)
273 u32 addr
= V_REGADDR(mmd_addr
) | V_PHYADDR(phy_addr
);
276 t3_write_reg(adapter
, A_MI1_ADDR
, addr
);
277 t3_write_reg(adapter
, A_MI1_DATA
, reg_addr
);
278 t3_write_reg(adapter
, A_MI1_OP
, V_MDI_OP(0));
279 ret
= t3_wait_op_done(adapter
, A_MI1_OP
, F_BUSY
, 0, MDIO_ATTEMPTS
, 10);
281 t3_write_reg(adapter
, A_MI1_OP
, V_MDI_OP(3));
282 ret
= t3_wait_op_done(adapter
, A_MI1_OP
, F_BUSY
, 0,
285 *valp
= t3_read_reg(adapter
, A_MI1_DATA
);
287 MDIO_UNLOCK(adapter
);
291 static int mi1_ext_write(adapter_t
*adapter
, int phy_addr
, int mmd_addr
,
292 int reg_addr
, unsigned int val
)
295 u32 addr
= V_REGADDR(mmd_addr
) | V_PHYADDR(phy_addr
);
298 t3_write_reg(adapter
, A_MI1_ADDR
, addr
);
299 t3_write_reg(adapter
, A_MI1_DATA
, reg_addr
);
300 t3_write_reg(adapter
, A_MI1_OP
, V_MDI_OP(0));
301 ret
= t3_wait_op_done(adapter
, A_MI1_OP
, F_BUSY
, 0, MDIO_ATTEMPTS
, 10);
303 t3_write_reg(adapter
, A_MI1_DATA
, val
);
304 t3_write_reg(adapter
, A_MI1_OP
, V_MDI_OP(1));
305 ret
= t3_wait_op_done(adapter
, A_MI1_OP
, F_BUSY
, 0,
308 MDIO_UNLOCK(adapter
);
312 static struct mdio_ops mi1_mdio_ext_ops
= {
318 * t3_mdio_change_bits - modify the value of a PHY register
319 * @phy: the PHY to operate on
320 * @mmd: the device address
321 * @reg: the register address
322 * @clear: what part of the register value to mask off
323 * @set: what part of the register value to set
325 * Changes the value of a PHY register by applying a mask to its current
326 * value and ORing the result with a new value.
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
343 * t3_phy_reset - reset a PHY block
344 * @phy: the PHY to operate on
345 * @mmd: the device address of the PHY block to reset
346 * @wait: how long to wait for the reset to complete in 1ms increments
348 * Resets a PHY block and optionally waits for the reset to complete.
349 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
352 int t3_phy_reset(struct cphy
*phy
, int mmd
, int wait
)
357 err
= t3_mdio_change_bits(phy
, mmd
, MII_BMCR
, BMCR_PDOWN
, BMCR_RESET
);
362 err
= mdio_read(phy
, mmd
, MII_BMCR
, &ctl
);
368 } while (ctl
&& --wait
);
374 * t3_phy_advertise - set the PHY advertisement registers for autoneg
375 * @phy: the PHY to operate on
376 * @advert: bitmap of capabilities the PHY should advertise
378 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
379 * requested capabilities.
381 int t3_phy_advertise(struct cphy
*phy
, unsigned int advert
)
384 unsigned int val
= 0;
386 err
= mdio_read(phy
, 0, MII_CTRL1000
, &val
);
390 val
&= ~(ADVERTISE_1000HALF
| ADVERTISE_1000FULL
);
391 if (advert
& ADVERTISED_1000baseT_Half
)
392 val
|= ADVERTISE_1000HALF
;
393 if (advert
& ADVERTISED_1000baseT_Full
)
394 val
|= ADVERTISE_1000FULL
;
396 err
= mdio_write(phy
, 0, MII_CTRL1000
, val
);
401 if (advert
& ADVERTISED_10baseT_Half
)
402 val
|= ADVERTISE_10HALF
;
403 if (advert
& ADVERTISED_10baseT_Full
)
404 val
|= ADVERTISE_10FULL
;
405 if (advert
& ADVERTISED_100baseT_Half
)
406 val
|= ADVERTISE_100HALF
;
407 if (advert
& ADVERTISED_100baseT_Full
)
408 val
|= ADVERTISE_100FULL
;
409 if (advert
& ADVERTISED_Pause
)
410 val
|= ADVERTISE_PAUSE_CAP
;
411 if (advert
& ADVERTISED_Asym_Pause
)
412 val
|= ADVERTISE_PAUSE_ASYM
;
413 return mdio_write(phy
, 0, MII_ADVERTISE
, val
);
417 * t3_set_phy_speed_duplex - force PHY speed and duplex
418 * @phy: the PHY to operate on
419 * @speed: requested PHY speed
420 * @duplex: requested PHY duplex
422 * Force a 10/100/1000 PHY's speed and duplex. This also disables
423 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
425 int t3_set_phy_speed_duplex(struct cphy
*phy
, int speed
, int duplex
)
430 err
= mdio_read(phy
, 0, MII_BMCR
, &ctl
);
435 ctl
&= ~(BMCR_SPEED100
| BMCR_SPEED1000
| BMCR_ANENABLE
);
436 if (speed
== SPEED_100
)
437 ctl
|= BMCR_SPEED100
;
438 else if (speed
== SPEED_1000
)
439 ctl
|= BMCR_SPEED1000
;
442 ctl
&= ~(BMCR_FULLDPLX
| BMCR_ANENABLE
);
443 if (duplex
== DUPLEX_FULL
)
444 ctl
|= BMCR_FULLDPLX
;
446 if (ctl
& BMCR_SPEED1000
) /* auto-negotiation required for GigE */
447 ctl
|= BMCR_ANENABLE
;
448 return mdio_write(phy
, 0, MII_BMCR
, ctl
);
451 static struct adapter_info t3_adap_info
[] = {
453 F_GPIO2_OEN
| F_GPIO4_OEN
|
454 F_GPIO2_OUT_VAL
| F_GPIO4_OUT_VAL
, F_GPIO3
| F_GPIO5
,
456 &mi1_mdio_ops
, "Chelsio PE9000" },
458 F_GPIO2_OEN
| F_GPIO4_OEN
|
459 F_GPIO2_OUT_VAL
| F_GPIO4_OUT_VAL
, F_GPIO3
| F_GPIO5
,
461 &mi1_mdio_ops
, "Chelsio T302" },
463 F_GPIO1_OEN
| F_GPIO6_OEN
| F_GPIO7_OEN
| F_GPIO10_OEN
|
464 F_GPIO1_OUT_VAL
| F_GPIO6_OUT_VAL
| F_GPIO10_OUT_VAL
, 0,
465 SUPPORTED_10000baseT_Full
| SUPPORTED_AUI
,
466 &mi1_mdio_ext_ops
, "Chelsio T310" },
468 F_GPIO1_OEN
| F_GPIO2_OEN
| F_GPIO4_OEN
| F_GPIO5_OEN
| F_GPIO6_OEN
|
469 F_GPIO7_OEN
| F_GPIO10_OEN
| F_GPIO11_OEN
| F_GPIO1_OUT_VAL
|
470 F_GPIO5_OUT_VAL
| F_GPIO6_OUT_VAL
| F_GPIO10_OUT_VAL
, 0,
471 SUPPORTED_10000baseT_Full
| SUPPORTED_AUI
,
472 &mi1_mdio_ext_ops
, "Chelsio T320" },
474 F_GPIO5_OEN
| F_GPIO6_OEN
| F_GPIO7_OEN
| F_GPIO5_OUT_VAL
|
475 F_GPIO6_OUT_VAL
| F_GPIO7_OUT_VAL
,
476 F_GPIO1
| F_GPIO2
| F_GPIO3
| F_GPIO4
, SUPPORTED_AUI
,
477 &mi1_mdio_ops
, "Chelsio T304" },
481 * Return the adapter_info structure with a given index. Out-of-range indices
484 const struct adapter_info
*t3_get_adapter_info(unsigned int id
)
486 return id
< ARRAY_SIZE(t3_adap_info
) ? &t3_adap_info
[id
] : NULL
;
489 #define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
490 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
491 #define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
493 static struct port_type_info port_types
[] = {
495 { t3_ael1002_phy_prep
, CAPS_10G
| SUPPORTED_FIBRE
,
497 { t3_vsc8211_phy_prep
, CAPS_1G
| SUPPORTED_TP
| SUPPORTED_IRQ
,
498 "10/100/1000BASE-T" },
499 { t3_mv88e1xxx_phy_prep
, CAPS_1G
| SUPPORTED_TP
| SUPPORTED_IRQ
,
500 "10/100/1000BASE-T" },
501 { t3_xaui_direct_phy_prep
, CAPS_10G
| SUPPORTED_TP
, "10GBASE-CX4" },
502 { NULL
, CAPS_10G
, "10GBASE-KX4" },
503 { t3_qt2045_phy_prep
, CAPS_10G
| SUPPORTED_TP
, "10GBASE-CX4" },
504 { t3_ael1006_phy_prep
, CAPS_10G
| SUPPORTED_FIBRE
,
506 { NULL
, CAPS_10G
| SUPPORTED_TP
, "10GBASE-CX4" },
512 #define VPD_ENTRY(name, len) \
513 u8 name##_kword[2]; u8 name##_len; char name##_data[len]
516 * Partial EEPROM Vital Product Data structure. Includes only the ID and
525 VPD_ENTRY(pn
, 16); /* part number */
526 VPD_ENTRY(ec
, 16); /* EC level */
527 VPD_ENTRY(sn
, SERNUM_LEN
); /* serial number */
528 VPD_ENTRY(na
, 12); /* MAC address base */
529 VPD_ENTRY(cclk
, 6); /* core clock */
530 VPD_ENTRY(mclk
, 6); /* mem clock */
531 VPD_ENTRY(uclk
, 6); /* uP clk */
532 VPD_ENTRY(mdc
, 6); /* MDIO clk */
533 VPD_ENTRY(mt
, 2); /* mem timing */
534 VPD_ENTRY(xaui0cfg
, 6); /* XAUI0 config */
535 VPD_ENTRY(xaui1cfg
, 6); /* XAUI1 config */
536 VPD_ENTRY(port0
, 2); /* PHY0 complex */
537 VPD_ENTRY(port1
, 2); /* PHY1 complex */
538 VPD_ENTRY(port2
, 2); /* PHY2 complex */
539 VPD_ENTRY(port3
, 2); /* PHY3 complex */
540 VPD_ENTRY(rv
, 1); /* csum */
541 u32 pad
; /* for multiple-of-4 sizing and alignment */
544 #define EEPROM_MAX_POLL 4
545 #define EEPROM_STAT_ADDR 0x4000
546 #define VPD_BASE 0xc00
549 * t3_seeprom_read - read a VPD EEPROM location
550 * @adapter: adapter to read
551 * @addr: EEPROM address
552 * @data: where to store the read data
554 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
555 * VPD ROM capability. A zero is written to the flag bit when the
556 * address is written to the control register. The hardware device will
557 * set the flag to 1 when 4 bytes have been read into the data register.
559 int t3_seeprom_read(adapter_t
*adapter
, u32 addr
, u32
*data
)
562 int attempts
= EEPROM_MAX_POLL
;
563 unsigned int base
= adapter
->params
.pci
.vpd_cap_addr
;
565 if ((addr
>= EEPROMSIZE
&& addr
!= EEPROM_STAT_ADDR
) || (addr
& 3))
568 t3_os_pci_write_config_2(adapter
, base
+ PCI_VPD_ADDR
, (u16
)addr
);
571 t3_os_pci_read_config_2(adapter
, base
+ PCI_VPD_ADDR
, &val
);
572 } while (!(val
& PCI_VPD_ADDR_F
) && --attempts
);
574 if (!(val
& PCI_VPD_ADDR_F
)) {
575 CH_ERR(adapter
, "reading EEPROM address 0x%x failed\n", addr
);
578 t3_os_pci_read_config_4(adapter
, base
+ PCI_VPD_DATA
, data
);
579 *data
= le32_to_cpu(*data
);
584 * t3_seeprom_write - write a VPD EEPROM location
585 * @adapter: adapter to write
586 * @addr: EEPROM address
587 * @data: value to write
589 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
590 * VPD ROM capability.
592 int t3_seeprom_write(adapter_t
*adapter
, u32 addr
, u32 data
)
595 int attempts
= EEPROM_MAX_POLL
;
596 unsigned int base
= adapter
->params
.pci
.vpd_cap_addr
;
598 if ((addr
>= EEPROMSIZE
&& addr
!= EEPROM_STAT_ADDR
) || (addr
& 3))
601 t3_os_pci_write_config_4(adapter
, base
+ PCI_VPD_DATA
,
603 t3_os_pci_write_config_2(adapter
, base
+ PCI_VPD_ADDR
,
604 (u16
)addr
| PCI_VPD_ADDR_F
);
607 t3_os_pci_read_config_2(adapter
, base
+ PCI_VPD_ADDR
, &val
);
608 } while ((val
& PCI_VPD_ADDR_F
) && --attempts
);
610 if (val
& PCI_VPD_ADDR_F
) {
611 CH_ERR(adapter
, "write to EEPROM address 0x%x failed\n", addr
);
618 * t3_seeprom_wp - enable/disable EEPROM write protection
619 * @adapter: the adapter
620 * @enable: 1 to enable write protection, 0 to disable it
622 * Enables or disables write protection on the serial EEPROM.
624 int t3_seeprom_wp(adapter_t
*adapter
, int enable
)
626 return t3_seeprom_write(adapter
, EEPROM_STAT_ADDR
, enable
? 0xc : 0);
630 * Convert a character holding a hex digit to a number.
/*
 * Convert a character holding a hex digit to a number.
 * Assumes @c is a valid hex digit ('0'-'9', 'a'-'f', 'A'-'F').
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
638 * get_vpd_params - read VPD parameters from VPD EEPROM
639 * @adapter: adapter to read
640 * @p: where to store the parameters
642 * Reads card parameters stored in VPD EEPROM.
644 static int get_vpd_params(adapter_t
*adapter
, struct vpd_params
*p
)
650 * Card information is normally at VPD_BASE but some early cards had
653 ret
= t3_seeprom_read(adapter
, VPD_BASE
, (u32
*)&vpd
);
656 addr
= vpd
.id_tag
== 0x82 ? VPD_BASE
: 0;
658 for (i
= 0; i
< sizeof(vpd
); i
+= 4) {
659 ret
= t3_seeprom_read(adapter
, addr
+ i
,
660 (u32
*)((u8
*)&vpd
+ i
));
665 p
->cclk
= simple_strtoul(vpd
.cclk_data
, NULL
, 10);
666 p
->mclk
= simple_strtoul(vpd
.mclk_data
, NULL
, 10);
667 p
->uclk
= simple_strtoul(vpd
.uclk_data
, NULL
, 10);
668 p
->mdc
= simple_strtoul(vpd
.mdc_data
, NULL
, 10);
669 p
->mem_timing
= simple_strtoul(vpd
.mt_data
, NULL
, 10);
670 memcpy(p
->sn
, vpd
.sn_data
, SERNUM_LEN
);
672 /* Old eeproms didn't have port information */
673 if (adapter
->params
.rev
== 0 && !vpd
.port0_data
[0]) {
674 p
->port_type
[0] = uses_xaui(adapter
) ? 1 : 2;
675 p
->port_type
[1] = uses_xaui(adapter
) ? 6 : 2;
677 p
->port_type
[0] = (u8
)hex2int(vpd
.port0_data
[0]);
678 p
->port_type
[1] = (u8
)hex2int(vpd
.port1_data
[0]);
679 p
->port_type
[2] = (u8
)hex2int(vpd
.port2_data
[0]);
680 p
->port_type
[3] = (u8
)hex2int(vpd
.port3_data
[0]);
681 p
->xauicfg
[0] = simple_strtoul(vpd
.xaui0cfg_data
, NULL
, 16);
682 p
->xauicfg
[1] = simple_strtoul(vpd
.xaui1cfg_data
, NULL
, 16);
685 for (i
= 0; i
< 6; i
++)
686 p
->eth_base
[i
] = hex2int(vpd
.na_data
[2 * i
]) * 16 +
687 hex2int(vpd
.na_data
[2 * i
+ 1]);
691 /* serial flash and firmware constants */
693 SF_ATTEMPTS
= 5, /* max retries for SF1 operations */
694 SF_SEC_SIZE
= 64 * 1024, /* serial flash sector size */
695 SF_SIZE
= SF_SEC_SIZE
* 8, /* serial flash size */
697 /* flash command opcodes */
698 SF_PROG_PAGE
= 2, /* program page */
699 SF_WR_DISABLE
= 4, /* disable writes */
700 SF_RD_STATUS
= 5, /* read status register */
701 SF_WR_ENABLE
= 6, /* enable writes */
702 SF_RD_DATA_FAST
= 0xb, /* read flash */
703 SF_ERASE_SECTOR
= 0xd8, /* erase sector */
705 FW_FLASH_BOOT_ADDR
= 0x70000, /* start address of FW in flash */
706 FW_VERS_ADDR
= 0x77ffc, /* flash address holding FW version */
707 FW_MIN_SIZE
= 8 /* at least version and csum */
711 * sf1_read - read data from the serial flash
712 * @adapter: the adapter
713 * @byte_cnt: number of bytes to read
714 * @cont: whether another operation will be chained
715 * @valp: where to store the read data
717 * Reads up to 4 bytes of data from the serial flash. The location of
718 * the read needs to be specified prior to calling this by issuing the
719 * appropriate commands to the serial flash.
721 static int sf1_read(adapter_t
*adapter
, unsigned int byte_cnt
, int cont
,
726 if (!byte_cnt
|| byte_cnt
> 4)
728 if (t3_read_reg(adapter
, A_SF_OP
) & F_BUSY
)
730 t3_write_reg(adapter
, A_SF_OP
, V_CONT(cont
) | V_BYTECNT(byte_cnt
- 1));
731 ret
= t3_wait_op_done(adapter
, A_SF_OP
, F_BUSY
, 0, SF_ATTEMPTS
, 10);
733 *valp
= t3_read_reg(adapter
, A_SF_DATA
);
738 * sf1_write - write data to the serial flash
739 * @adapter: the adapter
740 * @byte_cnt: number of bytes to write
741 * @cont: whether another operation will be chained
742 * @val: value to write
744 * Writes up to 4 bytes of data to the serial flash. The location of
745 * the write needs to be specified prior to calling this by issuing the
746 * appropriate commands to the serial flash.
748 static int sf1_write(adapter_t
*adapter
, unsigned int byte_cnt
, int cont
,
751 if (!byte_cnt
|| byte_cnt
> 4)
753 if (t3_read_reg(adapter
, A_SF_OP
) & F_BUSY
)
755 t3_write_reg(adapter
, A_SF_DATA
, val
);
756 t3_write_reg(adapter
, A_SF_OP
,
757 V_CONT(cont
) | V_BYTECNT(byte_cnt
- 1) | V_OP(1));
758 return t3_wait_op_done(adapter
, A_SF_OP
, F_BUSY
, 0, SF_ATTEMPTS
, 10);
762 * flash_wait_op - wait for a flash operation to complete
763 * @adapter: the adapter
764 * @attempts: max number of polls of the status register
765 * @delay: delay between polls in ms
767 * Wait for a flash operation to complete by polling the status register.
769 static int flash_wait_op(adapter_t
*adapter
, int attempts
, int delay
)
775 if ((ret
= sf1_write(adapter
, 1, 1, SF_RD_STATUS
)) != 0 ||
776 (ret
= sf1_read(adapter
, 1, 0, &status
)) != 0)
788 * t3_read_flash - read words from serial flash
789 * @adapter: the adapter
790 * @addr: the start address for the read
791 * @nwords: how many 32-bit words to read
792 * @data: where to store the read data
793 * @byte_oriented: whether to store data as bytes or as words
795 * Read the specified number of 32-bit words from the serial flash.
796 * If @byte_oriented is set the read data is stored as a byte array
797 * (i.e., big-endian), otherwise as 32-bit words in the platform's
800 int t3_read_flash(adapter_t
*adapter
, unsigned int addr
, unsigned int nwords
,
801 u32
*data
, int byte_oriented
)
805 if (addr
+ nwords
* sizeof(u32
) > SF_SIZE
|| (addr
& 3))
808 addr
= swab32(addr
) | SF_RD_DATA_FAST
;
810 if ((ret
= sf1_write(adapter
, 4, 1, addr
)) != 0 ||
811 (ret
= sf1_read(adapter
, 1, 1, data
)) != 0)
814 for ( ; nwords
; nwords
--, data
++) {
815 ret
= sf1_read(adapter
, 4, nwords
> 1, data
);
819 *data
= htonl(*data
);
825 * t3_write_flash - write up to a page of data to the serial flash
826 * @adapter: the adapter
827 * @addr: the start address to write
828 * @n: length of data to write
829 * @data: the data to write
831 * Writes up to a page of data (256 bytes) to the serial flash starting
832 * at the given address.
834 static int t3_write_flash(adapter_t
*adapter
, unsigned int addr
,
835 unsigned int n
, const u8
*data
)
839 unsigned int i
, c
, left
, val
, offset
= addr
& 0xff;
841 if (addr
+ n
> SF_SIZE
|| offset
+ n
> 256)
844 val
= swab32(addr
) | SF_PROG_PAGE
;
846 if ((ret
= sf1_write(adapter
, 1, 0, SF_WR_ENABLE
)) != 0 ||
847 (ret
= sf1_write(adapter
, 4, 1, val
)) != 0)
850 for (left
= n
; left
; left
-= c
) {
852 for (val
= 0, i
= 0; i
< c
; ++i
)
853 val
= (val
<< 8) + *data
++;
855 ret
= sf1_write(adapter
, c
, c
!= left
, val
);
859 if ((ret
= flash_wait_op(adapter
, 5, 1)) != 0)
862 /* Read the page to verify the write succeeded */
863 ret
= t3_read_flash(adapter
, addr
& ~0xff, ARRAY_SIZE(buf
), buf
, 1);
867 if (memcmp(data
- n
, (u8
*)buf
+ offset
, n
))
873 * t3_get_tp_version - read the tp sram version
874 * @adapter: the adapter
875 * @vers: where to place the version
877 * Reads the protocol sram version from sram.
879 int t3_get_tp_version(adapter_t
*adapter
, u32
*vers
)
883 /* Get version loaded in SRAM */
884 t3_write_reg(adapter
, A_TP_EMBED_OP_FIELD0
, 0);
885 ret
= t3_wait_op_done(adapter
, A_TP_EMBED_OP_FIELD0
,
890 *vers
= t3_read_reg(adapter
, A_TP_EMBED_OP_FIELD1
);
896 * t3_check_tpsram_version - read the tp sram version
897 * @adapter: the adapter
900 int t3_check_tpsram_version(adapter_t
*adapter
)
904 unsigned int major
, minor
;
906 /* Get version loaded in SRAM */
907 t3_write_reg(adapter
, A_TP_EMBED_OP_FIELD0
, 0);
908 ret
= t3_wait_op_done(adapter
, A_TP_EMBED_OP_FIELD0
,
913 vers
= t3_read_reg(adapter
, A_TP_EMBED_OP_FIELD1
);
915 major
= G_TP_VERSION_MAJOR(vers
);
916 minor
= G_TP_VERSION_MINOR(vers
);
918 if (major
== TP_VERSION_MAJOR
&& minor
== TP_VERSION_MINOR
)
921 CH_WARN(adapter
, "found wrong TP version (%u.%u), "
922 "driver needs version %d.%d\n", major
, minor
,
923 TP_VERSION_MAJOR
, TP_VERSION_MINOR
);
928 * t3_check_tpsram - check if provided protocol SRAM
929 * is compatible with this driver
930 * @adapter: the adapter
931 * @tp_sram: the firmware image to write
934 * Checks if an adapter's tp sram is compatible with the driver.
935 * Returns 0 if the versions are compatible, a negative error otherwise.
937 int t3_check_tpsram(adapter_t
*adapter
, const u8
*tp_sram
, unsigned int size
)
941 const u32
*p
= (const u32
*)tp_sram
;
943 /* Verify checksum */
944 for (csum
= 0, i
= 0; i
< size
/ sizeof(csum
); i
++)
946 if (csum
!= 0xffffffff) {
947 CH_ERR(adapter
, "corrupted protocol SRAM image, checksum %u\n",
955 enum fw_version_type
{
961 * t3_get_fw_version - read the firmware version
962 * @adapter: the adapter
963 * @vers: where to place the version
965 * Reads the FW version from flash.
967 int t3_get_fw_version(adapter_t
*adapter
, u32
*vers
)
969 return t3_read_flash(adapter
, FW_VERS_ADDR
, 1, vers
, 0);
973 * t3_check_fw_version - check if the FW is compatible with this driver
974 * @adapter: the adapter
976 * Checks if an adapter's FW is compatible with the driver. Returns 0
977 * if the versions are compatible, a negative error otherwise.
979 int t3_check_fw_version(adapter_t
*adapter
)
983 unsigned int type
, major
, minor
;
985 ret
= t3_get_fw_version(adapter
, &vers
);
989 type
= G_FW_VERSION_TYPE(vers
);
990 major
= G_FW_VERSION_MAJOR(vers
);
991 minor
= G_FW_VERSION_MINOR(vers
);
993 if (type
== FW_VERSION_T3
&& major
== FW_VERSION_MAJOR
&&
994 minor
== FW_VERSION_MINOR
)
997 CH_WARN(adapter
, "found wrong FW version (%u.%u), "
998 "driver needs version %d.%d\n", major
, minor
,
999 FW_VERSION_MAJOR
, FW_VERSION_MINOR
);
1004 * t3_flash_erase_sectors - erase a range of flash sectors
1005 * @adapter: the adapter
1006 * @start: the first sector to erase
1007 * @end: the last sector to erase
1009 * Erases the sectors in the given range.
1011 static int t3_flash_erase_sectors(adapter_t
*adapter
, int start
, int end
)
1013 while (start
<= end
) {
1016 if ((ret
= sf1_write(adapter
, 1, 0, SF_WR_ENABLE
)) != 0 ||
1017 (ret
= sf1_write(adapter
, 4, 0,
1018 SF_ERASE_SECTOR
| (start
<< 8))) != 0 ||
1019 (ret
= flash_wait_op(adapter
, 5, 500)) != 0)
1027 * t3_load_fw - download firmware
1028 * @adapter: the adapter
1029 * @fw_data: the firmware image to write
1032 * Write the supplied firmware image to the card's serial flash.
1033 * The FW image has the following sections: @size - 8 bytes of code and
1034 * data, followed by 4 bytes of FW version, followed by the 32-bit
1035 * 1's complement checksum of the whole image.
1037 int t3_load_fw(adapter_t
*adapter
, const u8
*fw_data
, unsigned int size
)
1041 const u32
*p
= (const u32
*)fw_data
;
1042 int ret
, addr
, fw_sector
= FW_FLASH_BOOT_ADDR
>> 16;
1044 if ((size
& 3) || size
< FW_MIN_SIZE
)
1046 if (size
> FW_VERS_ADDR
+ 8 - FW_FLASH_BOOT_ADDR
)
1049 for (csum
= 0, i
= 0; i
< size
/ sizeof(csum
); i
++)
1050 csum
+= ntohl(p
[i
]);
1051 if (csum
!= 0xffffffff) {
1052 CH_ERR(adapter
, "corrupted firmware image, checksum %u\n",
1057 ret
= t3_flash_erase_sectors(adapter
, fw_sector
, fw_sector
);
1061 size
-= 8; /* trim off version and checksum */
1062 for (addr
= FW_FLASH_BOOT_ADDR
; size
; ) {
1063 unsigned int chunk_size
= min(size
, 256U);
1065 ret
= t3_write_flash(adapter
, addr
, chunk_size
, fw_data
);
1070 fw_data
+= chunk_size
;
1074 ret
= t3_write_flash(adapter
, FW_VERS_ADDR
, 4, fw_data
);
1077 CH_ERR(adapter
, "firmware download failed, error %d\n", ret
);
1081 #define CIM_CTL_BASE 0x2000
1084 * t3_cim_ctl_blk_read - read a block from CIM control region
1085 * @adap: the adapter
1086 * @addr: the start address within the CIM control region
1087 * @n: number of words to read
1088 * @valp: where to store the result
1090 * Reads a block of 4-byte words from the CIM control region.
1092 int t3_cim_ctl_blk_read(adapter_t
*adap
, unsigned int addr
, unsigned int n
,
1097 if (t3_read_reg(adap
, A_CIM_HOST_ACC_CTRL
) & F_HOSTBUSY
)
1100 for ( ; !ret
&& n
--; addr
+= 4) {
1101 t3_write_reg(adap
, A_CIM_HOST_ACC_CTRL
, CIM_CTL_BASE
+ addr
);
1102 ret
= t3_wait_op_done(adap
, A_CIM_HOST_ACC_CTRL
, F_HOSTBUSY
,
1105 *valp
++ = t3_read_reg(adap
, A_CIM_HOST_ACC_DATA
);
1111 * t3_link_changed - handle interface link changes
1112 * @adapter: the adapter
1113 * @port_id: the port index that changed link state
1115 * Called when a port's link settings change to propagate the new values
1116 * to the associated PHY and MAC. After performing the common tasks it
1117 * invokes an OS-specific handler.
1119 void t3_link_changed(adapter_t
*adapter
, int port_id
)
1121 int link_ok
, speed
, duplex
, fc
;
1122 struct port_info
*pi
= adap2pinfo(adapter
, port_id
);
1123 struct cphy
*phy
= &pi
->phy
;
1124 struct cmac
*mac
= &pi
->mac
;
1125 struct link_config
*lc
= &pi
->link_config
;
1127 phy
->ops
->get_link_status(phy
, &link_ok
, &speed
, &duplex
, &fc
);
1129 if (link_ok
!= lc
->link_ok
&& adapter
->params
.rev
> 0 &&
1130 uses_xaui(adapter
)) {
1133 t3_write_reg(adapter
, A_XGM_XAUI_ACT_CTRL
+ mac
->offset
,
1134 link_ok
? F_TXACTENABLE
| F_RXEN
: 0);
1136 lc
->link_ok
= (unsigned char)link_ok
;
1137 lc
->speed
= speed
< 0 ? SPEED_INVALID
: speed
;
1138 lc
->duplex
= duplex
< 0 ? DUPLEX_INVALID
: duplex
;
1139 if (lc
->requested_fc
& PAUSE_AUTONEG
)
1140 fc
&= lc
->requested_fc
;
1142 fc
= lc
->requested_fc
& (PAUSE_RX
| PAUSE_TX
);
1144 if (link_ok
&& speed
>= 0 && lc
->autoneg
== AUTONEG_ENABLE
) {
1145 /* Set MAC speed, duplex, and flow control to match PHY. */
1146 t3_mac_set_speed_duplex_fc(mac
, speed
, duplex
, fc
);
1147 lc
->fc
= (unsigned char)fc
;
1150 t3_os_link_changed(adapter
, port_id
, link_ok
, speed
, duplex
, fc
);
1154 * t3_link_start - apply link configuration to MAC/PHY
1155 * @phy: the PHY to setup
1156 * @mac: the MAC to setup
1157 * @lc: the requested link configuration
1159 * Set up a port's MAC and PHY according to a desired link configuration.
1160 * - If the PHY can auto-negotiate first decide what to advertise, then
1161 * enable/disable auto-negotiation as desired, and reset.
1162 * - If the PHY does not auto-negotiate just reset it.
1163 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1164 * otherwise do it later based on the outcome of auto-negotiation.
1166 int t3_link_start(struct cphy
*phy
, struct cmac
*mac
, struct link_config
*lc
)
1168 unsigned int fc
= lc
->requested_fc
& (PAUSE_RX
| PAUSE_TX
);
1171 if (lc
->supported
& SUPPORTED_Autoneg
) {
1172 lc
->advertising
&= ~(ADVERTISED_Asym_Pause
| ADVERTISED_Pause
);
1174 lc
->advertising
|= ADVERTISED_Asym_Pause
;
1176 lc
->advertising
|= ADVERTISED_Pause
;
1178 phy
->ops
->advertise(phy
, lc
->advertising
);
1180 if (lc
->autoneg
== AUTONEG_DISABLE
) {
1181 lc
->speed
= lc
->requested_speed
;
1182 lc
->duplex
= lc
->requested_duplex
;
1183 lc
->fc
= (unsigned char)fc
;
1184 t3_mac_set_speed_duplex_fc(mac
, lc
->speed
, lc
->duplex
,
1186 /* Also disables autoneg */
1187 phy
->ops
->set_speed_duplex(phy
, lc
->speed
, lc
->duplex
);
1188 phy
->ops
->reset(phy
, 0);
1190 phy
->ops
->autoneg_enable(phy
);
1192 t3_mac_set_speed_duplex_fc(mac
, -1, -1, fc
);
1193 lc
->fc
= (unsigned char)fc
;
1194 phy
->ops
->reset(phy
, 0);
1200 * t3_set_vlan_accel - control HW VLAN extraction
1201 * @adapter: the adapter
1202 * @ports: bitmap of adapter ports to operate on
1203 * @on: enable (1) or disable (0) HW VLAN extraction
1205 * Enables or disables HW extraction of VLAN tags for the given port.
1207 void t3_set_vlan_accel(adapter_t
*adapter
, unsigned int ports
, int on
)
1209 t3_set_reg_field(adapter
, A_TP_OUT_CONFIG
,
1210 ports
<< S_VLANEXTRACTIONENABLE
,
1211 on
? (ports
<< S_VLANEXTRACTIONENABLE
) : 0);
/*
 * One entry of an interrupt-action table consumed by
 * t3_handle_intr_status(); a table is terminated by an entry with mask 0.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};
/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions
 *	@stats: statistics counters tracking interrupt occurrences
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally printing a warning or alert message, and optionally
 *	incrementing a stat counter.  The table is terminated by an entry
 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask)) continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		/* stat_idx < 0 means this condition has no counter */
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status) /* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
/*
 * Per-module interrupt enable masks: the conditions each handler below
 * cares about.  Bits left commented out (MSI-X parity) are intentionally
 * excluded from the masks.
 */
#define SGE_INTR_MASK (F_RSPQDISABLED)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			V_BISTERR(M_BISTERR) | F_PEXERR)
#define ULPRX_INTR_MASK F_PARERR
#define ULPTX_INTR_MASK 0
#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
/*
 * Interrupt handler for the PCIX1 module.  Reports all enabled PCI-X
 * error conditions; any fatal entry in the table brings the adapter down.
 */
static void pci_intr_handler(adapter_t *adapter)
{
	static struct intr_info pcix1_intr_info[] = {
		{ F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
		{ F_SIGTARABT, "PCI signaled target abort", -1, 1 },
		{ F_RCVTARABT, "PCI received target abort", -1, 1 },
		{ F_RCVMSTABT, "PCI received master abort", -1, 1 },
		{ F_SIGSYSERR, "PCI signaled system error", -1, 1 },
		{ F_DETPARERR, "PCI detected parity error", -1, 1 },
		{ F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
		{ F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
		{ F_RCVSPLCMPERR, "PCI received split completion error", -1,
		  1 },
		/* correctable ECC is counted, not fatal */
		{ F_DETCORECCERR, "PCI correctable ECC error",
		  STAT_PCI_CORR_ECC, 0 },
		{ F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
		{ F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
		{ V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		  1 },
		{ V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		  1 },
		{ V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		  1 },
		{ V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		  "error", -1, 1 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
/*
 * Interrupt handler for the PCIE module.  On a PEX error the raw error
 * code register is dumped before the table-driven processing.
 */
static void pcie_intr_handler(adapter_t *adapter)
{
	static struct intr_info pcie_intr_info[] = {
		{ F_PEXERR, "PCI PEX error", -1, 1 },
		{ F_UNXSPLCPLERRR,
		  "PCI unexpected split completion DMA read error", -1, 1 },
		{ F_UNXSPLCPLERRC,
		  "PCI unexpected split completion DMA command error", -1, 1 },
		{ F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
		{ F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
		{ F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
		{ F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
		{ V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		  "PCI MSI-X table/PBA parity error", -1, 1 },
		{ V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
		{ 0 }
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
/*
 * TP interrupt handler.  All reported TP conditions are fatal.
 */
static void tp_intr_handler(adapter_t *adapter)
{
	static struct intr_info tp_intr_info[] = {
		{ 0xffffff,  "TP parity error", -1, 1 },
		{ 0x1000000, "TP out of Rx pages", -1, 1 },
		{ 0x2000000, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  tp_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * CIM interrupt handler.  Every reported access violation is fatal.
 */
static void cim_intr_handler(adapter_t *adapter)
{
	static struct intr_info cim_intr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
		{ F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
		{ F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(adapter_t *adapter)
{
	static struct intr_info ulprx_intr_info[] = {
		{ F_PARERR, "ULP RX parity error", -1, 1 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * ULP TX interrupt handler.  PBL out-of-bounds conditions are only
 * counted (non-fatal); anything else that makes it through the enable
 * mask is fatal.
 */
static void ulptx_intr_handler(adapter_t *adapter)
{
	static struct intr_info ulptx_intr_info[] = {
		{ F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		  STAT_ULP_CH0_PBL_OOB, 0 },
		{ F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		  STAT_ULP_CH1_PBL_OOB, 0 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
/* Framing-error bit groups for the PM TX serial interfaces (ispi/ospi). */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(adapter_t *adapter)
{
	static struct intr_info pmtx_intr_info[] = {
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
		{ OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
		{ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		  "PMTX ispi parity error", -1, 1 },
		{ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		  "PMTX ospi parity error", -1, 1 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/* Framing-error bit groups for the PM RX serial interfaces (ispi/ospi). */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(adapter_t *adapter)
{
	static struct intr_info pmrx_intr_info[] = {
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
		{ OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
		{ V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		  "PMRX ispi parity error", -1, 1 },
		{ V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		  "PMRX ospi parity error", -1, 1 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(adapter_t *adapter)
{
	static struct intr_info cplsw_intr_info[] = {
		/* CIM overflow is intentionally not in this table: */
		// { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
		{ F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
		{ F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
		{ F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
		{ F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(adapter_t *adapter)
{
	static struct intr_info mps_intr_info[] = {
		{ 0x1ff, "MPS parity error", -1, 1 },
		{ 0 }
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.  Decodes and counts correctable, uncorrectable,
 * parity, and address errors for one MC7 memory controller instance;
 * the cause register is cleared last, after any fatal-error escalation.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	adapter_t *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		/* the error address register is only valid on rev > 0 parts */
		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))

/*
 * XGMAC interrupt handler.  @idx selects MAC 0 or 1; returns nonzero if
 * any interrupt cause bit was set.
 */
static int mac_intr_handler(adapter_t *adap, unsigned int idx)
{
	u32 cause;
	struct cmac *mac;

	idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
	mac = &adap2pinfo(adap, idx)->mac;
	cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;

	/* clear the cause before possibly escalating to a fatal error */
	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);
	return cause != 0;
}
/*
 * Interrupt handler for PHY events.  Walks the ports, peeling one GPIO
 * interrupt bit per port out of @gpi, and dispatches to each PHY's own
 * interrupt handler when its bit is set in the debug interrupt cause.
 */
int t3_phy_intr_handler(adapter_t *adapter)
{
	u32 mask, gpi = adapter_info(adapter)->gpio_intr;
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);

		/* isolate the lowest set bit of gpi for this port */
		mask = gpi - (gpi & (gpi - 1));
		gpi -= mask;

		if (!(p->port_type->caps & SUPPORTED_IRQ))
			continue;

		if (cause & mask) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
		}
	}

	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}
/**
 *	t3_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T3 interrupt handler for non-data interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 *	Returns 0 when no enabled cause bit was set, 1 otherwise.
 */
int t3_slow_intr_handler(adapter_t *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
	return 1;
}
/**
 *	t3_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable interrupts by setting the interrupt enable registers of the
 *	various HW modules and then enabling the top-level interrupt
 *	concentrator.
 */
void t3_intr_enable(adapter_t *adapter)
{
	static struct addr_val_pair intr_en_avp[] = {
		{ A_SG_INT_ENABLE, SGE_INTR_MASK },
		{ A_MC7_INT_ENABLE, MC7_INTR_MASK },
		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
			MC7_INTR_MASK },
		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
			MC7_INTR_MASK },
		{ A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
		{ A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
		{ A_TP_INT_ENABLE, 0x3bfffff },
		{ A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
		{ A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
		{ A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
		{ A_MPS_INT_ENABLE, MPS_INTR_MASK },
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);

	/* rev > 0 parts support extra CPL/ULPTX error reporting */
	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
		     adapter_info(adapter)->gpio_intr);
	t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
		     adapter_info(adapter)->gpio_intr);
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
}
/**
 *	t3_intr_disable - disable a card's interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrator and the SGE data interrupts.
 */
void t3_intr_disable(adapter_t *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
	adapter->slow_intr_mask = 0;
}
/**
 *	t3_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts: per-port MAC/PHY causes first, then every
 *	module cause register, then the top-level concentrator.
 */
void t3_intr_clear(adapter_t *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
}
/**
 *	t3_port_intr_enable - enable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be enabled
 *
 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_enable(adapter_t *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
	pi->phy.ops->intr_enable(&pi->phy);
}
/**
 *	t3_port_intr_disable - disable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be disabled
 *
 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_disable(adapter_t *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
	pi->phy.ops->intr_disable(&pi->phy);
}
/**
 *	t3_port_intr_clear - clear port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts to clear
 *
 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_clear(adapter_t *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
	pi->phy.ops->intr_clear(&pi->phy);
}
/* polling budget for SG context command completion */
#define SG_CONTEXT_CMD_ATTEMPTS 100

/**
 *	t3_sge_write_context - write an SGE context
 *	@adapter: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Program an SGE context with the values already loaded in the
 *	CONTEXT_DATA? registers.  All mask registers are set to all-ones so
 *	every data bit is written.
 */
static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
				unsigned int type)
{
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_init_ecntxt - initialize an SGE egress context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@type: the egress context type
 *	@respq: associated response queue
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@token: uP token
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE egress context and make it ready for use.  If the
 *	platform allows concurrent context operations, the caller is
 *	responsible for appropriate locking.
 */
int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	/* offload contexts get no FW write-request credits */
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)     /* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* base_addr is consumed 4K-page-aligned, split across DATA1-3 */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO((u32)base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
/**
 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@bsize: size of each buffer for this queue
 *	@cong_thres: threshold to signal congestion to upstream producers
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE free list context and make it ready for use.  The
 *	caller is responsible for ensuring only one context operation occurs
 *	at a time.
 */
int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
			u64 base_addr, unsigned int size, unsigned int bsize,
			unsigned int cong_thres, int gen, unsigned int cidx)
{
	if (base_addr & 0xfff)     /* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32)base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}
/**
 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@fl_thres: threshold for selecting the normal or jumbo free list
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE response queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
			 u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)     /* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
	base_addr >>= 32;
	/* irq_vec_idx < 0 leaves the queue without interrupt delivery */
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
/**
 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@rspq: response queue for async notifications
 *	@ovfl_mode: CQ overflow mode
 *	@credits: completion queue credits
 *	@credit_thres: the credit threshold
 *
 *	Initialize an SGE completion queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)     /* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}
/**
 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
 *	@adapter: the adapter
 *	@id: the egress context id
 *	@enable: enable (1) or disable (0) the context
 *
 *	Enable or disable an SGE egress context.  Only the EC_VALID bit is
 *	unmasked, so the rest of the context is left untouched.  The caller
 *	is responsible for ensuring only one context operation occurs at a
 *	time.
 */
int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_disable_fl - disable an SGE free-buffer list
 *	@adapter: the adapter
 *	@id: the free list context id
 *
 *	Disable an SGE free-buffer list by zeroing its size field.  The
 *	caller is responsible for ensuring only one context operation occurs
 *	at a time.
 */
int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_disable_rspcntxt - disable an SGE response queue
 *	@adapter: the adapter
 *	@id: the response queue context id
 *
 *	Disable an SGE response queue by zeroing its size field.  The caller
 *	is responsible for ensuring only one context operation occurs at a
 *	time.
 */
int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
 *	@adapter: the adapter
 *	@id: the completion queue context id
 *
 *	Disable an SGE completion queue by zeroing its size field.  The
 *	caller is responsible for ensuring only one context operation occurs
 *	at a time.
 */
int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
 *	@adapter: the adapter
 *	@id: the context id
 *	@op: the operation to perform
 *	@credits: credits to return to the CQ
 *
 *	Perform the selected operation on an SGE completion queue context.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 *
 *	For most operations the function returns the current HW position in
 *	the completion queue.
 */
int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

	if (op >= 2 && op < 7) {
		/* rev > 0 parts return the CQ index directly in the result */
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		/* rev 0 needs an explicit context read-back to get the index */
		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}
2204 * t3_sge_read_context - read an SGE context
2205 * @type: the context type
2206 * @adapter: the adapter
2207 * @id: the context id
2208 * @data: holds the retrieved context
2210 * Read an SGE egress context. The caller is responsible for ensuring
2211 * only one context operation occurs at a time.
2213 static int t3_sge_read_context(unsigned int type
, adapter_t
*adapter
,
2214 unsigned int id
, u32 data
[4])
2216 if (t3_read_reg(adapter
, A_SG_CONTEXT_CMD
) & F_CONTEXT_CMD_BUSY
)
2219 t3_write_reg(adapter
, A_SG_CONTEXT_CMD
,
2220 V_CONTEXT_CMD_OPCODE(0) | type
| V_CONTEXT(id
));
2221 if (t3_wait_op_done(adapter
, A_SG_CONTEXT_CMD
, F_CONTEXT_CMD_BUSY
, 0,
2222 SG_CONTEXT_CMD_ATTEMPTS
, 1))
2224 data
[0] = t3_read_reg(adapter
, A_SG_CONTEXT_DATA0
);
2225 data
[1] = t3_read_reg(adapter
, A_SG_CONTEXT_DATA1
);
2226 data
[2] = t3_read_reg(adapter
, A_SG_CONTEXT_DATA2
);
2227 data
[3] = t3_read_reg(adapter
, A_SG_CONTEXT_DATA3
);
2232 * t3_sge_read_ecntxt - read an SGE egress context
2233 * @adapter: the adapter
2234 * @id: the context id
2235 * @data: holds the retrieved context
2237 * Read an SGE egress context. The caller is responsible for ensuring
2238 * only one context operation occurs at a time.
2240 int t3_sge_read_ecntxt(adapter_t
*adapter
, unsigned int id
, u32 data
[4])
2244 return t3_sge_read_context(F_EGRESS
, adapter
, id
, data
);
2248 * t3_sge_read_cq - read an SGE CQ context
2249 * @adapter: the adapter
2250 * @id: the context id
2251 * @data: holds the retrieved context
2253 * Read an SGE CQ context. The caller is responsible for ensuring
2254 * only one context operation occurs at a time.
2256 int t3_sge_read_cq(adapter_t
*adapter
, unsigned int id
, u32 data
[4])
2260 return t3_sge_read_context(F_CQ
, adapter
, id
, data
);
2264 * t3_sge_read_fl - read an SGE free-list context
2265 * @adapter: the adapter
2266 * @id: the context id
2267 * @data: holds the retrieved context
2269 * Read an SGE free-list context. The caller is responsible for ensuring
2270 * only one context operation occurs at a time.
2272 int t3_sge_read_fl(adapter_t
*adapter
, unsigned int id
, u32 data
[4])
2274 if (id
>= SGE_QSETS
* 2)
2276 return t3_sge_read_context(F_FREELIST
, adapter
, id
, data
);
2280 * t3_sge_read_rspq - read an SGE response queue context
2281 * @adapter: the adapter
2282 * @id: the context id
2283 * @data: holds the retrieved context
2285 * Read an SGE response queue context. The caller is responsible for
2286 * ensuring only one context operation occurs at a time.
2288 int t3_sge_read_rspq(adapter_t
*adapter
, unsigned int id
, u32 data
[4])
2290 if (id
>= SGE_QSETS
)
2292 return t3_sge_read_context(F_RESPONSEQ
, adapter
, id
, data
);
2296 * t3_config_rss - configure Rx packet steering
2297 * @adapter: the adapter
2298 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2299 * @cpus: values for the CPU lookup table (0xff terminated)
2300 * @rspq: values for the response queue lookup table (0xffff terminated)
2302 * Programs the receive packet steering logic. @cpus and @rspq provide
2303 * the values for the CPU and response queue lookup tables. If they
2304 * provide fewer values than the size of the tables the supplied values
2305 * are used repeatedly until the tables are fully populated.
2307 void t3_config_rss(adapter_t
*adapter
, unsigned int rss_config
, const u8
*cpus
,
2310 int i
, j
, cpu_idx
= 0, q_idx
= 0;
2313 for (i
= 0; i
< RSS_TABLE_SIZE
; ++i
) {
2316 for (j
= 0; j
< 2; ++j
) {
2317 val
|= (cpus
[cpu_idx
++] & 0x3f) << (8 * j
);
2318 if (cpus
[cpu_idx
] == 0xff)
2321 t3_write_reg(adapter
, A_TP_RSS_LKP_TABLE
, val
);
2325 for (i
= 0; i
< RSS_TABLE_SIZE
; ++i
) {
2326 t3_write_reg(adapter
, A_TP_RSS_MAP_TABLE
,
2327 (i
<< 16) | rspq
[q_idx
++]);
2328 if (rspq
[q_idx
] == 0xffff)
2332 t3_write_reg(adapter
, A_TP_RSS_CONFIG
, rss_config
);
2336 * t3_read_rss - read the contents of the RSS tables
2337 * @adapter: the adapter
2338 * @lkup: holds the contents of the RSS lookup table
2339 * @map: holds the contents of the RSS map table
2341 * Reads the contents of the receive packet steering tables.
2343 int t3_read_rss(adapter_t
*adapter
, u8
*lkup
, u16
*map
)
2349 for (i
= 0; i
< RSS_TABLE_SIZE
; ++i
) {
2350 t3_write_reg(adapter
, A_TP_RSS_LKP_TABLE
,
2352 val
= t3_read_reg(adapter
, A_TP_RSS_LKP_TABLE
);
2353 if (!(val
& 0x80000000))
2356 *lkup
++ = (u8
)(val
>> 8);
2360 for (i
= 0; i
< RSS_TABLE_SIZE
; ++i
) {
2361 t3_write_reg(adapter
, A_TP_RSS_MAP_TABLE
,
2363 val
= t3_read_reg(adapter
, A_TP_RSS_MAP_TABLE
);
2364 if (!(val
& 0x80000000))
2372 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2373 * @adap: the adapter
2374 * @enable: 1 to select offload mode, 0 for regular NIC
2376 * Switches TP to NIC/offload mode.
2378 void t3_tp_set_offload_mode(adapter_t
*adap
, int enable
)
2380 if (is_offload(adap
) || !enable
)
2381 t3_set_reg_field(adap
, A_TP_IN_CONFIG
, F_NICMODE
,
2382 V_NICMODE(!enable
));
2386 * tp_wr_bits_indirect - set/clear bits in an indirect TP register
2387 * @adap: the adapter
2388 * @addr: the indirect TP register address
2389 * @mask: specifies the field within the register to modify
2390 * @val: new value for the field
2392 * Sets a field of an indirect TP register to the given value.
2394 static void tp_wr_bits_indirect(adapter_t
*adap
, unsigned int addr
,
2395 unsigned int mask
, unsigned int val
)
2397 t3_write_reg(adap
, A_TP_PIO_ADDR
, addr
);
2398 val
|= t3_read_reg(adap
, A_TP_PIO_DATA
) & ~mask
;
2399 t3_write_reg(adap
, A_TP_PIO_DATA
, val
);
2403 * t3_enable_filters - enable the HW filters
2404 * @adap: the adapter
2406 * Enables the HW filters for NIC traffic.
2408 void t3_enable_filters(adapter_t
*adap
)
2410 t3_set_reg_field(adap
, A_TP_IN_CONFIG
, F_NICMODE
, 0);
2411 t3_set_reg_field(adap
, A_MC5_DB_CONFIG
, 0, F_FILTEREN
);
2412 t3_set_reg_field(adap
, A_TP_GLOBAL_CONFIG
, 0, V_FIVETUPLELOOKUP(3));
2413 tp_wr_bits_indirect(adap
, A_TP_INGRESS_CONFIG
, 0, F_LOOKUPEVERYPKT
);
/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	unsigned int n = mem_size / pg_size;

	return n - n % 24;	/* round down to a multiple of 24 */
}
/*
 * Program a memory-region base-address register and advance the running
 * allocation cursor 'start' past the region.  NOTE: a macro (not a
 * function) because 'reg' is token-pasted; 'start' is evaluated twice.
 */
#define mem_region(adap, start, size, reg) \
	t3_write_reg((adap), A_ ## reg, (start)); \
	start += size
/*
 * fls: find last bit set.
 *
 * Returns the 1-based index of the most significant set bit of @x,
 * or 0 if @x is 0 (so fls(1) == 1, fls(0x80000000) == 32).
 * Binary-search implementation: shift the value left in halving steps
 * until the top bit is set, decrementing the result accordingly.
 */
static __inline int fls(int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
2472 * partition_mem - partition memory and configure TP memory settings
2473 * @adap: the adapter
2474 * @p: the TP parameters
2476 * Partitions context and payload memory and configures TP's memory
2479 static void partition_mem(adapter_t
*adap
, const struct tp_params
*p
)
2481 unsigned int m
, pstructs
, tids
= t3_mc5_size(&adap
->mc5
);
2482 unsigned int timers
= 0, timers_shift
= 22;
2484 if (adap
->params
.rev
> 0) {
2485 if (tids
<= 16 * 1024) {
2488 } else if (tids
<= 64 * 1024) {
2491 } else if (tids
<= 256 * 1024) {
2497 t3_write_reg(adap
, A_TP_PMM_SIZE
,
2498 p
->chan_rx_size
| (p
->chan_tx_size
>> 16));
2500 t3_write_reg(adap
, A_TP_PMM_TX_BASE
, 0);
2501 t3_write_reg(adap
, A_TP_PMM_TX_PAGE_SIZE
, p
->tx_pg_size
);
2502 t3_write_reg(adap
, A_TP_PMM_TX_MAX_PAGE
, p
->tx_num_pgs
);
2503 t3_set_reg_field(adap
, A_TP_PARA_REG3
, V_TXDATAACKIDX(M_TXDATAACKIDX
),
2504 V_TXDATAACKIDX(fls(p
->tx_pg_size
) - 12));
2506 t3_write_reg(adap
, A_TP_PMM_RX_BASE
, 0);
2507 t3_write_reg(adap
, A_TP_PMM_RX_PAGE_SIZE
, p
->rx_pg_size
);
2508 t3_write_reg(adap
, A_TP_PMM_RX_MAX_PAGE
, p
->rx_num_pgs
);
2510 pstructs
= p
->rx_num_pgs
+ p
->tx_num_pgs
;
2511 /* Add a bit of headroom and make multiple of 24 */
2513 pstructs
-= pstructs
% 24;
2514 t3_write_reg(adap
, A_TP_CMM_MM_MAX_PSTRUCT
, pstructs
);
2516 m
= tids
* TCB_SIZE
;
2517 mem_region(adap
, m
, (64 << 10) * 64, SG_EGR_CNTX_BADDR
);
2518 mem_region(adap
, m
, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR
);
2519 t3_write_reg(adap
, A_TP_CMM_TIMER_BASE
, V_CMTIMERMAXNUM(timers
) | m
);
2520 m
+= ((p
->ntimer_qs
- 1) << timers_shift
) + (1 << 22);
2521 mem_region(adap
, m
, pstructs
* 64, TP_CMM_MM_BASE
);
2522 mem_region(adap
, m
, 64 * (pstructs
/ 24), TP_CMM_MM_PS_FLST_BASE
);
2523 mem_region(adap
, m
, 64 * (p
->rx_num_pgs
/ 24), TP_CMM_MM_RX_FLST_BASE
);
2524 mem_region(adap
, m
, 64 * (p
->tx_num_pgs
/ 24), TP_CMM_MM_TX_FLST_BASE
);
2526 m
= (m
+ 4095) & ~0xfff;
2527 t3_write_reg(adap
, A_CIM_SDRAM_BASE_ADDR
, m
);
2528 t3_write_reg(adap
, A_CIM_SDRAM_ADDR_SIZE
, p
->cm_size
- m
);
2530 tids
= (p
->cm_size
- m
- (3 << 20)) / 3072 - 32;
2531 m
= t3_mc5_size(&adap
->mc5
) - adap
->params
.mc5
.nservers
-
2532 adap
->params
.mc5
.nfilters
- adap
->params
.mc5
.nroutes
;
2534 adap
->params
.mc5
.nservers
+= m
- tids
;
2537 static inline void tp_wr_indirect(adapter_t
*adap
, unsigned int addr
, u32 val
)
2539 t3_write_reg(adap
, A_TP_PIO_ADDR
, addr
);
2540 t3_write_reg(adap
, A_TP_PIO_DATA
, val
);
2543 static void tp_config(adapter_t
*adap
, const struct tp_params
*p
)
2545 t3_write_reg(adap
, A_TP_GLOBAL_CONFIG
, F_TXPACINGENABLE
| F_PATHMTU
|
2546 F_IPCHECKSUMOFFLOAD
| F_UDPCHECKSUMOFFLOAD
|
2547 F_TCPCHECKSUMOFFLOAD
| V_IPTTL(64));
2548 t3_write_reg(adap
, A_TP_TCP_OPTIONS
, V_MTUDEFAULT(576) |
2549 F_MTUENABLE
| V_WINDOWSCALEMODE(1) |
2550 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2551 t3_write_reg(adap
, A_TP_DACK_CONFIG
, V_AUTOSTATE3(1) |
2552 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2553 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2554 F_AUTOCAREFUL
| F_AUTOENABLE
| V_DACK_MODE(1));
2555 t3_set_reg_field(adap
, A_TP_IN_CONFIG
, F_IPV6ENABLE
| F_NICMODE
,
2556 F_IPV6ENABLE
| F_NICMODE
);
2557 t3_write_reg(adap
, A_TP_TX_RESOURCE_LIMIT
, 0x18141814);
2558 t3_write_reg(adap
, A_TP_PARA_REG4
, 0x5050105);
2559 t3_set_reg_field(adap
, A_TP_PARA_REG6
, 0,
2560 adap
->params
.rev
> 0 ? F_ENABLEESND
:
2562 t3_set_reg_field(adap
, A_TP_PC_CONFIG
,
2564 F_ENABLEOCSPIFULL
|F_TXDEFERENABLE
| F_HEARBEATDACK
|
2565 F_TXCONGESTIONMODE
| F_RXCONGESTIONMODE
);
2566 t3_set_reg_field(adap
, A_TP_PC_CONFIG2
, F_CHDRAFULL
, 0);
2567 t3_write_reg(adap
, A_TP_PROXY_FLOW_CNTL
, 1080);
2568 t3_write_reg(adap
, A_TP_PROXY_FLOW_CNTL
, 1000);
2570 if (adap
->params
.rev
> 0) {
2571 tp_wr_indirect(adap
, A_TP_EGRESS_CONFIG
, F_REWRITEFORCETOSIZE
);
2572 t3_set_reg_field(adap
, A_TP_PARA_REG3
, 0,
2573 F_TXPACEAUTO
| F_TXPACEAUTOSTRICT
);
2574 t3_set_reg_field(adap
, A_TP_PC_CONFIG
, F_LOCKTID
, F_LOCKTID
);
2575 tp_wr_indirect(adap
, A_TP_VLAN_PRI_MAP
, 0xfa50);
2576 tp_wr_indirect(adap
, A_TP_MAC_MATCH_MAP0
, 0xfac688);
2577 tp_wr_indirect(adap
, A_TP_MAC_MATCH_MAP1
, 0xfac688);
2579 t3_set_reg_field(adap
, A_TP_PARA_REG3
, 0, F_TXPACEFIXED
);
2581 t3_write_reg(adap
, A_TP_TX_MOD_QUEUE_WEIGHT1
, 0);
2582 t3_write_reg(adap
, A_TP_TX_MOD_QUEUE_WEIGHT0
, 0);
2583 t3_write_reg(adap
, A_TP_MOD_CHANNEL_WEIGHT
, 0);
2584 t3_write_reg(adap
, A_TP_MOD_RATE_LIMIT
, 0xf2200000);
2586 if (adap
->params
.nports
> 2) {
2587 t3_set_reg_field(adap
, A_TP_PC_CONFIG2
, 0,
2588 F_ENABLETXPORTFROMDA
| F_ENABLERXPORTFROMADDR
);
2589 tp_wr_bits_indirect(adap
, A_TP_QOS_RX_MAP_MODE
,
2590 V_RXMAPMODE(M_RXMAPMODE
), 0);
2591 tp_wr_indirect(adap
, A_TP_INGRESS_CONFIG
, V_BITPOS0(48) |
2592 V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
2593 F_ENABLEEXTRACT
| F_ENABLEEXTRACTIONSFD
|
2594 F_ENABLEINSERTION
| F_ENABLEINSERTIONSFD
);
2595 tp_wr_indirect(adap
, A_TP_PREAMBLE_MSB
, 0xfb000000);
2596 tp_wr_indirect(adap
, A_TP_PREAMBLE_LSB
, 0xd5);
2597 tp_wr_indirect(adap
, A_TP_INTF_FROM_TX_PKT
, F_INTFFROMTXPKT
);
2601 /* TCP timer values in ms */
2602 #define TP_DACK_TIMER 50
2603 #define TP_RTO_MIN 250
2606 * tp_set_timers - set TP timing parameters
2607 * @adap: the adapter to set
2608 * @core_clk: the core clock frequency in Hz
2610 * Set TP's timing parameters, such as the various timer resolutions and
2611 * the TCP timer values.
2613 static void tp_set_timers(adapter_t
*adap
, unsigned int core_clk
)
2615 unsigned int tre
= adap
->params
.tp
.tre
;
2616 unsigned int dack_re
= adap
->params
.tp
.dack_re
;
2617 unsigned int tstamp_re
= fls(core_clk
/ 1000); /* 1ms, at least */
2618 unsigned int tps
= core_clk
>> tre
;
2620 t3_write_reg(adap
, A_TP_TIMER_RESOLUTION
, V_TIMERRESOLUTION(tre
) |
2621 V_DELAYEDACKRESOLUTION(dack_re
) |
2622 V_TIMESTAMPRESOLUTION(tstamp_re
));
2623 t3_write_reg(adap
, A_TP_DACK_TIMER
,
2624 (core_clk
>> dack_re
) / (1000 / TP_DACK_TIMER
));
2625 t3_write_reg(adap
, A_TP_TCP_BACKOFF_REG0
, 0x3020100);
2626 t3_write_reg(adap
, A_TP_TCP_BACKOFF_REG1
, 0x7060504);
2627 t3_write_reg(adap
, A_TP_TCP_BACKOFF_REG2
, 0xb0a0908);
2628 t3_write_reg(adap
, A_TP_TCP_BACKOFF_REG3
, 0xf0e0d0c);
2629 t3_write_reg(adap
, A_TP_SHIFT_CNT
, V_SYNSHIFTMAX(6) |
2630 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2631 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2634 #define SECONDS * tps
2636 t3_write_reg(adap
, A_TP_MSL
,
2637 adap
->params
.rev
> 0 ? 0 : 2 SECONDS
);
2638 t3_write_reg(adap
, A_TP_RXT_MIN
, tps
/ (1000 / TP_RTO_MIN
));
2639 t3_write_reg(adap
, A_TP_RXT_MAX
, 64 SECONDS
);
2640 t3_write_reg(adap
, A_TP_PERS_MIN
, 5 SECONDS
);
2641 t3_write_reg(adap
, A_TP_PERS_MAX
, 64 SECONDS
);
2642 t3_write_reg(adap
, A_TP_KEEP_IDLE
, 7200 SECONDS
);
2643 t3_write_reg(adap
, A_TP_KEEP_INTVL
, 75 SECONDS
);
2644 t3_write_reg(adap
, A_TP_INIT_SRTT
, 3 SECONDS
);
2645 t3_write_reg(adap
, A_TP_FINWAIT2_TIMER
, 600 SECONDS
);
2650 #ifdef CONFIG_CHELSIO_T3_CORE
2652 * t3_tp_set_coalescing_size - set receive coalescing size
2653 * @adap: the adapter
2654 * @size: the receive coalescing size
2655 * @psh: whether a set PSH bit should deliver coalesced data
2657 * Set the receive coalescing size and PSH bit handling.
2659 int t3_tp_set_coalescing_size(adapter_t
*adap
, unsigned int size
, int psh
)
2663 if (size
> MAX_RX_COALESCING_LEN
)
2666 val
= t3_read_reg(adap
, A_TP_PARA_REG3
);
2667 val
&= ~(F_RXCOALESCEENABLE
| F_RXCOALESCEPSHEN
);
2670 val
|= F_RXCOALESCEENABLE
;
2672 val
|= F_RXCOALESCEPSHEN
;
2673 size
= min(MAX_RX_COALESCING_LEN
, size
);
2674 t3_write_reg(adap
, A_TP_PARA_REG2
, V_RXCOALESCESIZE(size
) |
2675 V_MAXRXDATA(MAX_RX_COALESCING_LEN
));
2677 t3_write_reg(adap
, A_TP_PARA_REG3
, val
);
2682 * t3_tp_set_max_rxsize - set the max receive size
2683 * @adap: the adapter
2684 * @size: the max receive size
2686 * Set TP's max receive size. This is the limit that applies when
2687 * receive coalescing is disabled.
2689 void t3_tp_set_max_rxsize(adapter_t
*adap
, unsigned int size
)
2691 t3_write_reg(adap
, A_TP_PARA_REG7
,
2692 V_PMMAXXFERLEN0(size
) | V_PMMAXXFERLEN1(size
));
2695 static void __devinit
init_mtus(unsigned short mtus
[])
2698 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2699 * it can accomodate max size TCP/IP headers when SACK and timestamps
2700 * are enabled and still have at least 8 bytes of payload.
2721 * init_cong_ctrl - initialize congestion control parameters
2722 * @a: the alpha values for congestion control
2723 * @b: the beta values for congestion control
2725 * Initialize the congestion control parameters.
2727 static void __devinit
init_cong_ctrl(unsigned short *a
, unsigned short *b
)
2729 a
[0] = a
[1] = a
[2] = a
[3] = a
[4] = a
[5] = a
[6] = a
[7] = a
[8] = 1;
2754 b
[0] = b
[1] = b
[2] = b
[3] = b
[4] = b
[5] = b
[6] = b
[7] = b
[8] = 0;
2757 b
[13] = b
[14] = b
[15] = b
[16] = 3;
2758 b
[17] = b
[18] = b
[19] = b
[20] = b
[21] = 4;
2759 b
[22] = b
[23] = b
[24] = b
[25] = b
[26] = b
[27] = 5;
2764 /* The minimum additive increment value for the congestion control table */
2765 #define CC_MIN_INCR 2U
2768 * t3_load_mtus - write the MTU and congestion control HW tables
2769 * @adap: the adapter
2770 * @mtus: the unrestricted values for the MTU table
2771 * @alpha: the values for the congestion control alpha parameter
2772 * @beta: the values for the congestion control beta parameter
2773 * @mtu_cap: the maximum permitted effective MTU
2775 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2776 * Update the high-speed congestion control table with the supplied alpha,
2779 void t3_load_mtus(adapter_t
*adap
, unsigned short mtus
[NMTUS
],
2780 unsigned short alpha
[NCCTRL_WIN
],
2781 unsigned short beta
[NCCTRL_WIN
], unsigned short mtu_cap
)
2783 static const unsigned int avg_pkts
[NCCTRL_WIN
] = {
2784 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2785 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2786 28672, 40960, 57344, 81920, 114688, 163840, 229376 };
2790 for (i
= 0; i
< NMTUS
; ++i
) {
2791 unsigned int mtu
= min(mtus
[i
], mtu_cap
);
2792 unsigned int log2
= fls(mtu
);
2794 if (!(mtu
& ((1 << log2
) >> 2))) /* round */
2796 t3_write_reg(adap
, A_TP_MTU_TABLE
,
2797 (i
<< 24) | (log2
<< 16) | mtu
);
2799 for (w
= 0; w
< NCCTRL_WIN
; ++w
) {
2802 inc
= max(((mtu
- 40) * alpha
[w
]) / avg_pkts
[w
],
2805 t3_write_reg(adap
, A_TP_CCTRL_TABLE
, (i
<< 21) |
2806 (w
<< 16) | (beta
[w
] << 13) | inc
);
2812 * t3_read_hw_mtus - returns the values in the HW MTU table
2813 * @adap: the adapter
2814 * @mtus: where to store the HW MTU values
2816 * Reads the HW MTU table.
2818 void t3_read_hw_mtus(adapter_t
*adap
, unsigned short mtus
[NMTUS
])
2822 for (i
= 0; i
< NMTUS
; ++i
) {
2825 t3_write_reg(adap
, A_TP_MTU_TABLE
, 0xff000000 | i
);
2826 val
= t3_read_reg(adap
, A_TP_MTU_TABLE
);
2827 mtus
[i
] = val
& 0x3fff;
2832 * t3_get_cong_cntl_tab - reads the congestion control table
2833 * @adap: the adapter
2834 * @incr: where to store the alpha values
2836 * Reads the additive increments programmed into the HW congestion
2839 void t3_get_cong_cntl_tab(adapter_t
*adap
,
2840 unsigned short incr
[NMTUS
][NCCTRL_WIN
])
2842 unsigned int mtu
, w
;
2844 for (mtu
= 0; mtu
< NMTUS
; ++mtu
)
2845 for (w
= 0; w
< NCCTRL_WIN
; ++w
) {
2846 t3_write_reg(adap
, A_TP_CCTRL_TABLE
,
2847 0xffff0000 | (mtu
<< 5) | w
);
2848 incr
[mtu
][w
] = (unsigned short)t3_read_reg(adap
,
2849 A_TP_CCTRL_TABLE
) & 0x1fff;
2854 * t3_tp_get_mib_stats - read TP's MIB counters
2855 * @adap: the adapter
2856 * @tps: holds the returned counter values
2858 * Returns the values of TP's MIB counters.
2860 void t3_tp_get_mib_stats(adapter_t
*adap
, struct tp_mib_stats
*tps
)
2862 t3_read_indirect(adap
, A_TP_MIB_INDEX
, A_TP_MIB_RDATA
, (u32
*)tps
,
2863 sizeof(*tps
) / sizeof(u32
), 0);
2867 * t3_read_pace_tbl - read the pace table
2868 * @adap: the adapter
2869 * @pace_vals: holds the returned values
2871 * Returns the values of TP's pace table in nanoseconds.
2873 void t3_read_pace_tbl(adapter_t
*adap
, unsigned int pace_vals
[NTX_SCHED
])
2875 unsigned int i
, tick_ns
= dack_ticks_to_usec(adap
, 1000);
2877 for (i
= 0; i
< NTX_SCHED
; i
++) {
2878 t3_write_reg(adap
, A_TP_PACE_TABLE
, 0xffff0000 + i
);
2879 pace_vals
[i
] = t3_read_reg(adap
, A_TP_PACE_TABLE
) * tick_ns
;
2884 * t3_set_pace_tbl - set the pace table
2885 * @adap: the adapter
2886 * @pace_vals: the pace values in nanoseconds
2887 * @start: index of the first entry in the HW pace table to set
2888 * @n: how many entries to set
2890 * Sets (a subset of the) HW pace table.
2892 void t3_set_pace_tbl(adapter_t
*adap
, unsigned int *pace_vals
,
2893 unsigned int start
, unsigned int n
)
2895 unsigned int tick_ns
= dack_ticks_to_usec(adap
, 1000);
2897 for ( ; n
; n
--, start
++, pace_vals
++)
2898 t3_write_reg(adap
, A_TP_PACE_TABLE
, (start
<< 16) |
2899 ((*pace_vals
+ tick_ns
/ 2) / tick_ns
));
/*
 * Program the limit registers of a ULP RX region and advance the running
 * allocation cursor 'start' past it.  Macros because 'name' is
 * token-pasted; 'start' and 'len' are evaluated more than once.
 */
#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	(start) += (len)

/* Same for a ULP TX region, but without advancing the cursor. */
#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)
2913 static void ulp_config(adapter_t
*adap
, const struct tp_params
*p
)
2915 unsigned int m
= p
->chan_rx_size
;
2917 ulp_region(adap
, ISCSI
, m
, p
->chan_rx_size
/ 8);
2918 ulp_region(adap
, TDDP
, m
, p
->chan_rx_size
/ 8);
2919 ulptx_region(adap
, TPT
, m
, p
->chan_rx_size
/ 4);
2920 ulp_region(adap
, STAG
, m
, p
->chan_rx_size
/ 4);
2921 ulp_region(adap
, RQ
, m
, p
->chan_rx_size
/ 4);
2922 ulptx_region(adap
, PBL
, m
, p
->chan_rx_size
/ 4);
2923 ulp_region(adap
, PBL
, m
, p
->chan_rx_size
/ 4);
2924 t3_write_reg(adap
, A_ULPRX_TDDP_TAGMASK
, 0xffffffff);
2929 * t3_set_proto_sram - set the contents of the protocol sram
2930 * @adapter: the adapter
2931 * @data: the protocol image
2933 * Write the contents of the protocol SRAM.
2935 int t3_set_proto_sram(adapter_t
*adap
, const u8
*data
)
2938 const u32
*buf
= (const u32
*)data
;
2940 for (i
= 0; i
< PROTO_SRAM_LINES
; i
++) {
2941 t3_write_reg(adap
, A_TP_EMBED_OP_FIELD5
, cpu_to_be32(*buf
++));
2942 t3_write_reg(adap
, A_TP_EMBED_OP_FIELD4
, cpu_to_be32(*buf
++));
2943 t3_write_reg(adap
, A_TP_EMBED_OP_FIELD3
, cpu_to_be32(*buf
++));
2944 t3_write_reg(adap
, A_TP_EMBED_OP_FIELD2
, cpu_to_be32(*buf
++));
2945 t3_write_reg(adap
, A_TP_EMBED_OP_FIELD1
, cpu_to_be32(*buf
++));
2947 t3_write_reg(adap
, A_TP_EMBED_OP_FIELD0
, i
<< 1 | 1 << 31);
2948 if (t3_wait_op_done(adap
, A_TP_EMBED_OP_FIELD0
, 1, 1, 5, 1))
2956 * t3_config_trace_filter - configure one of the tracing filters
2957 * @adapter: the adapter
2958 * @tp: the desired trace filter parameters
2959 * @filter_index: which filter to configure
2960 * @invert: if set non-matching packets are traced instead of matching ones
2961 * @enable: whether to enable or disable the filter
2963 * Configures one of the tracing filters available in HW.
2965 void t3_config_trace_filter(adapter_t
*adapter
, const struct trace_params
*tp
,
2966 int filter_index
, int invert
, int enable
)
2968 u32 addr
, key
[4], mask
[4];
2970 key
[0] = tp
->sport
| (tp
->sip
<< 16);
2971 key
[1] = (tp
->sip
>> 16) | (tp
->dport
<< 16);
2973 key
[3] = tp
->proto
| (tp
->vlan
<< 8) | (tp
->intf
<< 20);
2975 mask
[0] = tp
->sport_mask
| (tp
->sip_mask
<< 16);
2976 mask
[1] = (tp
->sip_mask
>> 16) | (tp
->dport_mask
<< 16);
2977 mask
[2] = tp
->dip_mask
;
2978 mask
[3] = tp
->proto_mask
| (tp
->vlan_mask
<< 8) | (tp
->intf_mask
<< 20);
2981 key
[3] |= (1 << 29);
2983 key
[3] |= (1 << 28);
2985 addr
= filter_index
? A_TP_RX_TRC_KEY0
: A_TP_TX_TRC_KEY0
;
2986 tp_wr_indirect(adapter
, addr
++, key
[0]);
2987 tp_wr_indirect(adapter
, addr
++, mask
[0]);
2988 tp_wr_indirect(adapter
, addr
++, key
[1]);
2989 tp_wr_indirect(adapter
, addr
++, mask
[1]);
2990 tp_wr_indirect(adapter
, addr
++, key
[2]);
2991 tp_wr_indirect(adapter
, addr
++, mask
[2]);
2992 tp_wr_indirect(adapter
, addr
++, key
[3]);
2993 tp_wr_indirect(adapter
, addr
, mask
[3]);
2994 (void) t3_read_reg(adapter
, A_TP_PIO_DATA
);
2998 * t3_config_sched - configure a HW traffic scheduler
2999 * @adap: the adapter
3000 * @kbps: target rate in Kbps
3001 * @sched: the scheduler index
3003 * Configure a Tx HW scheduler for the target rate.
3005 int t3_config_sched(adapter_t
*adap
, unsigned int kbps
, int sched
)
3007 unsigned int v
, tps
, cpt
, bpt
, delta
, mindelta
= ~0;
3008 unsigned int clk
= adap
->params
.vpd
.cclk
* 1000;
3009 unsigned int selected_cpt
= 0, selected_bpt
= 0;
3012 kbps
*= 125; /* -> bytes */
3013 for (cpt
= 1; cpt
<= 255; cpt
++) {
3015 bpt
= (kbps
+ tps
/ 2) / tps
;
3016 if (bpt
> 0 && bpt
<= 255) {
3018 delta
= v
>= kbps
? v
- kbps
: kbps
- v
;
3019 if (delta
<= mindelta
) {
3024 } else if (selected_cpt
)
3030 t3_write_reg(adap
, A_TP_TM_PIO_ADDR
,
3031 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT
- sched
/ 2);
3032 v
= t3_read_reg(adap
, A_TP_TM_PIO_DATA
);
3034 v
= (v
& 0xffff) | (selected_cpt
<< 16) | (selected_bpt
<< 24);
3036 v
= (v
& 0xffff0000) | selected_cpt
| (selected_bpt
<< 8);
3037 t3_write_reg(adap
, A_TP_TM_PIO_DATA
, v
);
3042 * t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3043 * @adap: the adapter
3044 * @sched: the scheduler index
3045 * @ipg: the interpacket delay in tenths of nanoseconds
3047 * Set the interpacket delay for a HW packet rate scheduler.
3049 int t3_set_sched_ipg(adapter_t
*adap
, int sched
, unsigned int ipg
)
3051 unsigned int v
, addr
= A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR
- sched
/ 2;
3053 /* convert ipg to nearest number of core clocks */
3054 ipg
*= core_ticks_per_usec(adap
);
3055 ipg
= (ipg
+ 5000) / 10000;
3059 t3_write_reg(adap
, A_TP_TM_PIO_ADDR
, addr
);
3060 v
= t3_read_reg(adap
, A_TP_TM_PIO_DATA
);
3062 v
= (v
& 0xffff) | (ipg
<< 16);
3064 v
= (v
& 0xffff0000) | ipg
;
3065 t3_write_reg(adap
, A_TP_TM_PIO_DATA
, v
);
3066 t3_read_reg(adap
, A_TP_TM_PIO_DATA
);
3071 * t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3072 * @adap: the adapter
3073 * @sched: the scheduler index
3074 * @kbps: the byte rate in Kbps
3075 * @ipg: the interpacket delay in tenths of nanoseconds
3077 * Return the current configuration of a HW Tx scheduler.
3079 void t3_get_tx_sched(adapter_t
*adap
, unsigned int sched
, unsigned int *kbps
,
3082 unsigned int v
, addr
, bpt
, cpt
;
3085 addr
= A_TP_TX_MOD_Q1_Q0_RATE_LIMIT
- sched
/ 2;
3086 t3_write_reg(adap
, A_TP_TM_PIO_ADDR
, addr
);
3087 v
= t3_read_reg(adap
, A_TP_TM_PIO_DATA
);
3090 bpt
= (v
>> 8) & 0xff;
3093 *kbps
= 0; /* scheduler disabled */
3095 v
= (adap
->params
.vpd
.cclk
* 1000) / cpt
;
3096 *kbps
= (v
* bpt
) / 125;
3100 addr
= A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR
- sched
/ 2;
3101 t3_write_reg(adap
, A_TP_TM_PIO_ADDR
, addr
);
3102 v
= t3_read_reg(adap
, A_TP_TM_PIO_DATA
);
3106 *ipg
= (10000 * v
) / core_ticks_per_usec(adap
);
3111 * tp_init - configure TP
3112 * @adap: the adapter
3113 * @p: TP configuration parameters
3115 * Initializes the TP HW module.
3117 static int tp_init(adapter_t
*adap
, const struct tp_params
*p
)
3122 t3_set_vlan_accel(adap
, 3, 0);
3124 if (is_offload(adap
)) {
3125 tp_set_timers(adap
, adap
->params
.vpd
.cclk
* 1000);
3126 t3_write_reg(adap
, A_TP_RESET
, F_FLSTINITENABLE
);
3127 busy
= t3_wait_op_done(adap
, A_TP_RESET
, F_FLSTINITENABLE
,
3130 CH_ERR(adap
, "TP initialization timed out\n");
3134 t3_write_reg(adap
, A_TP_RESET
, F_TPRESET
);
3139 * t3_mps_set_active_ports - configure port failover
3140 * @adap: the adapter
3141 * @port_mask: bitmap of active ports
3143 * Sets the active ports according to the supplied bitmap.
3145 int t3_mps_set_active_ports(adapter_t
*adap
, unsigned int port_mask
)
3147 if (port_mask
& ~((1 << adap
->params
.nports
) - 1))
3149 t3_set_reg_field(adap
, A_MPS_CFG
, F_PORT1ACTIVE
| F_PORT0ACTIVE
,
3150 port_mask
<< S_PORT0ACTIVE
);
3155 * chan_init_hw - channel-dependent HW initialization
3156 * @adap: the adapter
3157 * @chan_map: bitmap of Tx channels being used
3159 * Perform the bits of HW initialization that are dependent on the Tx
3160 * channels being used.
3162 static void chan_init_hw(adapter_t
*adap
, unsigned int chan_map
)
3166 if (chan_map
!= 3) { /* one channel */
3167 t3_set_reg_field(adap
, A_ULPRX_CTL
, F_ROUND_ROBIN
, 0);
3168 t3_set_reg_field(adap
, A_ULPTX_CONFIG
, F_CFG_RR_ARB
, 0);
3169 t3_write_reg(adap
, A_MPS_CFG
, F_TPRXPORTEN
| F_ENFORCEPKT
|
3170 (chan_map
== 1 ? F_TPTXPORT0EN
| F_PORT0ACTIVE
:
3171 F_TPTXPORT1EN
| F_PORT1ACTIVE
));
3172 t3_write_reg(adap
, A_PM1_TX_CFG
,
3173 chan_map
== 1 ? 0xffffffff : 0);
3175 t3_write_reg(adap
, A_TP_TX_MOD_QUEUE_REQ_MAP
,
3176 V_TX_MOD_QUEUE_REQ_MAP(0xff));
3177 t3_write_reg(adap
, A_TP_TX_MOD_QUE_TABLE
, (12 << 16) | 0xd9c8);
3178 t3_write_reg(adap
, A_TP_TX_MOD_QUE_TABLE
, (13 << 16) | 0xfbea);
3179 } else { /* two channels */
3180 t3_set_reg_field(adap
, A_ULPRX_CTL
, 0, F_ROUND_ROBIN
);
3181 t3_set_reg_field(adap
, A_ULPTX_CONFIG
, 0, F_CFG_RR_ARB
);
3182 t3_write_reg(adap
, A_ULPTX_DMA_WEIGHT
,
3183 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3184 t3_write_reg(adap
, A_MPS_CFG
, F_TPTXPORT0EN
| F_TPTXPORT1EN
|
3185 F_TPRXPORTEN
| F_PORT0ACTIVE
| F_PORT1ACTIVE
|
3187 t3_write_reg(adap
, A_PM1_TX_CFG
, 0x80008000);
3188 t3_set_reg_field(adap
, A_TP_PC_CONFIG
, 0, F_TXTOSQUEUEMAPMODE
);
3189 t3_write_reg(adap
, A_TP_TX_MOD_QUEUE_REQ_MAP
,
3190 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3191 for (i
= 0; i
< 16; i
++)
3192 t3_write_reg(adap
, A_TP_TX_MOD_QUE_TABLE
,
3193 (i
<< 16) | 0x1010);
3194 t3_write_reg(adap
, A_TP_TX_MOD_QUE_TABLE
, (12 << 16) | 0xba98);
3195 t3_write_reg(adap
, A_TP_TX_MOD_QUE_TABLE
, (13 << 16) | 0xfedc);
3199 static int calibrate_xgm(adapter_t
*adapter
)
3201 if (uses_xaui(adapter
)) {
3204 for (i
= 0; i
< 5; ++i
) {
3205 t3_write_reg(adapter
, A_XGM_XAUI_IMP
, 0);
3206 (void) t3_read_reg(adapter
, A_XGM_XAUI_IMP
);
3208 v
= t3_read_reg(adapter
, A_XGM_XAUI_IMP
);
3209 if (!(v
& (F_XGM_CALFAULT
| F_CALBUSY
))) {
3210 t3_write_reg(adapter
, A_XGM_XAUI_IMP
,
3211 V_XAUIIMP(G_CALIMP(v
) >> 2));
3215 CH_ERR(adapter
, "MAC calibration failed\n");
3218 t3_write_reg(adapter
, A_XGM_RGMII_IMP
,
3219 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3220 t3_set_reg_field(adapter
, A_XGM_RGMII_IMP
, F_XGM_IMPSETUPDATE
,
3221 F_XGM_IMPSETUPDATE
);
3226 static void calibrate_xgm_t3b(adapter_t
*adapter
)
3228 if (!uses_xaui(adapter
)) {
3229 t3_write_reg(adapter
, A_XGM_RGMII_IMP
, F_CALRESET
|
3230 F_CALUPDATE
| V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3231 t3_set_reg_field(adapter
, A_XGM_RGMII_IMP
, F_CALRESET
, 0);
3232 t3_set_reg_field(adapter
, A_XGM_RGMII_IMP
, 0,
3233 F_XGM_IMPSETUPDATE
);
3234 t3_set_reg_field(adapter
, A_XGM_RGMII_IMP
, F_XGM_IMPSETUPDATE
,
3236 t3_set_reg_field(adapter
, A_XGM_RGMII_IMP
, F_CALUPDATE
, 0);
3237 t3_set_reg_field(adapter
, A_XGM_RGMII_IMP
, 0, F_CALUPDATE
);
/* Per-speed MC7 memory-controller timing parameters (units: MC7 clock
 * cycles); RefCyc is indexed by the memory density field of MC7_CFG. */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* ACTIVATE to PRECHARGE delay */
	unsigned char ActToRdWrDly;	/* ACTIVATE to READ/WRITE delay */
	unsigned char PreCyc;		/* PRECHARGE cycle time */
	unsigned char RefCyc[5];	/* REFRESH cycle time, per density */
	unsigned char BkCyc;		/* bank cycle time */
	unsigned char WrToRdDly;	/* WRITE to READ delay */
	unsigned char RdToWrDly;	/* READ to WRITE delay */
};
3252 * Write a value to a register and check that the write completed. These
3253 * writes normally complete in a cycle or two, so one read should suffice.
3254 * The very first read exists to flush the posted write to the device.
3256 static int wrreg_wait(adapter_t
*adapter
, unsigned int addr
, u32 val
)
3258 t3_write_reg(adapter
, addr
, val
);
3259 (void) t3_read_reg(adapter
, addr
); /* flush */
3260 if (!(t3_read_reg(adapter
, addr
) & F_BUSY
))
3262 CH_ERR(adapter
, "write to MC7 register 0x%x timed out\n", addr
);
3266 static int mc7_init(struct mc7
*mc7
, unsigned int mc7_clock
, int mem_type
)
3268 static const unsigned int mc7_mode
[] = {
3269 0x632, 0x642, 0x652, 0x432, 0x442
3271 static const struct mc7_timing_params mc7_timings
[] = {
3272 { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3273 { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3274 { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3275 { 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3276 { 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3280 unsigned int width
, density
, slow
, attempts
;
3281 adapter_t
*adapter
= mc7
->adapter
;
3282 const struct mc7_timing_params
*p
= &mc7_timings
[mem_type
];
3287 val
= t3_read_reg(adapter
, mc7
->offset
+ A_MC7_CFG
);
3288 slow
= val
& F_SLOW
;
3289 width
= G_WIDTH(val
);
3290 density
= G_DEN(val
);
3292 t3_write_reg(adapter
, mc7
->offset
+ A_MC7_CFG
, val
| F_IFEN
);
3293 val
= t3_read_reg(adapter
, mc7
->offset
+ A_MC7_CFG
); /* flush */
3297 t3_write_reg(adapter
, mc7
->offset
+ A_MC7_CAL
, F_SGL_CAL_EN
);
3298 (void) t3_read_reg(adapter
, mc7
->offset
+ A_MC7_CAL
);
3300 if (t3_read_reg(adapter
, mc7
->offset
+ A_MC7_CAL
) &
3301 (F_BUSY
| F_SGL_CAL_EN
| F_CAL_FAULT
)) {
3302 CH_ERR(adapter
, "%s MC7 calibration timed out\n",
3308 t3_write_reg(adapter
, mc7
->offset
+ A_MC7_PARM
,
3309 V_ACTTOPREDLY(p
->ActToPreDly
) |
3310 V_ACTTORDWRDLY(p
->ActToRdWrDly
) | V_PRECYC(p
->PreCyc
) |
3311 V_REFCYC(p
->RefCyc
[density
]) | V_BKCYC(p
->BkCyc
) |
3312 V_WRTORDDLY(p
->WrToRdDly
) | V_RDTOWRDLY(p
->RdToWrDly
));
3314 t3_write_reg(adapter
, mc7
->offset
+ A_MC7_CFG
,
3315 val
| F_CLKEN
| F_TERM150
);
3316 (void) t3_read_reg(adapter
, mc7
->offset
+ A_MC7_CFG
); /* flush */
3319 t3_set_reg_field(adapter
, mc7
->offset
+ A_MC7_DLL
, F_DLLENB
,
3324 if (wrreg_wait(adapter
, mc7
->offset
+ A_MC7_PRE
, 0) ||
3325 wrreg_wait(adapter
, mc7
->offset
+ A_MC7_EXT_MODE2
, 0) ||
3326 wrreg_wait(adapter
, mc7
->offset
+ A_MC7_EXT_MODE3
, 0) ||
3327 wrreg_wait(adapter
, mc7
->offset
+ A_MC7_EXT_MODE1
, val
))
3331 t3_write_reg(adapter
, mc7
->offset
+ A_MC7_MODE
, 0x100);
3332 t3_set_reg_field(adapter
, mc7
->offset
+ A_MC7_DLL
,
3337 if (wrreg_wait(adapter
, mc7
->offset
+ A_MC7_PRE
, 0) ||
3338 wrreg_wait(adapter
, mc7
->offset
+ A_MC7_REF
, 0) ||
3339 wrreg_wait(adapter
, mc7
->offset
+ A_MC7_REF
, 0) ||
3340 wrreg_wait(adapter
, mc7
->offset
+ A_MC7_MODE
,
3341 mc7_mode
[mem_type
]) ||
3342 wrreg_wait(adapter
, mc7
->offset
+ A_MC7_EXT_MODE1
, val
| 0x380) ||
3343 wrreg_wait(adapter
, mc7
->offset
+ A_MC7_EXT_MODE1
, val
))
3346 /* clock value is in KHz */
3347 mc7_clock
= mc7_clock
* 7812 + mc7_clock
/ 2; /* ns */
3348 mc7_clock
/= 1000000; /* KHz->MHz, ns->us */
3350 t3_write_reg(adapter
, mc7
->offset
+ A_MC7_REF
,
3351 F_PERREFEN
| V_PREREFDIV(mc7_clock
));
3352 (void) t3_read_reg(adapter
, mc7
->offset
+ A_MC7_REF
); /* flush */
3354 t3_write_reg(adapter
, mc7
->offset
+ A_MC7_ECC
,
3355 F_ECCGENEN
| F_ECCCHKEN
);
3356 t3_write_reg(adapter
, mc7
->offset
+ A_MC7_BIST_DATA
, 0);
3357 t3_write_reg(adapter
, mc7
->offset
+ A_MC7_BIST_ADDR_BEG
, 0);
3358 t3_write_reg(adapter
, mc7
->offset
+ A_MC7_BIST_ADDR_END
,
3359 (mc7
->size
<< width
) - 1);
3360 t3_write_reg(adapter
, mc7
->offset
+ A_MC7_BIST_OP
, V_OP(1));
3361 (void) t3_read_reg(adapter
, mc7
->offset
+ A_MC7_BIST_OP
); /* flush */
3366 val
= t3_read_reg(adapter
, mc7
->offset
+ A_MC7_BIST_OP
);
3367 } while ((val
& F_BUSY
) && --attempts
);
3369 CH_ERR(adapter
, "%s MC7 BIST timed out\n", mc7
->name
);
3373 /* Enable normal memory accesses. */
3374 t3_set_reg_field(adapter
, mc7
->offset
+ A_MC7_CFG
, 0, F_RDY
);
3381 static void config_pcie(adapter_t
*adap
)
3383 static const u16 ack_lat
[4][6] = {
3384 { 237, 416, 559, 1071, 2095, 4143 },
3385 { 128, 217, 289, 545, 1057, 2081 },
3386 { 73, 118, 154, 282, 538, 1050 },
3387 { 67, 107, 86, 150, 278, 534 }
3389 static const u16 rpl_tmr
[4][6] = {
3390 { 711, 1248, 1677, 3213, 6285, 12429 },
3391 { 384, 651, 867, 1635, 3171, 6243 },
3392 { 219, 354, 462, 846, 1614, 3150 },
3393 { 201, 321, 258, 450, 834, 1602 }
3397 unsigned int log2_width
, pldsize
;
3398 unsigned int fst_trn_rx
, fst_trn_tx
, acklat
, rpllmt
;
3400 t3_os_pci_read_config_2(adap
,
3401 adap
->params
.pci
.pcie_cap_addr
+ PCI_EXP_DEVCTL
,
3403 pldsize
= (val
& PCI_EXP_DEVCTL_PAYLOAD
) >> 5;
3405 t3_os_pci_read_config_2(adap
,
3406 adap
->params
.pci
.pcie_cap_addr
+ PCI_EXP_LNKCTL
,
3409 fst_trn_tx
= G_NUMFSTTRNSEQ(t3_read_reg(adap
, A_PCIE_PEX_CTRL0
));
3410 fst_trn_rx
= adap
->params
.rev
== 0 ? fst_trn_tx
:
3411 G_NUMFSTTRNSEQRX(t3_read_reg(adap
, A_PCIE_MODE
));
3412 log2_width
= fls(adap
->params
.pci
.width
) - 1;
3413 acklat
= ack_lat
[log2_width
][pldsize
];
3414 if (val
& 1) /* check LOsEnable */
3415 acklat
+= fst_trn_tx
* 4;
3416 rpllmt
= rpl_tmr
[log2_width
][pldsize
] + fst_trn_rx
* 4;
3418 if (adap
->params
.rev
== 0)
3419 t3_set_reg_field(adap
, A_PCIE_PEX_CTRL1
,
3420 V_T3A_ACKLAT(M_T3A_ACKLAT
),
3421 V_T3A_ACKLAT(acklat
));
3423 t3_set_reg_field(adap
, A_PCIE_PEX_CTRL1
, V_ACKLAT(M_ACKLAT
),
3426 t3_set_reg_field(adap
, A_PCIE_PEX_CTRL0
, V_REPLAYLMT(M_REPLAYLMT
),
3427 V_REPLAYLMT(rpllmt
));
3429 t3_write_reg(adap
, A_PCIE_PEX_ERR
, 0xffffffff);
3430 t3_set_reg_field(adap
, A_PCIE_CFG
, F_PCIE_CLIDECEN
, F_PCIE_CLIDECEN
);
3434 * t3_init_hw - initialize and configure T3 HW modules
3435 * @adapter: the adapter
3436 * @fw_params: initial parameters to pass to firmware (optional)
3438 * Initialize and configure T3 HW modules. This performs the
3439 * initialization steps that need to be done once after a card is reset.
3440 * MAC and PHY initialization is handled separarely whenever a port is
3443 * @fw_params are passed to FW and their value is platform dependent.
3444 * Only the top 8 bits are available for use, the rest must be 0.
3446 int t3_init_hw(adapter_t
*adapter
, u32 fw_params
)
3448 int err
= -EIO
, attempts
= 100;
3449 const struct vpd_params
*vpd
= &adapter
->params
.vpd
;
3451 if (adapter
->params
.rev
> 0)
3452 calibrate_xgm_t3b(adapter
);
3453 else if (calibrate_xgm(adapter
))
3456 if (adapter
->params
.nports
> 2)
3457 t3_mac_reset(&adap2pinfo(adapter
, 0)->mac
);
3460 partition_mem(adapter
, &adapter
->params
.tp
);
3462 if (mc7_init(&adapter
->pmrx
, vpd
->mclk
, vpd
->mem_timing
) ||
3463 mc7_init(&adapter
->pmtx
, vpd
->mclk
, vpd
->mem_timing
) ||
3464 mc7_init(&adapter
->cm
, vpd
->mclk
, vpd
->mem_timing
) ||
3465 t3_mc5_init(&adapter
->mc5
, adapter
->params
.mc5
.nservers
,
3466 adapter
->params
.mc5
.nfilters
,
3467 adapter
->params
.mc5
.nroutes
))
3471 if (tp_init(adapter
, &adapter
->params
.tp
))
3474 #ifdef CONFIG_CHELSIO_T3_CORE
3475 t3_tp_set_coalescing_size(adapter
,
3476 min(adapter
->params
.sge
.max_pkt_size
,
3477 MAX_RX_COALESCING_LEN
), 1);
3478 t3_tp_set_max_rxsize(adapter
,
3479 min(adapter
->params
.sge
.max_pkt_size
, 16384U));
3480 ulp_config(adapter
, &adapter
->params
.tp
);
3482 if (is_pcie(adapter
))
3483 config_pcie(adapter
);
3485 t3_set_reg_field(adapter
, A_PCIX_CFG
, 0, F_CLIDECEN
);
3487 t3_write_reg(adapter
, A_PM1_RX_CFG
, 0xffffffff);
3488 t3_write_reg(adapter
, A_PM1_RX_MODE
, 0);
3489 t3_write_reg(adapter
, A_PM1_TX_MODE
, 0);
3490 chan_init_hw(adapter
, adapter
->params
.chan_map
);
3491 t3_sge_init(adapter
, &adapter
->params
.sge
);
3493 t3_write_reg(adapter
, A_CIM_HOST_ACC_DATA
, vpd
->uclk
| fw_params
);
3494 t3_write_reg(adapter
, A_CIM_BOOT_CFG
,
3495 V_BOOTADDR(FW_FLASH_BOOT_ADDR
>> 2));
3496 (void) t3_read_reg(adapter
, A_CIM_BOOT_CFG
); /* flush */
3498 do { /* wait for uP to initialize */
3500 } while (t3_read_reg(adapter
, A_CIM_HOST_ACC_DATA
) && --attempts
);
3502 CH_ERR(adapter
, "uP initialization timed out\n");
3512 * get_pci_mode - determine a card's PCI mode
3513 * @adapter: the adapter
3514 * @p: where to store the PCI settings
3516 * Determines a card's PCI mode and associated parameters, such as speed
3519 static void __devinit
get_pci_mode(adapter_t
*adapter
, struct pci_params
*p
)
3521 static unsigned short speed_map
[] = { 33, 66, 100, 133 };
3522 u32 pcie_mode
, pcie_cap
;
3524 pcie_cap
= t3_os_find_pci_capability(adapter
, PCI_CAP_ID_EXP
);
3528 p
->variant
= PCI_VARIANT_PCIE
;
3529 p
->pcie_cap_addr
= pcie_cap
;
3530 t3_os_pci_read_config_2(adapter
, pcie_cap
+ PCI_EXP_LNKSTA
,
3532 p
->width
= (val
>> 4) & 0x3f;
3536 pcie_mode
= t3_read_reg(adapter
, A_PCIX_MODE
);
3537 p
->speed
= speed_map
[G_PCLKRANGE(pcie_mode
)];
3538 p
->width
= (pcie_mode
& F_64BIT
) ? 64 : 32;
3539 pcie_mode
= G_PCIXINITPAT(pcie_mode
);
3541 p
->variant
= PCI_VARIANT_PCI
;
3542 else if (pcie_mode
< 4)
3543 p
->variant
= PCI_VARIANT_PCIX_MODE1_PARITY
;
3544 else if (pcie_mode
< 8)
3545 p
->variant
= PCI_VARIANT_PCIX_MODE1_ECC
;
3547 p
->variant
= PCI_VARIANT_PCIX_266_MODE2
;
3551 * init_link_config - initialize a link's SW state
3552 * @lc: structure holding the link state
3553 * @caps: link capabilities
3555 * Initializes the SW state maintained for each link, including the link's
3556 * capabilities and default speed/duplex/flow-control/autonegotiation
3559 static void __devinit
init_link_config(struct link_config
*lc
,
3562 lc
->supported
= caps
;
3563 lc
->requested_speed
= lc
->speed
= SPEED_INVALID
;
3564 lc
->requested_duplex
= lc
->duplex
= DUPLEX_INVALID
;
3565 lc
->requested_fc
= lc
->fc
= PAUSE_RX
| PAUSE_TX
;
3566 if (lc
->supported
& SUPPORTED_Autoneg
) {
3567 lc
->advertising
= lc
->supported
;
3568 lc
->autoneg
= AUTONEG_ENABLE
;
3569 lc
->requested_fc
|= PAUSE_AUTONEG
;
3571 lc
->advertising
= 0;
3572 lc
->autoneg
= AUTONEG_DISABLE
;
3577 * mc7_calc_size - calculate MC7 memory size
3578 * @cfg: the MC7 configuration
3580 * Calculates the size of an MC7 memory in bytes from the value of its
3581 * configuration register.
3583 static unsigned int __devinit
mc7_calc_size(u32 cfg
)
3585 unsigned int width
= G_WIDTH(cfg
);
3586 unsigned int banks
= !!(cfg
& F_BKS
) + 1;
3587 unsigned int org
= !!(cfg
& F_ORG
) + 1;
3588 unsigned int density
= G_DEN(cfg
);
3589 unsigned int MBs
= ((256 << density
) * banks
) / (org
<< width
);
3594 static void __devinit
mc7_prep(adapter_t
*adapter
, struct mc7
*mc7
,
3595 unsigned int base_addr
, const char *name
)
3599 mc7
->adapter
= adapter
;
3601 mc7
->offset
= base_addr
- MC7_PMRX_BASE_ADDR
;
3602 cfg
= t3_read_reg(adapter
, mc7
->offset
+ A_MC7_CFG
);
3603 mc7
->size
= G_DEN(cfg
) == M_DEN
? 0 : mc7_calc_size(cfg
);
3604 mc7
->width
= G_WIDTH(cfg
);
3607 void mac_prep(struct cmac
*mac
, adapter_t
*adapter
, int index
)
3609 mac
->adapter
= adapter
;
3610 mac
->multiport
= adapter
->params
.nports
> 2;
3611 if (mac
->multiport
) {
3612 mac
->ext_port
= (unsigned char)index
;
3618 mac
->offset
= (XGMAC0_1_BASE_ADDR
- XGMAC0_0_BASE_ADDR
) * index
;
3620 if (adapter
->params
.rev
== 0 && uses_xaui(adapter
)) {
3621 t3_write_reg(adapter
, A_XGM_SERDES_CTRL
+ mac
->offset
,
3622 is_10G(adapter
) ? 0x2901c04 : 0x2301c04);
3623 t3_set_reg_field(adapter
, A_XGM_PORT_CFG
+ mac
->offset
,
3629 * early_hw_init - HW initialization done at card detection time
3630 * @adapter: the adapter
3631 * @ai: contains information about the adapter type and properties
3633 * Perfoms the part of HW initialization that is done early on when the
3634 * driver first detecs the card. Most of the HW state is initialized
3635 * lazily later on when a port or an offload function are first used.
3637 void early_hw_init(adapter_t
*adapter
, const struct adapter_info
*ai
)
3639 u32 val
= V_PORTSPEED(is_10G(adapter
) || adapter
->params
.nports
> 2 ?
3642 mi1_init(adapter
, ai
);
3643 t3_write_reg(adapter
, A_I2C_CFG
, /* set for 80KHz */
3644 V_I2C_CLKDIV(adapter
->params
.vpd
.cclk
/ 80 - 1));
3645 t3_write_reg(adapter
, A_T3DBG_GPIO_EN
,
3646 ai
->gpio_out
| F_GPIO0_OEN
| F_GPIO0_OUT_VAL
);
3647 t3_write_reg(adapter
, A_MC5_DB_SERVER_INDEX
, 0);
3649 if (adapter
->params
.rev
== 0 || !uses_xaui(adapter
))
3652 /* Enable MAC clocks so we can access the registers */
3653 t3_write_reg(adapter
, A_XGM_PORT_CFG
, val
);
3654 (void) t3_read_reg(adapter
, A_XGM_PORT_CFG
);
3656 val
|= F_CLKDIVRESET_
;
3657 t3_write_reg(adapter
, A_XGM_PORT_CFG
, val
);
3658 (void) t3_read_reg(adapter
, A_XGM_PORT_CFG
);
3659 t3_write_reg(adapter
, XGM_REG(A_XGM_PORT_CFG
, 1), val
);
3660 (void) t3_read_reg(adapter
, A_XGM_PORT_CFG
);
3664 * t3_reset_adapter - reset the adapter
3665 * @adapter: the adapter
3667 * Reset the adapter.
3669 static int t3_reset_adapter(adapter_t
*adapter
)
3671 int i
, save_and_restore_pcie
=
3672 adapter
->params
.rev
< T3_REV_B2
&& is_pcie(adapter
);
3675 if (save_and_restore_pcie
)
3676 t3_os_pci_save_state(adapter
);
3677 t3_write_reg(adapter
, A_PL_RST
, F_CRSTWRM
| F_CRSTWRMMODE
);
3680 * Delay. Give Some time to device to reset fully.
3681 * XXX The delay time should be modified.
3683 for (i
= 0; i
< 10; i
++) {
3685 t3_os_pci_read_config_2(adapter
, 0x00, &devid
);
3686 if (devid
== 0x1425)
3690 if (devid
!= 0x1425)
3693 if (save_and_restore_pcie
)
3694 t3_os_pci_restore_state(adapter
);
3699 * t3_prep_adapter - prepare SW and HW for operation
3700 * @adapter: the adapter
3701 * @ai: contains information about the adapter type and properties
3703 * Initialize adapter SW state for the various HW modules, set initial
3704 * values for some adapter tunables, take PHYs out of reset, and
3705 * initialize the MDIO interface.
3707 int __devinit
t3_prep_adapter(adapter_t
*adapter
,
3708 const struct adapter_info
*ai
, int reset
)
3711 unsigned int i
, j
= 0;
3713 get_pci_mode(adapter
, &adapter
->params
.pci
);
3715 adapter
->params
.info
= ai
;
3716 adapter
->params
.nports
= ai
->nports0
+ ai
->nports1
;
3717 adapter
->params
.chan_map
= !!ai
->nports0
| (!!ai
->nports1
<< 1);
3718 adapter
->params
.rev
= t3_read_reg(adapter
, A_PL_REV
);
3719 adapter
->params
.linkpoll_period
= 0;
3720 if (adapter
->params
.nports
> 2)
3721 adapter
->params
.stats_update_period
= VSC_STATS_ACCUM_SECS
;
3723 adapter
->params
.stats_update_period
= is_10G(adapter
) ?
3724 MAC_STATS_ACCUM_SECS
: (MAC_STATS_ACCUM_SECS
* 10);
3725 adapter
->params
.pci
.vpd_cap_addr
=
3726 t3_os_find_pci_capability(adapter
, PCI_CAP_ID_VPD
);
3728 ret
= get_vpd_params(adapter
, &adapter
->params
.vpd
);
3732 if (reset
&& t3_reset_adapter(adapter
))
3735 t3_sge_prep(adapter
, &adapter
->params
.sge
);
3737 if (adapter
->params
.vpd
.mclk
) {
3738 struct tp_params
*p
= &adapter
->params
.tp
;
3740 mc7_prep(adapter
, &adapter
->pmrx
, MC7_PMRX_BASE_ADDR
, "PMRX");
3741 mc7_prep(adapter
, &adapter
->pmtx
, MC7_PMTX_BASE_ADDR
, "PMTX");
3742 mc7_prep(adapter
, &adapter
->cm
, MC7_CM_BASE_ADDR
, "CM");
3744 p
->nchan
= adapter
->params
.chan_map
== 3 ? 2 : 1;
3745 p
->pmrx_size
= t3_mc7_size(&adapter
->pmrx
);
3746 p
->pmtx_size
= t3_mc7_size(&adapter
->pmtx
);
3747 p
->cm_size
= t3_mc7_size(&adapter
->cm
);
3748 p
->chan_rx_size
= p
->pmrx_size
/ 2; /* only 1 Rx channel */
3749 p
->chan_tx_size
= p
->pmtx_size
/ p
->nchan
;
3750 p
->rx_pg_size
= 64 * 1024;
3751 p
->tx_pg_size
= is_10G(adapter
) ? 64 * 1024 : 16 * 1024;
3752 p
->rx_num_pgs
= pm_num_pages(p
->chan_rx_size
, p
->rx_pg_size
);
3753 p
->tx_num_pgs
= pm_num_pages(p
->chan_tx_size
, p
->tx_pg_size
);
3754 p
->ntimer_qs
= p
->cm_size
>= (128 << 20) ||
3755 adapter
->params
.rev
> 0 ? 12 : 6;
3756 p
->tre
= fls(adapter
->params
.vpd
.cclk
/ (1000 / TP_TMR_RES
)) -
3758 p
->dack_re
= fls(adapter
->params
.vpd
.cclk
/ 10) - 1; /* 100us */
3761 adapter
->params
.offload
= t3_mc7_size(&adapter
->pmrx
) &&
3762 t3_mc7_size(&adapter
->pmtx
) &&
3763 t3_mc7_size(&adapter
->cm
);
3765 if (is_offload(adapter
)) {
3766 adapter
->params
.mc5
.nservers
= DEFAULT_NSERVERS
;
3767 adapter
->params
.mc5
.nfilters
= adapter
->params
.rev
> 0 ?
3768 DEFAULT_NFILTERS
: 0;
3769 adapter
->params
.mc5
.nroutes
= 0;
3770 t3_mc5_prep(adapter
, &adapter
->mc5
, MC5_MODE_144_BIT
);
3772 #ifdef CONFIG_CHELSIO_T3_CORE
3773 init_mtus(adapter
->params
.mtus
);
3774 init_cong_ctrl(adapter
->params
.a_wnd
, adapter
->params
.b_wnd
);
3778 early_hw_init(adapter
, ai
);
3780 if (adapter
->params
.nports
> 2 &&
3781 (ret
= t3_vsc7323_init(adapter
, adapter
->params
.nports
)))
3784 for_each_port(adapter
, i
) {
3786 struct port_info
*p
= adap2pinfo(adapter
, i
);
3788 while (!adapter
->params
.vpd
.port_type
[j
])
3791 p
->port_type
= &port_types
[adapter
->params
.vpd
.port_type
[j
]];
3792 p
->port_type
->phy_prep(&p
->phy
, adapter
, ai
->phy_base_addr
+ j
,
3794 mac_prep(&p
->mac
, adapter
, j
);
3798 * The VPD EEPROM stores the base Ethernet address for the
3799 * card. A port's address is derived from the base by adding
3800 * the port's index to the base's low octet.
3802 memcpy(hw_addr
, adapter
->params
.vpd
.eth_base
, 5);
3803 hw_addr
[5] = adapter
->params
.vpd
.eth_base
[5] + i
;
3805 t3_os_set_hw_addr(adapter
, i
, hw_addr
);
3806 init_link_config(&p
->link_config
, p
->port_type
->caps
);
3807 p
->phy
.ops
->power_down(&p
->phy
, 1);
3808 if (!(p
->port_type
->caps
& SUPPORTED_IRQ
))
3809 adapter
->params
.linkpoll_period
= 10;
3815 void t3_led_ready(adapter_t
*adapter
)
3817 t3_set_reg_field(adapter
, A_T3DBG_GPIO_EN
, F_GPIO0_OUT_VAL
,
3821 void t3_port_failover(adapter_t
*adapter
, int port
)
3825 val
= port
? F_PORT1ACTIVE
: F_PORT0ACTIVE
;
3826 t3_set_reg_field(adapter
, A_MPS_CFG
, F_PORT0ACTIVE
| F_PORT1ACTIVE
,
3830 void t3_failover_done(adapter_t
*adapter
, int port
)
3832 t3_set_reg_field(adapter
, A_MPS_CFG
, F_PORT0ACTIVE
| F_PORT1ACTIVE
,
3833 F_PORT0ACTIVE
| F_PORT1ACTIVE
);
3836 void t3_failover_clear(adapter_t
*adapter
)
3838 t3_set_reg_field(adapter
, A_MPS_CFG
, F_PORT0ACTIVE
| F_PORT1ACTIVE
,
3839 F_PORT0ACTIVE
| F_PORT1ACTIVE
);