cxgb3: Use netif_set_real_num_{rx,tx}_queues()
drivers/net/cxgb3/t3_hw.c
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"
/**
 *	t3_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
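/*
 * Example (illustrative sketch, not part of the driver): poll a register
 * until a busy bit clears and capture the final value.  A_SF_OP/F_BUSY are
 * borrowed from the serial-flash helpers further down purely as a plausible
 * pairing; any register with completion semantics works the same way.
 *
 *	u32 v;
 *
 *	if (t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 5, 10, &v))
 *		CH_ERR(adapter, "operation timed out\n");
 */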
/**
 *	t3_write_regs - write a bunch of registers
 *	@adapter: the adapter to program
 *	@p: an array of register address/register value pairs
 *	@n: the number of address/value pairs
 *	@offset: register address offset
 *
 *	Takes an array of register address/register value pairs and writes each
 *	value to the corresponding register.  Register addresses are adjusted
 *	by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 *	t3_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
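/*
 * Example (illustrative sketch, not part of the driver): update only the
 * clock-divider field of A_MI1_CFG without disturbing the other bits,
 * mirroring the value mi1_init() below builds from scratch.  This assumes
 * M_CLKDIV/V_CLKDIV follow the usual S_/M_/V_ field-macro convention of
 * regs.h.
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_CLKDIV(M_CLKDIV),
 *			 V_CLKDIV(clkdiv));
 */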
/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@start_idx: index of first indirect register to read
 *	@nregs: how many indirect registers to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
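/*
 * Example (illustrative sketch, not part of the driver): fetch the first
 * four 64-bit words of the memory behind an MC7 instance, e.g. for a debug
 * dump.  adapter->cm is the CM MC7 instance also used by the interrupt
 * handlers later in this file.
 *
 *	u64 buf[4];
 *
 *	if (t3_mc7_bd_read(&adapter->cm, 0, ARRAY_SIZE(buf), buf))
 *		CH_ERR(adapter, "MC7 backdoor read failed\n");
 */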
/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
		       u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		ret = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	.read = t3_mi1_read,
	.write = t3_mi1_write,
	.mode_support = MDIO_SUPPORTS_C22
};
/*
 * Performs the address cycle for clause 45 PHYs.
 * Must be called with the MDIO_LOCK held.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			ret = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
			 u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	.read = mi1_ext_read,
	.write = mi1_ext_write,
	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = t3_mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = t3_mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
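/*
 * Example (illustrative sketch, not part of the driver): clear the
 * low-power bit in a PHY's PMA/PMD control register while leaving the rest
 * of the register intact; t3_phy_reset() below uses the same helper to set
 * the reset bit.
 *
 *	err = t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, MDIO_CTRL1,
 *				  MDIO_CTRL1_LPOWER, 0);
 */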
/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
				  MDIO_CTRL1_RESET);
	if (err || !wait)
		return err;

	do {
		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
		if (err)
			return err;
		ctl &= MDIO_CTRL1_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}
/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}
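/*
 * Example (illustrative sketch, not part of the driver): advertise 100/full
 * and 1000/full plus symmetric pause, a typical setup for a copper port
 * with flow control.  The ADVERTISED_* constants are the standard ethtool
 * capability bits used throughout this file.
 *
 *	err = t3_phy_advertise(phy, ADVERTISED_100baseT_Full |
 *			       ADVERTISED_1000baseT_Full | ADVERTISED_Pause);
 */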
/**
 *	t3_phy_advertise_fiber - set fiber PHY advertisement register
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a fiber PHY's advertisement register to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}
/**
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested PHY speed
 *	@duplex: requested PHY duplex
 *
 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
}
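/*
 * Example (illustrative sketch, not part of the driver): pin a copper PHY
 * at 100 Mb/s full duplex.  Since the speed is not SPEED_1000 this also
 * turns auto-negotiation off, per the rule documented above.
 *
 *	err = t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
 */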
int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
			     MDIO_PMA_LASI_LSALARM);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
			       &status);

	if (err)
		return err;
	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
}
static const struct adapter_info t3_adap_info[] = {
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{1, 1, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
	{},
	{},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310" },
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL   40
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00
/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}
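/*
 * Example (illustrative sketch, not part of the driver): read the first
 * dword of the VPD structure, the same access get_vpd_params() below
 * performs to locate the ID tag.
 *
 *	__le32 word;
 *
 *	if (t3_seeprom_read(adapter, VPD_BASE, &word))
 *		CH_ERR(adapter, "VPD read failed\n");
 */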
/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}
/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
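/*
 * Example (illustrative sketch, not part of the driver): temporarily drop
 * write protection around an EEPROM update and restore it afterwards.
 * "addr" and "data" stand in for a caller's real location and value.
 *
 *	t3_seeprom_wp(adapter, 0);
 *	err = t3_seeprom_write(adapter, addr, data);
 *	t3_seeprom_wp(adapter, 1);
 */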
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 +
				 hex_to_bin(vpd.na_data[2 * i + 1]);
	return 0;
}
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
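/*
 * Example (illustrative sketch, not part of the driver): the canonical
 * erase sequence built from the primitives above -- enable writes, issue
 * the sector-erase opcode, then poll the status register.
 * t3_flash_erase_sectors() further down does exactly this in a loop;
 * "sector" here is a hypothetical sector index.
 *
 *	if (!sf1_write(adapter, 1, 0, SF_WR_ENABLE) &&
 *	    !sf1_write(adapter, 4, 0, SF_ERASE_SECTOR | (sector << 8)))
 *		err = flash_wait_op(adapter, 5, 500);
 */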
/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
/**
 *	t3_get_tp_version - read the tp sram version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the protocol sram version from sram.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}
/**
 *	t3_check_tpsram_version - read the tp sram version
 *	@adapter: the adapter
 *
 *	Reads the protocol sram version from flash.
 */
int t3_check_tpsram_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;
	else {
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}
/**
 *	t3_check_tpsram - check if provided protocol SRAM
 *			  is compatible with this driver
 *	@adapter: the adapter
 *	@tp_sram: the firmware image to write
 *	@size: image size
 *
 *	Checks if an adapter's tp sram is compatible with the driver.
 *	Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
		    unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}
/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;
	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		CH_WARN(adapter, "found newer FW version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}
/**
 *	t3_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}
/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
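/*
 * Example (illustrative sketch, not part of the driver): flash an image
 * obtained through the kernel firmware loader; "fw" stands in for a
 * struct firmware pointer returned by request_firmware().
 *
 *	err = t3_load_fw(adapter, fw->data, fw->size);
 *	if (err)
 *		CH_ERR(adapter, "could not flash firmware\n");
 */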
#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}

static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}
/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (!lc->link_ok && link_ok) {
		u32 rx_cfg, rx_hash_high, rx_hash_low;
		u32 status;

		t3_xgm_intr_enable(adapter, port_id);
		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_mac_enable(mac, MAC_DIRECTION_RX);

		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		if (status & F_LINKFAULTCHANGE) {
			mac->stats.link_faults++;
			pi->link_fault = 1;
		}
		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
	}

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;	/* nothing changed */

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
			   speed, duplex, fc);
}
void t3_link_fault(struct adapter *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	struct cphy *phy = &pi->phy;
	struct link_config *lc = &pi->link_config;
	int link_ok, speed, duplex, fc, link_fault;
	u32 rx_cfg, rx_hash_high, rx_hash_low;

	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

	if (adapter->params.rev > 0 && uses_xaui(adapter))
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
	t3_mac_enable(mac, MAC_DIRECTION_RX);

	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter,
				 A_XGM_INT_STATUS + mac->offset);
	link_fault &= F_LINKFAULTCHANGE;

	link_ok = lc->link_ok;
	speed = lc->speed;
	duplex = lc->duplex;
	fc = lc->fc;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_fault) {
		lc->link_ok = 0;
		lc->speed = SPEED_INVALID;
		lc->duplex = DUPLEX_INVALID;

		t3_os_link_fault(adapter, port_id, 0);

		/* Account link faults only when the phy reports a link up */
		if (link_ok)
			mac->stats.link_faults++;
	} else {
		if (link_ok)
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);

		pi->link_fault = 0;
		lc->link_ok = (unsigned char)link_ok;
		lc->speed = speed < 0 ? SPEED_INVALID : speed;
		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
		t3_os_link_fault(adapter, port_id, link_ok);
	}
}
/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}
/**
 *	t3_set_vlan_accel - control HW VLAN extraction
 *	@adapter: the adapter
 *	@ports: bitmap of adapter ports to operate on
 *	@on: enable (1) or disable (0) HW VLAN extraction
 *
 *	Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}
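/*
 * Example (illustrative sketch, not part of the driver): turn on hardware
 * VLAN tag extraction for port 0 only, using a one-bit port mask.
 *
 *	t3_set_vlan_accel(adapter, 1 << 0, 1);
 */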
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};

/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions
 *	@stats: statistics counters tracking interrupt occurrences
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally printing a warning or alert message, and optionally
 *	incrementing a stat counter.  The table is terminated by an entry
 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
			status &= ~acts->mask;
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
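/*
 * Example (illustrative sketch, not part of the driver): a minimal handler
 * in the same table-driven style.  F_SOME_ERR and A_SOME_INT_CAUSE are
 * made-up names; the points that matter are the mask-0 terminator and the
 * caller deciding whether a nonzero fatal count escalates.
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{F_SOME_ERR, "example block parity error", -1, 1},
 *		{0}
 *	};
 *
 *	if (t3_handle_intr_status(adapter, A_SOME_INT_CAUSE, 0xffffffff,
 *				  example_intr_info, NULL))
 *		t3_fatal_err(adapter);
 */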
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
		       F_LOPIODRBDROPERR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1, 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1, 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1, 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1, 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}
/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1803 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1804 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1806 * XGMAC interrupt handler.
1808 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1810 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1811 /*
1812 * We mask out interrupt causes for which we're not taking interrupts.
1813 * This allows us to use polling logic to monitor some of the other
1814 * conditions when taking interrupts would impose too much load on the
1815 * system.
1816 */
1817 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1818 ~F_RXFIFO_OVERFLOW;
1820 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1821 mac->stats.tx_fifo_parity_err++;
1822 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1824 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1825 mac->stats.rx_fifo_parity_err++;
1826 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1828 if (cause & F_TXFIFO_UNDERRUN)
1829 mac->stats.tx_fifo_urun++;
1830 if (cause & F_RXFIFO_OVERFLOW)
1831 mac->stats.rx_fifo_ovfl++;
1832 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1833 mac->stats.serdes_signal_loss++;
1834 if (cause & F_XAUIPCSCTCERR)
1835 mac->stats.xaui_pcs_ctc_err++;
1836 if (cause & F_XAUIPCSALIGNCHANGE)
1837 mac->stats.xaui_pcs_align_change++;
1838 if (cause & F_XGM_INT) {
1839 t3_set_reg_field(adap,
1840 A_XGM_INT_ENABLE + mac->offset,
1841 F_XGM_INT, 0);
1842 mac->stats.link_faults++;
1844 t3_os_link_fault_handler(adap, idx);
1847 if (cause & XGM_INTR_FATAL)
1848 t3_fatal_err(adap);
1850 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1851 return cause != 0;
1855 * Interrupt handler for PHY events.
1857 int t3_phy_intr_handler(struct adapter *adapter)
1859 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1861 for_each_port(adapter, i) {
1862 struct port_info *p = adap2pinfo(adapter, i);
1864 if (!(p->phy.caps & SUPPORTED_IRQ))
1865 continue;
1867 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1868 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1870 if (phy_cause & cphy_cause_link_change)
1871 t3_link_changed(adapter, i);
1872 if (phy_cause & cphy_cause_fifo_error)
1873 p->phy.fifo_errors++;
1874 if (phy_cause & cphy_cause_module_change)
1875 t3_os_phymod_changed(adapter, i);
1879 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1880 return 0;
1884 * T3 slow path (non-data) interrupt handler.
1886 int t3_slow_intr_handler(struct adapter *adapter)
1888 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1890 cause &= adapter->slow_intr_mask;
1891 if (!cause)
1892 return 0;
1893 if (cause & F_PCIM0) {
1894 if (is_pcie(adapter))
1895 pcie_intr_handler(adapter);
1896 else
1897 pci_intr_handler(adapter);
1899 if (cause & F_SGE3)
1900 t3_sge_err_intr_handler(adapter);
1901 if (cause & F_MC7_PMRX)
1902 mc7_intr_handler(&adapter->pmrx);
1903 if (cause & F_MC7_PMTX)
1904 mc7_intr_handler(&adapter->pmtx);
1905 if (cause & F_MC7_CM)
1906 mc7_intr_handler(&adapter->cm);
1907 if (cause & F_CIM)
1908 cim_intr_handler(adapter);
1909 if (cause & F_TP1)
1910 tp_intr_handler(adapter);
1911 if (cause & F_ULP2_RX)
1912 ulprx_intr_handler(adapter);
1913 if (cause & F_ULP2_TX)
1914 ulptx_intr_handler(adapter);
1915 if (cause & F_PM1_RX)
1916 pmrx_intr_handler(adapter);
1917 if (cause & F_PM1_TX)
1918 pmtx_intr_handler(adapter);
1919 if (cause & F_CPL_SWITCH)
1920 cplsw_intr_handler(adapter);
1921 if (cause & F_MPS0)
1922 mps_intr_handler(adapter);
1923 if (cause & F_MC5A)
1924 t3_mc5_intr_handler(&adapter->mc5);
1925 if (cause & F_XGMAC0_0)
1926 mac_intr_handler(adapter, 0);
1927 if (cause & F_XGMAC0_1)
1928 mac_intr_handler(adapter, 1);
1929 if (cause & F_T3DBG)
1930 t3_os_ext_intr_handler(adapter);
1932 /* Clear the interrupts just processed. */
1933 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1934 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1935 return 1;
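/*
 * Illustrative sketch only (hypothetical helper, not part of this file;
 * assumes <linux/interrupt.h> is visible through common.h): a minimal
 * INTx interrupt handler can simply delegate to t3_slow_intr_handler(),
 * whose return value says whether any enabled cause was serviced.
 */
static irqreturn_t example_t3_intr(int irq, void *cookie)
{
	struct adapter *adapter = cookie;

	return t3_slow_intr_handler(adapter) ? IRQ_HANDLED : IRQ_NONE;
}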
1938 static unsigned int calc_gpio_intr(struct adapter *adap)
1940 unsigned int i, gpi_intr = 0;
1942 for_each_port(adap, i)
1943 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1944 adapter_info(adap)->gpio_intr[i])
1945 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1946 return gpi_intr;
1950 * t3_intr_enable - enable interrupts
1951 * @adapter: the adapter whose interrupts should be enabled
1953 * Enable interrupts by setting the interrupt enable registers of the
1954 * various HW modules and then enabling the top-level interrupt
1955 * concentrator.
1957 void t3_intr_enable(struct adapter *adapter)
1959 static const struct addr_val_pair intr_en_avp[] = {
1960 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1961 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1962 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1963 MC7_INTR_MASK},
1964 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1965 MC7_INTR_MASK},
1966 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1967 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1968 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1969 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1970 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1971 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1974 adapter->slow_intr_mask = PL_INTR_MASK;
1976 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1977 t3_write_reg(adapter, A_TP_INT_ENABLE,
1978 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1980 if (adapter->params.rev > 0) {
1981 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1982 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1983 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1984 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1985 F_PBL_BOUND_ERR_CH1);
1986 } else {
1987 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1988 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1991 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1993 if (is_pcie(adapter))
1994 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1995 else
1996 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1997 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1998 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2002 * t3_intr_disable - disable a card's interrupts
2003 * @adapter: the adapter whose interrupts should be disabled
2005 * Disable interrupts. We only disable the top-level interrupt
2006 * concentrator and the SGE data interrupts.
2008 void t3_intr_disable(struct adapter *adapter)
2010 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2011 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2012 adapter->slow_intr_mask = 0;
2016 * t3_intr_clear - clear all interrupts
2017 * @adapter: the adapter whose interrupts should be cleared
2019 * Clears all interrupts.
2021 void t3_intr_clear(struct adapter *adapter)
2023 static const unsigned int cause_reg_addr[] = {
2024 A_SG_INT_CAUSE,
2025 A_SG_RSPQ_FL_STATUS,
2026 A_PCIX_INT_CAUSE,
2027 A_MC7_INT_CAUSE,
2028 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2029 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2030 A_CIM_HOST_INT_CAUSE,
2031 A_TP_INT_CAUSE,
2032 A_MC5_DB_INT_CAUSE,
2033 A_ULPRX_INT_CAUSE,
2034 A_ULPTX_INT_CAUSE,
2035 A_CPL_INTR_CAUSE,
2036 A_PM1_TX_INT_CAUSE,
2037 A_PM1_RX_INT_CAUSE,
2038 A_MPS_INT_CAUSE,
2039 A_T3DBG_INT_CAUSE,
2041 unsigned int i;
2043 /* Clear PHY and MAC interrupts for each port. */
2044 for_each_port(adapter, i)
2045 t3_port_intr_clear(adapter, i);
2047 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2048 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2050 if (is_pcie(adapter))
2051 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2052 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2053 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2056 void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2058 struct port_info *pi = adap2pinfo(adapter, idx);
2060 t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2061 XGM_EXTRA_INTR_MASK);
2064 void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2066 struct port_info *pi = adap2pinfo(adapter, idx);
2068 t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2069 0x7ff);
2073 * t3_port_intr_enable - enable port-specific interrupts
2074 * @adapter: associated adapter
2075 * @idx: index of port whose interrupts should be enabled
2077 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
2078 * adapter port.
2080 void t3_port_intr_enable(struct adapter *adapter, int idx)
2082 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2084 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2085 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2086 phy->ops->intr_enable(phy);
2090 * t3_port_intr_disable - disable port-specific interrupts
2091 * @adapter: associated adapter
2092 * @idx: index of port whose interrupts should be disabled
2094 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2095 * adapter port.
2097 void t3_port_intr_disable(struct adapter *adapter, int idx)
2099 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2101 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2102 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2103 phy->ops->intr_disable(phy);
2107 * t3_port_intr_clear - clear port-specific interrupts
2108 * @adapter: associated adapter
2109 * @idx: index of port whose interrupts to clear
2111 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2112 * adapter port.
2114 void t3_port_intr_clear(struct adapter *adapter, int idx)
2116 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2118 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2119 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2120 phy->ops->intr_clear(phy);
2123 #define SG_CONTEXT_CMD_ATTEMPTS 100
2126 * t3_sge_write_context - write an SGE context
2127 * @adapter: the adapter
2128 * @id: the context id
2129 * @type: the context type
2131 * Program an SGE context with the values already loaded in the
2132 * CONTEXT_DATA? registers.
2134 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2135 unsigned int type)
2137 if (type == F_RESPONSEQ) {
2138 /*
2139 * Can't write the Response Queue Context bits for
2140 * Interrupt Armed or the Reserve bits after the chip
2141 * has been initialized out of reset. Writing to these
2142 * bits can confuse the hardware.
2143 */
2144 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2145 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2146 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2147 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2148 } else {
2149 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2150 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2151 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2152 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2154 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2155 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2156 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2157 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
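/*
 * Note on the mechanism above: the SG_CONTEXT_MASK registers act as
 * per-bit write enables, so only CONTEXT_DATA bits whose mask bit is 1
 * are written into the context.  For response queues MASK2 = 0x17ffffff
 * leaves bits 27, 29, 30 and 31 untouched -- presumably the Interrupt
 * Armed and Reserved fields mentioned in the comment above.
 */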
2161 * clear_sge_ctxt - completely clear an SGE context
2162 * @adapter: the adapter
2163 * @id: the context id
2164 * @type: the context type
2166 * Completely clear an SGE context. Used predominantly at post-reset
2167 * initialization. Note in particular that we don't skip writing to any
2168 * "sensitive bits" in the contexts the way that t3_sge_write_context()
2169 * does ...
2171 static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2172 unsigned int type)
2174 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2175 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2176 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2177 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2178 t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2179 t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2180 t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2181 t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2182 t3_write_reg(adap, A_SG_CONTEXT_CMD,
2183 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2184 return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2185 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2189 * t3_sge_init_ecntxt - initialize an SGE egress context
2190 * @adapter: the adapter to configure
2191 * @id: the context id
2192 * @gts_enable: whether to enable GTS for the context
2193 * @type: the egress context type
2194 * @respq: associated response queue
2195 * @base_addr: base address of queue
2196 * @size: number of queue entries
2197 * @token: uP token
2198 * @gen: initial generation value for the context
2199 * @cidx: consumer pointer
2201 * Initialize an SGE egress context and make it ready for use. If the
2202 * platform allows concurrent context operations, the caller is
2203 * responsible for appropriate locking.
2205 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2206 enum sge_context_type type, int respq, u64 base_addr,
2207 unsigned int size, unsigned int token, int gen,
2208 unsigned int cidx)
2210 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2212 if (base_addr & 0xfff) /* must be 4K aligned */
2213 return -EINVAL;
2214 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2215 return -EBUSY;
2217 base_addr >>= 12;
2218 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2219 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2220 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2221 V_EC_BASE_LO(base_addr & 0xffff));
2222 base_addr >>= 16;
2223 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2224 base_addr >>= 32;
2225 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2226 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2227 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2228 F_EC_VALID);
2229 return t3_sge_write_context(adapter, id, F_EGRESS);
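/*
 * Address packing, for reference: after the >> 12 the 52-bit page
 * number is split across the context words -- 16 bits into V_EC_BASE_LO
 * (DATA1), the next 32 bits into DATA2, and the top 4 bits into
 * V_EC_BASE_HI (DATA3) -- which together with the 12 alignment bits
 * re-cover a full 64-bit bus address.
 */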
2233 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2234 * @adapter: the adapter to configure
2235 * @id: the context id
2236 * @gts_enable: whether to enable GTS for the context
2237 * @base_addr: base address of queue
2238 * @size: number of queue entries
2239 * @bsize: size of each buffer for this queue
2240 * @cong_thres: threshold to signal congestion to upstream producers
2241 * @gen: initial generation value for the context
2242 * @cidx: consumer pointer
2244 * Initialize an SGE free list context and make it ready for use. The
2245 * caller is responsible for ensuring only one context operation occurs
2246 * at a time.
2248 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2249 int gts_enable, u64 base_addr, unsigned int size,
2250 unsigned int bsize, unsigned int cong_thres, int gen,
2251 unsigned int cidx)
2253 if (base_addr & 0xfff) /* must be 4K aligned */
2254 return -EINVAL;
2255 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2256 return -EBUSY;
2258 base_addr >>= 12;
2259 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2260 base_addr >>= 32;
2261 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2262 V_FL_BASE_HI((u32) base_addr) |
2263 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2264 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2265 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2266 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2267 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2268 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2269 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2270 return t3_sge_write_context(adapter, id, F_FREELIST);
2274 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2275 * @adapter: the adapter to configure
2276 * @id: the context id
2277 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2278 * @base_addr: base address of queue
2279 * @size: number of queue entries
2280 * @fl_thres: threshold for selecting the normal or jumbo free list
2281 * @gen: initial generation value for the context
2282 * @cidx: consumer pointer
2284 * Initialize an SGE response queue context and make it ready for use.
2285 * The caller is responsible for ensuring only one context operation
2286 * occurs at a time.
2288 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2289 int irq_vec_idx, u64 base_addr, unsigned int size,
2290 unsigned int fl_thres, int gen, unsigned int cidx)
2292 unsigned int intr = 0;
2294 if (base_addr & 0xfff) /* must be 4K aligned */
2295 return -EINVAL;
2296 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2297 return -EBUSY;
2299 base_addr >>= 12;
2300 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2301 V_CQ_INDEX(cidx));
2302 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2303 base_addr >>= 32;
2304 if (irq_vec_idx >= 0)
2305 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2306 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2307 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2308 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2309 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2313 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2314 * @adapter: the adapter to configure
2315 * @id: the context id
2316 * @base_addr: base address of queue
2317 * @size: number of queue entries
2318 * @rspq: response queue for async notifications
2319 * @ovfl_mode: CQ overflow mode
2320 * @credits: completion queue credits
2321 * @credit_thres: the credit threshold
2323 * Initialize an SGE completion queue context and make it ready for use.
2324 * The caller is responsible for ensuring only one context operation
2325 * occurs at a time.
2327 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2328 unsigned int size, int rspq, int ovfl_mode,
2329 unsigned int credits, unsigned int credit_thres)
2331 if (base_addr & 0xfff) /* must be 4K aligned */
2332 return -EINVAL;
2333 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2334 return -EBUSY;
2336 base_addr >>= 12;
2337 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2338 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2339 base_addr >>= 32;
2340 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2341 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2342 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2343 V_CQ_ERR(ovfl_mode));
2344 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2345 V_CQ_CREDIT_THRES(credit_thres));
2346 return t3_sge_write_context(adapter, id, F_CQ);
2350 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2351 * @adapter: the adapter
2352 * @id: the egress context id
2353 * @enable: enable (1) or disable (0) the context
2355 * Enable or disable an SGE egress context. The caller is responsible for
2356 * ensuring only one context operation occurs at a time.
2358 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2360 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2361 return -EBUSY;
2363 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2364 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2365 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2366 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2367 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2368 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2369 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2370 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2371 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2375 * t3_sge_disable_fl - disable an SGE free-buffer list
2376 * @adapter: the adapter
2377 * @id: the free list context id
2379 * Disable an SGE free-buffer list. The caller is responsible for
2380 * ensuring only one context operation occurs at a time.
2382 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2384 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2385 return -EBUSY;
2387 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2388 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2389 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2390 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2391 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2392 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2393 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2394 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2395 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2399 * t3_sge_disable_rspcntxt - disable an SGE response queue
2400 * @adapter: the adapter
2401 * @id: the response queue context id
2403 * Disable an SGE response queue. The caller is responsible for
2404 * ensuring only one context operation occurs at a time.
2406 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2408 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2409 return -EBUSY;
2411 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2412 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2413 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2414 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2415 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2416 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2417 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2418 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2419 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2423 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2424 * @adapter: the adapter
2425 * @id: the completion queue context id
2427 * Disable an SGE completion queue. The caller is responsible for
2428 * ensuring only one context operation occurs at a time.
2430 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2432 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2433 return -EBUSY;
2435 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2436 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2437 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2438 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2439 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2440 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2441 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2442 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2443 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2447 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2448 * @adapter: the adapter
2449 * @id: the context id
2450 * @op: the operation to perform
2452 * Perform the selected operation on an SGE completion queue context.
2453 * The caller is responsible for ensuring only one context operation
2454 * occurs at a time.
2456 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2457 unsigned int credits)
2459 u32 val;
2461 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2462 return -EBUSY;
2464 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2465 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2466 V_CONTEXT(id) | F_CQ);
2467 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2468 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2469 return -EIO;
2471 if (op >= 2 && op < 7) {
2472 if (adapter->params.rev > 0)
2473 return G_CQ_INDEX(val);
2475 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2476 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2477 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2478 F_CONTEXT_CMD_BUSY, 0,
2479 SG_CONTEXT_CMD_ATTEMPTS, 1))
2480 return -EIO;
2481 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2483 return 0;
2487 * t3_sge_read_context - read an SGE context
2488 * @type: the context type
2489 * @adapter: the adapter
2490 * @id: the context id
2491 * @data: holds the retrieved context
2493 * Read an SGE context. The caller is responsible for ensuring
2494 * only one context operation occurs at a time.
2496 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2497 unsigned int id, u32 data[4])
2499 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2500 return -EBUSY;
2502 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2503 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2504 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2505 SG_CONTEXT_CMD_ATTEMPTS, 1))
2506 return -EIO;
2507 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2508 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2509 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2510 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2511 return 0;
2515 * t3_sge_read_ecntxt - read an SGE egress context
2516 * @adapter: the adapter
2517 * @id: the context id
2518 * @data: holds the retrieved context
2520 * Read an SGE egress context. The caller is responsible for ensuring
2521 * only one context operation occurs at a time.
2523 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2525 if (id >= 65536)
2526 return -EINVAL;
2527 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2531 * t3_sge_read_cq - read an SGE CQ context
2532 * @adapter: the adapter
2533 * @id: the context id
2534 * @data: holds the retrieved context
2536 * Read an SGE CQ context. The caller is responsible for ensuring
2537 * only one context operation occurs at a time.
2539 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2541 if (id >= 65536)
2542 return -EINVAL;
2543 return t3_sge_read_context(F_CQ, adapter, id, data);
2547 * t3_sge_read_fl - read an SGE free-list context
2548 * @adapter: the adapter
2549 * @id: the context id
2550 * @data: holds the retrieved context
2552 * Read an SGE free-list context. The caller is responsible for ensuring
2553 * only one context operation occurs at a time.
2555 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2557 if (id >= SGE_QSETS * 2)
2558 return -EINVAL;
2559 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2563 * t3_sge_read_rspq - read an SGE response queue context
2564 * @adapter: the adapter
2565 * @id: the context id
2566 * @data: holds the retrieved context
2568 * Read an SGE response queue context. The caller is responsible for
2569 * ensuring only one context operation occurs at a time.
2571 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2573 if (id >= SGE_QSETS)
2574 return -EINVAL;
2575 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2579 * t3_config_rss - configure Rx packet steering
2580 * @adapter: the adapter
2581 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2582 * @cpus: values for the CPU lookup table (0xff terminated)
2583 * @rspq: values for the response queue lookup table (0xffff terminated)
2585 * Programs the receive packet steering logic. @cpus and @rspq provide
2586 * the values for the CPU and response queue lookup tables. If they
2587 * provide fewer values than the size of the tables, the supplied values
2588 * are used repeatedly until the tables are fully populated.
2590 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2591 const u8 * cpus, const u16 *rspq)
2593 int i, j, cpu_idx = 0, q_idx = 0;
2595 if (cpus)
2596 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2597 u32 val = i << 16;
2599 for (j = 0; j < 2; ++j) {
2600 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2601 if (cpus[cpu_idx] == 0xff)
2602 cpu_idx = 0;
2604 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2607 if (rspq)
2608 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2609 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2610 (i << 16) | rspq[q_idx++]);
2611 if (rspq[q_idx] == 0xffff)
2612 q_idx = 0;
2615 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
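/*
 * Usage sketch (hypothetical helper and illustrative flag set): spread
 * the lookup tables across two response queues.  The 0xff / 0xffff
 * terminators make the short arrays repeat until the tables are full,
 * as described in the comment above.
 */
static void example_config_rss(struct adapter *adap)
{
	static const u8 cpus[] = { 0, 1, 0xff };	/* yields 0,1,0,1,... */
	static const u16 rspq[] = { 0, 1, 0xffff };

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_HASHTOEPLITZ, cpus, rspq);
}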
2619 * t3_read_rss - read the contents of the RSS tables
2620 * @adapter: the adapter
2621 * @lkup: holds the contents of the RSS lookup table
2622 * @map: holds the contents of the RSS map table
2624 * Reads the contents of the receive packet steering tables.
2626 int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2628 int i;
2629 u32 val;
2631 if (lkup)
2632 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2633 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2634 0xffff0000 | i);
2635 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2636 if (!(val & 0x80000000))
2637 return -EAGAIN;
2638 *lkup++ = val;
2639 *lkup++ = (val >> 8);
2642 if (map)
2643 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2644 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2645 0xffff0000 | i);
2646 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2647 if (!(val & 0x80000000))
2648 return -EAGAIN;
2649 *map++ = val;
2651 return 0;
2655 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2656 * @adap: the adapter
2657 * @enable: 1 to select offload mode, 0 for regular NIC
2659 * Switches TP to NIC/offload mode.
2661 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2663 if (is_offload(adap) || !enable)
2664 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2665 V_NICMODE(!enable));
2669 * pm_num_pages - calculate the number of pages of the payload memory
2670 * @mem_size: the size of the payload memory
2671 * @pg_size: the size of each payload memory page
2673 * Calculate the number of pages, each of the given size, that fit in a
2674 * memory of the specified size, respecting the HW requirement that the
2675 * number of pages must be a multiple of 24.
2677 static inline unsigned int pm_num_pages(unsigned int mem_size,
2678 unsigned int pg_size)
2680 unsigned int n = mem_size / pg_size;
2682 return n - n % 24;
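/*
 * Worked example (illustrative numbers): for a 64 MB payload memory and
 * 16 KB pages, n = 67108864 / 16384 = 4096; 4096 % 24 = 16, so the
 * function returns 4080 pages, the largest multiple of 24 that fits.
 */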
2685 #define mem_region(adap, start, size, reg) \
2686 t3_write_reg((adap), A_ ## reg, (start)); \
2687 start += size
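/*
 * For reference: mem_region() writes the running offset to the named
 * base-address register and then advances the offset, so successive
 * invocations in partition_mem() below carve out back-to-back regions,
 * e.g. mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR).
 */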
2690 * partition_mem - partition memory and configure TP memory settings
2691 * @adap: the adapter
2692 * @p: the TP parameters
2694 * Partitions context and payload memory and configures TP's memory
2695 * registers.
2697 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2699 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2700 unsigned int timers = 0, timers_shift = 22;
2702 if (adap->params.rev > 0) {
2703 if (tids <= 16 * 1024) {
2704 timers = 1;
2705 timers_shift = 16;
2706 } else if (tids <= 64 * 1024) {
2707 timers = 2;
2708 timers_shift = 18;
2709 } else if (tids <= 256 * 1024) {
2710 timers = 3;
2711 timers_shift = 20;
2715 t3_write_reg(adap, A_TP_PMM_SIZE,
2716 p->chan_rx_size | (p->chan_tx_size >> 16));
2718 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2719 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2720 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2721 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2722 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2724 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2725 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2726 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2728 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2729 /* Add a bit of headroom and make multiple of 24 */
2730 pstructs += 48;
2731 pstructs -= pstructs % 24;
2732 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2734 m = tids * TCB_SIZE;
2735 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2736 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2737 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2738 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2739 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2740 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2741 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2742 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2744 m = (m + 4095) & ~0xfff;
2745 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2746 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2748 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2749 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2750 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2751 if (tids < m)
2752 adap->params.mc5.nservers += m - tids;
2755 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2756 u32 val)
2758 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2759 t3_write_reg(adap, A_TP_PIO_DATA, val);
2762 static void tp_config(struct adapter *adap, const struct tp_params *p)
2764 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2765 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2766 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2767 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2768 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2769 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2770 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2771 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2772 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2773 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2774 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2775 F_IPV6ENABLE | F_NICMODE);
2776 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2777 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2778 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2779 adap->params.rev > 0 ? F_ENABLEESND :
2780 F_T3A_ENABLEESND);
2782 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2783 F_ENABLEEPCMDAFULL,
2784 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2785 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2786 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2787 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2788 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2789 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2790 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2792 if (adap->params.rev > 0) {
2793 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2794 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2795 F_TXPACEAUTO);
2796 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2797 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2798 } else
2799 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2801 if (adap->params.rev == T3_REV_C)
2802 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2803 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2804 V_TABLELATENCYDELTA(4));
2806 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2807 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2808 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2809 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2812 /* Desired TP timer resolution in usec */
2813 #define TP_TMR_RES 50
2815 /* TCP timer values in ms */
2816 #define TP_DACK_TIMER 50
2817 #define TP_RTO_MIN 250
2820 * tp_set_timers - set TP timing parameters
2821 * @adap: the adapter to set
2822 * @core_clk: the core clock frequency in Hz
2824 * Set TP's timing parameters, such as the various timer resolutions and
2825 * the TCP timer values.
2827 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2829 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2830 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2831 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2832 unsigned int tps = core_clk >> tre;
2834 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2835 V_DELAYEDACKRESOLUTION(dack_re) |
2836 V_TIMESTAMPRESOLUTION(tstamp_re));
2837 t3_write_reg(adap, A_TP_DACK_TIMER,
2838 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2839 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2840 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2841 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2842 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2843 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2844 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2845 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2846 V_KEEPALIVEMAX(9));
2848 #define SECONDS * tps
2850 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2851 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2852 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2853 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2854 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2855 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2856 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2857 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2858 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2860 #undef SECONDS
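/*
 * Worked example (illustrative 200 MHz core clock): tre =
 * fls(200000000 / 20000) - 1 = fls(10000) - 1 = 13, so tps =
 * 200000000 >> 13 = 24414 ticks/s, i.e. a ~41 us tick -- the coarsest
 * power-of-two resolution not exceeding the 50 us target.  Likewise
 * dack_re = fls(40000) - 1 = 15 gives ~164 us delayed-ACK ticks, and
 * A_TP_DACK_TIMER is then written with 6103 / 20 = 305 ticks, about
 * 50 ms.
 */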
2864 * t3_tp_set_coalescing_size - set receive coalescing size
2865 * @adap: the adapter
2866 * @size: the receive coalescing size
2867 * @psh: whether a set PSH bit should deliver coalesced data
2869 * Set the receive coalescing size and PSH bit handling.
2871 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2873 u32 val;
2875 if (size > MAX_RX_COALESCING_LEN)
2876 return -EINVAL;
2878 val = t3_read_reg(adap, A_TP_PARA_REG3);
2879 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2881 if (size) {
2882 val |= F_RXCOALESCEENABLE;
2883 if (psh)
2884 val |= F_RXCOALESCEPSHEN;
2885 size = min(MAX_RX_COALESCING_LEN, size);
2886 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2887 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2889 t3_write_reg(adap, A_TP_PARA_REG3, val);
2890 return 0;
2894 * t3_tp_set_max_rxsize - set the max receive size
2895 * @adap: the adapter
2896 * @size: the max receive size
2898 * Set TP's max receive size. This is the limit that applies when
2899 * receive coalescing is disabled.
2901 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2903 t3_write_reg(adap, A_TP_PARA_REG7,
2904 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2907 static void init_mtus(unsigned short mtus[])
2909 /*
2910 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2911 * it can accommodate max size TCP/IP headers when SACK and timestamps
2912 * are enabled and still have at least 8 bytes of payload.
2913 */
2914 mtus[0] = 88;
2915 mtus[1] = 88;
2916 mtus[2] = 256;
2917 mtus[3] = 512;
2918 mtus[4] = 576;
2919 mtus[5] = 1024;
2920 mtus[6] = 1280;
2921 mtus[7] = 1492;
2922 mtus[8] = 1500;
2923 mtus[9] = 2002;
2924 mtus[10] = 2048;
2925 mtus[11] = 4096;
2926 mtus[12] = 4352;
2927 mtus[13] = 8192;
2928 mtus[14] = 9000;
2929 mtus[15] = 9600;
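/*
 * Arithmetic behind the 88-byte minimum: 20 bytes of IP header plus
 * 20 bytes of TCP header plus up to 40 bytes of TCP options (SACK and
 * timestamps) leaves exactly 8 bytes of payload, matching the comment
 * above.
 */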
2933 * Initial congestion control parameters.
2935 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2937 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2938 a[9] = 2;
2939 a[10] = 3;
2940 a[11] = 4;
2941 a[12] = 5;
2942 a[13] = 6;
2943 a[14] = 7;
2944 a[15] = 8;
2945 a[16] = 9;
2946 a[17] = 10;
2947 a[18] = 14;
2948 a[19] = 17;
2949 a[20] = 21;
2950 a[21] = 25;
2951 a[22] = 30;
2952 a[23] = 35;
2953 a[24] = 45;
2954 a[25] = 60;
2955 a[26] = 80;
2956 a[27] = 100;
2957 a[28] = 200;
2958 a[29] = 300;
2959 a[30] = 400;
2960 a[31] = 500;
2962 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2963 b[9] = b[10] = 1;
2964 b[11] = b[12] = 2;
2965 b[13] = b[14] = b[15] = b[16] = 3;
2966 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2967 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2968 b[28] = b[29] = 6;
2969 b[30] = b[31] = 7;
2972 /* The minimum additive increment value for the congestion control table */
2973 #define CC_MIN_INCR 2U
2976 * t3_load_mtus - write the MTU and congestion control HW tables
2977 * @adap: the adapter
2978 * @mtus: the unrestricted values for the MTU table
2979 * @alpha: the values for the congestion control alpha parameter
2980 * @beta: the values for the congestion control beta parameter
2981 * @mtu_cap: the maximum permitted effective MTU
2983 * Write the MTU table with the supplied MTUs capping each at @mtu_cap.
2984 * Update the high-speed congestion control table with the supplied alpha,
2985 * beta, and MTUs.
2987 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2988 unsigned short alpha[NCCTRL_WIN],
2989 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2991 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2992 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2993 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2994 28672, 40960, 57344, 81920, 114688, 163840, 229376
2997 unsigned int i, w;
2999 for (i = 0; i < NMTUS; ++i) {
3000 unsigned int mtu = min(mtus[i], mtu_cap);
3001 unsigned int log2 = fls(mtu);
3003 if (!(mtu & ((1 << log2) >> 2))) /* round */
3004 log2--;
3005 t3_write_reg(adap, A_TP_MTU_TABLE,
3006 (i << 24) | (log2 << 16) | mtu);
3008 for (w = 0; w < NCCTRL_WIN; ++w) {
3009 unsigned int inc;
3011 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3012 CC_MIN_INCR);
3014 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3015 (w << 16) | (beta[w] << 13) | inc);
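/*
 * Worked example (illustrative values): for mtu = 1500, alpha[w] = 2
 * and a window with avg_pkts[w] = 160, the additive increment is
 * inc = max((1500 - 40) * 2 / 160, 2) = 18 bytes; CC_MIN_INCR only
 * kicks in for small MTUs or large windows.
 */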
3021 * t3_read_hw_mtus - returns the values in the HW MTU table
3022 * @adap: the adapter
3023 * @mtus: where to store the HW MTU values
3025 * Reads the HW MTU table.
3027 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
3029 int i;
3031 for (i = 0; i < NMTUS; ++i) {
3032 unsigned int val;
3034 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3035 val = t3_read_reg(adap, A_TP_MTU_TABLE);
3036 mtus[i] = val & 0x3fff;
3041 * t3_get_cong_cntl_tab - reads the congestion control table
3042 * @adap: the adapter
3043 * @incr: where to store the alpha values
3045 * Reads the additive increments programmed into the HW congestion
3046 * control table.
3048 void t3_get_cong_cntl_tab(struct adapter *adap,
3049 unsigned short incr[NMTUS][NCCTRL_WIN])
3051 unsigned int mtu, w;
3053 for (mtu = 0; mtu < NMTUS; ++mtu)
3054 for (w = 0; w < NCCTRL_WIN; ++w) {
3055 t3_write_reg(adap, A_TP_CCTRL_TABLE,
3056 0xffff0000 | (mtu << 5) | w);
3057 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
3058 0x1fff;
3063 * t3_tp_get_mib_stats - read TP's MIB counters
3064 * @adap: the adapter
3065 * @tps: holds the returned counter values
3067 * Returns the values of TP's MIB counters.
3069 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
3071 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
3072 sizeof(*tps) / sizeof(u32), 0);
3075 #define ulp_region(adap, name, start, len) \
3076 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3077 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3078 (start) + (len) - 1); \
3079 start += len
3081 #define ulptx_region(adap, name, start, len) \
3082 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3083 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3084 (start) + (len) - 1)
3086 static void ulp_config(struct adapter *adap, const struct tp_params *p)
3088 unsigned int m = p->chan_rx_size;
3090 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3091 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3092 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3093 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3094 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3095 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3096 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3097 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
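/*
 * Layout note: ulptx_region() deliberately omits the "start += len"
 * step, so the ULP TX TPT and PBL windows alias the ranges claimed by
 * the STAG and RX PBL regions that follow them.  The five advancing
 * regions sum to 2 * (1/8) + 3 * (1/4) = 1.0 of chan_rx_size, so the
 * carve-out occupies exactly one further channel-sized window above
 * the per-channel RX buffers.
 */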
3101 * t3_set_proto_sram - set the contents of the protocol SRAM
3102 * @adapter: the adapter
3103 * @data: the protocol image
3105 * Write the contents of the protocol SRAM.
3107 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
3109 int i;
3110 const __be32 *buf = (const __be32 *)data;
3112 for (i = 0; i < PROTO_SRAM_LINES; i++) {
3113 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
3114 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
3115 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
3116 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
3117 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
3119 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3120 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3121 return -EIO;
3123 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
3125 return 0;
3128 void t3_config_trace_filter(struct adapter *adapter,
3129 const struct trace_params *tp, int filter_index,
3130 int invert, int enable)
3132 u32 addr, key[4], mask[4];
3134 key[0] = tp->sport | (tp->sip << 16);
3135 key[1] = (tp->sip >> 16) | (tp->dport << 16);
3136 key[2] = tp->dip;
3137 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3139 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3140 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3141 mask[2] = tp->dip_mask;
3142 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3144 if (invert)
3145 key[3] |= (1 << 29);
3146 if (enable)
3147 key[3] |= (1 << 28);
3149 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3150 tp_wr_indirect(adapter, addr++, key[0]);
3151 tp_wr_indirect(adapter, addr++, mask[0]);
3152 tp_wr_indirect(adapter, addr++, key[1]);
3153 tp_wr_indirect(adapter, addr++, mask[1]);
3154 tp_wr_indirect(adapter, addr++, key[2]);
3155 tp_wr_indirect(adapter, addr++, mask[2]);
3156 tp_wr_indirect(adapter, addr++, key[3]);
3157 tp_wr_indirect(adapter, addr, mask[3]);
3158 t3_read_reg(adapter, A_TP_PIO_DATA);
3162 * t3_config_sched - configure a HW traffic scheduler
3163 * @adap: the adapter
3164 * @kbps: target rate in Kbps
3165 * @sched: the scheduler index
3167 * Configure a HW scheduler for the target rate.
3169 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
3171 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3172 unsigned int clk = adap->params.vpd.cclk * 1000;
3173 unsigned int selected_cpt = 0, selected_bpt = 0;
3175 if (kbps > 0) {
3176 kbps *= 125; /* -> bytes */
3177 for (cpt = 1; cpt <= 255; cpt++) {
3178 tps = clk / cpt;
3179 bpt = (kbps + tps / 2) / tps;
3180 if (bpt > 0 && bpt <= 255) {
3181 v = bpt * tps;
3182 delta = v >= kbps ? v - kbps : kbps - v;
3183 if (delta <= mindelta) {
3184 mindelta = delta;
3185 selected_cpt = cpt;
3186 selected_bpt = bpt;
3188 } else if (selected_cpt)
3189 break;
3191 if (!selected_cpt)
3192 return -EINVAL;
3194 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3195 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3196 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3197 if (sched & 1)
3198 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3199 else
3200 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3201 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3202 return 0;
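/*
 * Worked example (illustrative 200 MHz core clock): kbps = 100000
 * becomes a target of 100000 * 125 = 12500000 bytes/s.  With cpt = 16,
 * tps = 200000000 / 16 = 12500000 ticks/s and bpt = 1 byte per tick
 * hit the rate exactly (delta = 0); because the comparison is
 * "delta <= mindelta", later exact candidates with coarser ticks are
 * preferred over earlier ones.
 */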
3205 static int tp_init(struct adapter *adap, const struct tp_params *p)
3207 int busy = 0;
3209 tp_config(adap, p);
3210 t3_set_vlan_accel(adap, 3, 0);
3212 if (is_offload(adap)) {
3213 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3214 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3215 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3216 0, 1000, 5);
3217 if (busy)
3218 CH_ERR(adap, "TP initialization timed out\n");
3221 if (!busy)
3222 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3223 return busy;
3226 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3228 if (port_mask & ~((1 << adap->params.nports) - 1))
3229 return -EINVAL;
3230 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3231 port_mask << S_PORT0ACTIVE);
3232 return 0;
3236 * Perform the bits of HW initialization that are dependent on the Tx
3237 * channels being used.
3239 static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3241 int i;
3243 if (chan_map != 3) { /* one channel */
3244 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3245 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3246 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3247 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3248 F_TPTXPORT1EN | F_PORT1ACTIVE));
3249 t3_write_reg(adap, A_PM1_TX_CFG,
3250 chan_map == 1 ? 0xffffffff : 0);
3251 } else { /* two channels */
3252 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3253 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3254 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3255 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3256 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3257 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3258 F_ENFORCEPKT);
3259 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3260 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3261 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3262 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3263 for (i = 0; i < 16; i++)
3264 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3265 (i << 16) | 0x1010);
3269 static int calibrate_xgm(struct adapter *adapter)
3271 if (uses_xaui(adapter)) {
3272 unsigned int v, i;
3274 for (i = 0; i < 5; ++i) {
3275 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3276 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3277 msleep(1);
3278 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3279 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3280 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3281 V_XAUIIMP(G_CALIMP(v) >> 2));
3282 return 0;
3285 CH_ERR(adapter, "MAC calibration failed\n");
3286 return -1;
3287 } else {
3288 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3289 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3290 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3291 F_XGM_IMPSETUPDATE);
3293 return 0;
3296 static void calibrate_xgm_t3b(struct adapter *adapter)
3298 if (!uses_xaui(adapter)) {
3299 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3300 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3301 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3302 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3303 F_XGM_IMPSETUPDATE);
3304 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE, 0);
3306 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3307 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3311 struct mc7_timing_params {
3312 unsigned char ActToPreDly;
3313 unsigned char ActToRdWrDly;
3314 unsigned char PreCyc;
3315 unsigned char RefCyc[5];
3316 unsigned char BkCyc;
3317 unsigned char WrToRdDly;
3318 unsigned char RdToWrDly;
3322 * Write a value to a register and check that the write completed. These
3323 * writes normally complete in a cycle or two, so one read should suffice.
3324 * The very first read exists to flush the posted write to the device.
3326 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3328 t3_write_reg(adapter, addr, val);
3329 t3_read_reg(adapter, addr); /* flush */
3330 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3331 return 0;
3332 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3333 return -EIO;
3336 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3338 static const unsigned int mc7_mode[] = {
3339 0x632, 0x642, 0x652, 0x432, 0x442
3341 static const struct mc7_timing_params mc7_timings[] = {
3342 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3343 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3344 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3345 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3346 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3349 u32 val;
3350 unsigned int width, density, slow, attempts;
3351 struct adapter *adapter = mc7->adapter;
3352 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3354 if (!mc7->size)
3355 return 0;
3357 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3358 slow = val & F_SLOW;
3359 width = G_WIDTH(val);
3360 density = G_DEN(val);
3362 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3363 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3364 msleep(1);
3366 if (!slow) {
3367 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3368 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3369 msleep(1);
3370 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3371 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3372 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3373 mc7->name);
3374 goto out_fail;
3378 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3379 V_ACTTOPREDLY(p->ActToPreDly) |
3380 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3381 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3382 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3384 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3385 val | F_CLKEN | F_TERM150);
3386 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3388 if (!slow)
3389 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3390 F_DLLENB);
3391 udelay(1);
3393 val = slow ? 3 : 6;
3394 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3395 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3396 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3397 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3398 goto out_fail;
3400 if (!slow) {
3401 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3402 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3403 udelay(5);
3406 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3407 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3408 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3409 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3410 mc7_mode[mem_type]) ||
3411 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3412 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3413 goto out_fail;
3415 /* clock value is in KHz */
3416 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3417 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
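/*
 * The two steps above compute the number of memory-clock cycles in a
 * 7812.5 ns interval (mc7_clock is in KHz, so KHz * 7812.5 / 10^6
 * gives cycles per 7.8125 us) -- presumably the standard 64 ms / 8192
 * row DRAM refresh period being programmed into V_PREREFDIV.
 */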
3419 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3420 F_PERREFEN | V_PREREFDIV(mc7_clock));
3421 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3423 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3424 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3425 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3426 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3427 (mc7->size << width) - 1);
3428 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3429 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3431 attempts = 50;
3432 do {
3433 msleep(250);
3434 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3435 } while ((val & F_BUSY) && --attempts);
3436 if (val & F_BUSY) {
3437 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3438 goto out_fail;
3441 /* Enable normal memory accesses. */
3442 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3443 return 0;
3445 out_fail:
3446 return -1;
3449 static void config_pcie(struct adapter *adap)
3451 static const u16 ack_lat[4][6] = {
3452 {237, 416, 559, 1071, 2095, 4143},
3453 {128, 217, 289, 545, 1057, 2081},
3454 {73, 118, 154, 282, 538, 1050},
3455 {67, 107, 86, 150, 278, 534}
3457 static const u16 rpl_tmr[4][6] = {
3458 {711, 1248, 1677, 3213, 6285, 12429},
3459 {384, 651, 867, 1635, 3171, 6243},
3460 {219, 354, 462, 846, 1614, 3150},
3461 {201, 321, 258, 450, 834, 1602}
3464 u16 val, devid;
3465 unsigned int log2_width, pldsize;
3466 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3468 pci_read_config_word(adap->pdev,
3469 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3470 &val);
3471 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3473 pci_read_config_word(adap->pdev, 0x2, &devid);
3474 if (devid == 0x37) {
3475 pci_write_config_word(adap->pdev,
3476 adap->params.pci.pcie_cap_addr +
3477 PCI_EXP_DEVCTL,
3478 val & ~PCI_EXP_DEVCTL_READRQ &
3479 ~PCI_EXP_DEVCTL_PAYLOAD);
3480 pldsize = 0;
3483 pci_read_config_word(adap->pdev,
3484 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3485 &val);
3487 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3488 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3489 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3490 log2_width = fls(adap->params.pci.width) - 1;
3491 acklat = ack_lat[log2_width][pldsize];
3492 if (val & 1) /* check LOsEnable */
3493 acklat += fst_trn_tx * 4;
3494 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3496 if (adap->params.rev == 0)
3497 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3498 V_T3A_ACKLAT(M_T3A_ACKLAT),
3499 V_T3A_ACKLAT(acklat));
3500 else
3501 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3502 V_ACKLAT(acklat));
3504 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3505 V_REPLAYLMT(rpllmt));
3507 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3508 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3509 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3510 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
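/*
 * Worked example (illustrative link): an x8 link gives log2_width =
 * fls(8) - 1 = 3, and a 256-byte max payload encodes as pldsize = 1,
 * so the base values are acklat = ack_lat[3][1] = 107 and rpllmt
 * starts from rpl_tmr[3][1] = 321.  rpllmt always adds fst_trn_rx * 4,
 * while acklat is padded by fst_trn_tx * 4 only when L0s is enabled in
 * the link control register.
 */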
3514 * Initialize and configure T3 HW modules. This performs the
3515 * initialization steps that need to be done once after a card is reset.
3516 * MAC and PHY initialization is handled separately whenever a port is enabled.
3518 * fw_params are passed to FW and their value is platform dependent. Only the
3519 * top 8 bits are available for use, the rest must be 0.
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	chan_init_hw(adapter, adapter->params.chan_map);
	t3_sge_init(adapter, &adapter->params.sge);
	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	attempts = 100;
	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}
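/*
 * Notes (illustrative, not from the original source): the uP poll above
 * allows at most 100 iterations of msleep(20), i.e. about 2 seconds.
 * A minimal sketch of the expected ordering, assuming the usual cxgb3
 * net-driver flow and shown for orientation only:
 *
 *	t3_prep_adapter(adapter, ai, 1);	(SW state, optional reset)
 *	t3_init_hw(adapter, 0);			(once per card reset)
 *
 * with per-port MAC/PHY bring-up done separately later.
 */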
/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.
 */
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	static unsigned short speed_map[] = { 33, 66, 100, 133 };
	u32 pci_mode, pcie_cap;

	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		u16 val;

		p->variant = PCI_VARIANT_PCIE;
		p->pcie_cap_addr = pcie_cap;
		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
				     &val);
		p->width = (val >> 4) & 0x3f;
		return;
	}

	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}
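/*
 * Worked example (illustrative): PCI_EXP_LNKSTA carries the negotiated
 * link width in bits [9:4], so a LNKSTA value of 0x0081 decodes to
 * (0x0081 >> 4) & 0x3f = 8, i.e. a x8 link.
 */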
/**
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@caps: link capabilities of the current card
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/duplex/flow-control/autonegotiation
 *	settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = lc->speed = SPEED_INVALID;
	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising = lc->supported;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}
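/*
 * Example outcome (derived from the code above): for a PHY whose caps
 * include SUPPORTED_Autoneg, the link starts out advertising everything
 * it supports, with autoneg enabled and requested_fc set to
 * PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG; otherwise autoneg is disabled
 * and nothing is advertised.
 */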
/**
 *	mc7_calc_size - calculate MC7 memory size
 *	@cfg: the MC7 configuration
 *
 *	Calculates the size of an MC7 memory in bytes from the value of its
 *	configuration register.
 */
static unsigned int mc7_calc_size(u32 cfg)
{
	unsigned int width = G_WIDTH(cfg);
	unsigned int banks = !!(cfg & F_BKS) + 1;
	unsigned int org = !!(cfg & F_ORG) + 1;
	unsigned int density = G_DEN(cfg);
	unsigned int MBs = ((256 << density) * banks) / (org << width);

	return MBs << 20;
}
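/*
 * Worked example (hypothetical register decode): with density = 1,
 * F_BKS set (banks = 2), F_ORG clear (org = 1) and width = 2, the size
 * is ((256 << 1) * 2) / (1 << 2) = 256 MBs, returned as 256 << 20 bytes.
 */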
static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
		     unsigned int base_addr, const char *name)
{
	u32 cfg;

	mc7->adapter = adapter;
	mc7->name = name;
	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
	mc7->width = G_WIDTH(cfg);
}
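/*
 * Note (derived from the code above): offsets are relative to the PMRX
 * register block, so preparing the PMRX instance with
 * base_addr == MC7_PMRX_BASE_ADDR yields mc7->offset == 0, while the
 * PMTX and CM instances get the distance from PMRX to their own blocks.
 */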
void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	u16 devid;

	mac->adapter = adapter;
	pci_read_config_word(adapter->pdev, 0x2, &devid);

	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
		index = 0;
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);

	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}
/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't.
 */
int t3_reset_adapter(struct adapter *adapter)
{
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Delay to give the device time to reset fully.
	 * XXX The delay time should be tuned.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}
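/*
 * Note (derived from the code above): the loop polls the PCI vendor ID
 * at config offset 0x00 for up to 10 * 50 ms = 500 ms, treating the
 * read-back of 0x1425 (Chelsio's PCI vendor ID) as the sign that the
 * device has come out of reset.
 */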
static int init_parity(struct adapter *adap)
{
	int i, err, addr;

	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	for (err = i = 0; !err && i < 16; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0xfff0; !err && i <= 0xffff; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0; !err && i < SGE_QSETS; i++)
		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
	if (err)
		return err;

	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
	for (i = 0; i < 4; i++)
		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
				     F_IBQDBGWR | V_IBQDBGQID(i) |
				     V_IBQDBGADDR(addr));
			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
					      F_IBQDBGBUSY, 0, 2, 1);
			if (err)
				return err;
		}
	return 0;
}
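/*
 * Summary (derived from the code above): parity state is initialized by
 * clearing SGE egress contexts 0-15 and 0xfff0-0xffff, one response
 * queue context per qset, and then writing zeros to every address of
 * each of the four CIM IBQs through the IBQ debug interface.
 */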
/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	unsigned int i, j = -1;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports0 + ai->nports1;
	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	/*
	 * We used to only run the "adapter check task" once a second if
	 * we had PHYs which didn't support interrupts (we would check
	 * their link status once a second).  Now we check other conditions
	 * in that routine which could potentially impose a very high
	 * interrupt load on the system.  As such, we now always scan the
	 * adapter state once a second ...
	 */
	adapter->params.linkpoll_period = 10;
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
	}

	init_mtus(adapter->params.mtus);
	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		if (!pti->phy_prep) {
			CH_ALERT(adapter, "Invalid port type index %d\n",
				 adapter->params.vpd.port_type[j]);
			return -EINVAL;
		}

		p->phy.mdio.dev = adapter->port[i];
		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr, ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr, ETH_ALEN);
		init_link_config(&p->link_config, p->phy.caps);
		p->phy.ops->power_down(&p->phy, 1);

		/*
		 * If the PHY doesn't support interrupts for link status
		 * changes, schedule a scan of the adapter links at least
		 * once a second.
		 */
		if (!(p->phy.caps & SUPPORTED_IRQ) &&
		    adapter->params.linkpoll_period > 10)
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
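/*
 * Worked example (hypothetical address): if the VPD base Ethernet
 * address were 00:07:43:ab:cd:10, port 0 would get 00:07:43:ab:cd:10
 * and port 1 would get 00:07:43:ab:cd:11, per the low-octet derivation
 * in t3_prep_adapter() above.
 */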
void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}
int t3_replay_prep_adapter(struct adapter *adapter)
{
	const struct adapter_info *ai = adapter->params.info;
	unsigned int i, j = -1;
	int ret;

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
		if (ret)
			return ret;
		p->phy.ops->power_down(&p->phy, 1);
	}

	return 0;
}