MIPS: handle write_combine in pci_mmap_page_range
[linux-2.6/linux-loongson.git] / drivers / net / cxgb3 / t3_hw.c
blob4f68aeb2679adcbf0f11a617ab815fa06a6bb9f5
1 /*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
32 #include "common.h"
33 #include "regs.h"
34 #include "sge_defs.h"
35 #include "firmware_exports.h"
37 /**
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
53 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
56 while (1) {
57 u32 val = t3_read_reg(adapter, reg);
59 if (!!(val & mask) == polarity) {
60 if (valp)
61 *valp = val;
62 return 0;
64 if (--attempts == 0)
65 return -EAGAIN;
66 if (delay)
67 udelay(delay);
71 /**
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
82 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
85 while (n--) {
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
87 p++;
91 /**
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
98 * Sets a register field specified by the supplied mask to the
99 * given value.
101 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
102 u32 val)
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
111 * t3_read_indirect - read indirectly addressed registers
112 * @adap: the adapter
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
119 * Reads registers that are accessed indirectly through an address/data
120 * register pair.
122 static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
126 while (nregs--) {
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
129 start_idx++;
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
141 * accesses.
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
144 u64 *buf)
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
152 if (start >= size64 || start + n > size64)
153 return -EINVAL;
155 start *= (8 << mc7->width);
156 while (n--) {
157 int i;
158 u64 val64 = 0;
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
161 int attempts = 10;
162 u32 val;
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
170 if (val & F_BUSY)
171 return -EIO;
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
176 mc7->offset +
177 A_MC7_BD_DATA0);
178 val64 |= (u64) val << 32;
179 } else {
180 if (mc7->width > 1)
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
184 start += 8;
186 *buf++ = val64;
188 return 0;
192 * Initialize MI1.
194 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_CLKDIV(clkdiv);
199 t3_write_reg(adap, A_MI1_CFG, val);
202 #define MDIO_ATTEMPTS 20
205 * MI1 read/write operations for clause 22 PHYs.
207 static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
208 int reg_addr, unsigned int *valp)
210 int ret;
211 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
213 if (mmd_addr)
214 return -EINVAL;
216 mutex_lock(&adapter->mdio_lock);
217 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
218 t3_write_reg(adapter, A_MI1_ADDR, addr);
219 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
220 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
221 if (!ret)
222 *valp = t3_read_reg(adapter, A_MI1_DATA);
223 mutex_unlock(&adapter->mdio_lock);
224 return ret;
227 static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
228 int reg_addr, unsigned int val)
230 int ret;
231 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
233 if (mmd_addr)
234 return -EINVAL;
236 mutex_lock(&adapter->mdio_lock);
237 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
238 t3_write_reg(adapter, A_MI1_ADDR, addr);
239 t3_write_reg(adapter, A_MI1_DATA, val);
240 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
241 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
242 mutex_unlock(&adapter->mdio_lock);
243 return ret;
246 static const struct mdio_ops mi1_mdio_ops = {
247 t3_mi1_read,
248 t3_mi1_write
252 * Performs the address cycle for clause 45 PHYs.
253 * Must be called with the MDIO_LOCK held.
255 static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
256 int reg_addr)
258 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
260 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
261 t3_write_reg(adapter, A_MI1_ADDR, addr);
262 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
263 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
264 return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
265 MDIO_ATTEMPTS, 10);
269 * MI1 read/write operations for indirect-addressed PHYs.
271 static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
272 int reg_addr, unsigned int *valp)
274 int ret;
276 mutex_lock(&adapter->mdio_lock);
277 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
278 if (!ret) {
279 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
280 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
281 MDIO_ATTEMPTS, 10);
282 if (!ret)
283 *valp = t3_read_reg(adapter, A_MI1_DATA);
285 mutex_unlock(&adapter->mdio_lock);
286 return ret;
289 static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
290 int reg_addr, unsigned int val)
292 int ret;
294 mutex_lock(&adapter->mdio_lock);
295 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
296 if (!ret) {
297 t3_write_reg(adapter, A_MI1_DATA, val);
298 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
299 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
300 MDIO_ATTEMPTS, 10);
302 mutex_unlock(&adapter->mdio_lock);
303 return ret;
306 static const struct mdio_ops mi1_mdio_ext_ops = {
307 mi1_ext_read,
308 mi1_ext_write
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.  The read-modify-write is
 *	not atomic; callers needing atomicity must provide their own locking.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
337 * t3_phy_reset - reset a PHY block
338 * @phy: the PHY to operate on
339 * @mmd: the device address of the PHY block to reset
340 * @wait: how long to wait for the reset to complete in 1ms increments
342 * Resets a PHY block and optionally waits for the reset to complete.
343 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
344 * for 10G PHYs.
346 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
348 int err;
349 unsigned int ctl;
351 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
352 if (err || !wait)
353 return err;
355 do {
356 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
357 if (err)
358 return err;
359 ctl &= BMCR_RESET;
360 if (ctl)
361 msleep(1);
362 } while (ctl && --wait);
364 return ctl ? -1 : 0;
368 * t3_phy_advertise - set the PHY advertisement registers for autoneg
369 * @phy: the PHY to operate on
370 * @advert: bitmap of capabilities the PHY should advertise
372 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
373 * requested capabilities.
375 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
377 int err;
378 unsigned int val = 0;
380 err = mdio_read(phy, 0, MII_CTRL1000, &val);
381 if (err)
382 return err;
384 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
385 if (advert & ADVERTISED_1000baseT_Half)
386 val |= ADVERTISE_1000HALF;
387 if (advert & ADVERTISED_1000baseT_Full)
388 val |= ADVERTISE_1000FULL;
390 err = mdio_write(phy, 0, MII_CTRL1000, val);
391 if (err)
392 return err;
394 val = 1;
395 if (advert & ADVERTISED_10baseT_Half)
396 val |= ADVERTISE_10HALF;
397 if (advert & ADVERTISED_10baseT_Full)
398 val |= ADVERTISE_10FULL;
399 if (advert & ADVERTISED_100baseT_Half)
400 val |= ADVERTISE_100HALF;
401 if (advert & ADVERTISED_100baseT_Full)
402 val |= ADVERTISE_100FULL;
403 if (advert & ADVERTISED_Pause)
404 val |= ADVERTISE_PAUSE_CAP;
405 if (advert & ADVERTISED_Asym_Pause)
406 val |= ADVERTISE_PAUSE_ASYM;
407 return mdio_write(phy, 0, MII_ADVERTISE, val);
411 * t3_phy_advertise_fiber - set fiber PHY advertisement register
412 * @phy: the PHY to operate on
413 * @advert: bitmap of capabilities the PHY should advertise
415 * Sets a fiber PHY's advertisement register to advertise the
416 * requested capabilities.
418 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
420 unsigned int val = 0;
422 if (advert & ADVERTISED_1000baseT_Half)
423 val |= ADVERTISE_1000XHALF;
424 if (advert & ADVERTISED_1000baseT_Full)
425 val |= ADVERTISE_1000XFULL;
426 if (advert & ADVERTISED_Pause)
427 val |= ADVERTISE_1000XPAUSE;
428 if (advert & ADVERTISED_Asym_Pause)
429 val |= ADVERTISE_1000XPSE_ASYM;
430 return mdio_write(phy, 0, MII_ADVERTISE, val);
434 * t3_set_phy_speed_duplex - force PHY speed and duplex
435 * @phy: the PHY to operate on
436 * @speed: requested PHY speed
437 * @duplex: requested PHY duplex
439 * Force a 10/100/1000 PHY's speed and duplex. This also disables
440 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
442 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
444 int err;
445 unsigned int ctl;
447 err = mdio_read(phy, 0, MII_BMCR, &ctl);
448 if (err)
449 return err;
451 if (speed >= 0) {
452 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
453 if (speed == SPEED_100)
454 ctl |= BMCR_SPEED100;
455 else if (speed == SPEED_1000)
456 ctl |= BMCR_SPEED1000;
458 if (duplex >= 0) {
459 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
460 if (duplex == DUPLEX_FULL)
461 ctl |= BMCR_FULLDPLX;
463 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
464 ctl |= BMCR_ANENABLE;
465 return mdio_write(phy, 0, MII_BMCR, ctl);
468 int t3_phy_lasi_intr_enable(struct cphy *phy)
470 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
473 int t3_phy_lasi_intr_disable(struct cphy *phy)
475 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
478 int t3_phy_lasi_intr_clear(struct cphy *phy)
480 u32 val;
482 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
485 int t3_phy_lasi_intr_handler(struct cphy *phy)
487 unsigned int status;
488 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
490 if (err)
491 return err;
492 return (status & 1) ? cphy_cause_link_change : 0;
495 static const struct adapter_info t3_adap_info[] = {
496 {1, 1, 0,
497 F_GPIO2_OEN | F_GPIO4_OEN |
498 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
499 &mi1_mdio_ops, "Chelsio PE9000"},
500 {1, 1, 0,
501 F_GPIO2_OEN | F_GPIO4_OEN |
502 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
503 &mi1_mdio_ops, "Chelsio T302"},
504 {1, 0, 0,
505 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
506 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
507 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
508 &mi1_mdio_ext_ops, "Chelsio T310"},
509 {1, 1, 0,
510 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
511 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
512 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
513 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
514 &mi1_mdio_ext_ops, "Chelsio T320"},
517 {1, 0, 0,
518 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
519 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
520 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
521 &mi1_mdio_ext_ops, "Chelsio T310" },
525 * Return the adapter_info structure with a given index. Out-of-range indices
526 * return NULL.
528 const struct adapter_info *t3_get_adapter_info(unsigned int id)
530 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
/* Per-port-type setup hook; phy_prep initializes the PHY driver. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};
538 static const struct port_type_info port_types[] = {
539 { NULL },
540 { t3_ael1002_phy_prep },
541 { t3_vsc8211_phy_prep },
542 { NULL},
543 { t3_xaui_direct_phy_prep },
544 { t3_ael2005_phy_prep },
545 { t3_qt2045_phy_prep },
546 { t3_ael1006_phy_prep },
547 { NULL },
550 #define VPD_ENTRY(name, len) \
551 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
554 * Partial EEPROM Vital Product Data structure. Includes only the ID and
555 * VPD-R sections.
557 struct t3_vpd {
558 u8 id_tag;
559 u8 id_len[2];
560 u8 id_data[16];
561 u8 vpdr_tag;
562 u8 vpdr_len[2];
563 VPD_ENTRY(pn, 16); /* part number */
564 VPD_ENTRY(ec, 16); /* EC level */
565 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
566 VPD_ENTRY(na, 12); /* MAC address base */
567 VPD_ENTRY(cclk, 6); /* core clock */
568 VPD_ENTRY(mclk, 6); /* mem clock */
569 VPD_ENTRY(uclk, 6); /* uP clk */
570 VPD_ENTRY(mdc, 6); /* MDIO clk */
571 VPD_ENTRY(mt, 2); /* mem timing */
572 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
573 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
574 VPD_ENTRY(port0, 2); /* PHY0 complex */
575 VPD_ENTRY(port1, 2); /* PHY1 complex */
576 VPD_ENTRY(port2, 2); /* PHY2 complex */
577 VPD_ENTRY(port3, 2); /* PHY3 complex */
578 VPD_ENTRY(rv, 1); /* csum */
579 u32 pad; /* for multiple-of-4 sizing and alignment */
582 #define EEPROM_MAX_POLL 40
583 #define EEPROM_STAT_ADDR 0x4000
584 #define VPD_BASE 0xc00
587 * t3_seeprom_read - read a VPD EEPROM location
588 * @adapter: adapter to read
589 * @addr: EEPROM address
590 * @data: where to store the read data
592 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
593 * VPD ROM capability. A zero is written to the flag bit when the
594 * addres is written to the control register. The hardware device will
595 * set the flag to 1 when 4 bytes have been read into the data register.
597 int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
599 u16 val;
600 int attempts = EEPROM_MAX_POLL;
601 u32 v;
602 unsigned int base = adapter->params.pci.vpd_cap_addr;
604 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
605 return -EINVAL;
607 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
608 do {
609 udelay(10);
610 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
611 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
613 if (!(val & PCI_VPD_ADDR_F)) {
614 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
615 return -EIO;
617 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
618 *data = cpu_to_le32(v);
619 return 0;
623 * t3_seeprom_write - write a VPD EEPROM location
624 * @adapter: adapter to write
625 * @addr: EEPROM address
626 * @data: value to write
628 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
629 * VPD ROM capability.
631 int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
633 u16 val;
634 int attempts = EEPROM_MAX_POLL;
635 unsigned int base = adapter->params.pci.vpd_cap_addr;
637 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
638 return -EINVAL;
640 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
641 le32_to_cpu(data));
642 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
643 addr | PCI_VPD_ADDR_F);
644 do {
645 msleep(1);
646 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
647 } while ((val & PCI_VPD_ADDR_F) && --attempts);
649 if (val & PCI_VPD_ADDR_F) {
650 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
651 return -EIO;
653 return 0;
657 * t3_seeprom_wp - enable/disable EEPROM write protection
658 * @adapter: the adapter
659 * @enable: 1 to enable write protection, 0 to disable it
661 * Enables or disables write protection on the serial EEPROM.
663 int t3_seeprom_wp(struct adapter *adapter, int enable)
665 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/*
 * Convert a character holding a hex digit to a number.  The caller must
 * ensure @c is a valid hex digit; other input yields a meaningless value.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
677 * get_vpd_params - read VPD parameters from VPD EEPROM
678 * @adapter: adapter to read
679 * @p: where to store the parameters
681 * Reads card parameters stored in VPD EEPROM.
683 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
685 int i, addr, ret;
686 struct t3_vpd vpd;
689 * Card information is normally at VPD_BASE but some early cards had
690 * it at 0.
692 ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
693 if (ret)
694 return ret;
695 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
697 for (i = 0; i < sizeof(vpd); i += 4) {
698 ret = t3_seeprom_read(adapter, addr + i,
699 (__le32 *)((u8 *)&vpd + i));
700 if (ret)
701 return ret;
704 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
705 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
706 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
707 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
708 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
709 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
711 /* Old eeproms didn't have port information */
712 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
713 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
714 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
715 } else {
716 p->port_type[0] = hex2int(vpd.port0_data[0]);
717 p->port_type[1] = hex2int(vpd.port1_data[0]);
718 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
719 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
722 for (i = 0; i < 6; i++)
723 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
724 hex2int(vpd.na_data[2 * i + 1]);
725 return 0;
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8	/* at least version and csum */
};
748 * sf1_read - read data from the serial flash
749 * @adapter: the adapter
750 * @byte_cnt: number of bytes to read
751 * @cont: whether another operation will be chained
752 * @valp: where to store the read data
754 * Reads up to 4 bytes of data from the serial flash. The location of
755 * the read needs to be specified prior to calling this by issuing the
756 * appropriate commands to the serial flash.
758 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
759 u32 *valp)
761 int ret;
763 if (!byte_cnt || byte_cnt > 4)
764 return -EINVAL;
765 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
766 return -EBUSY;
767 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
768 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
769 if (!ret)
770 *valp = t3_read_reg(adapter, A_SF_DATA);
771 return ret;
775 * sf1_write - write data to the serial flash
776 * @adapter: the adapter
777 * @byte_cnt: number of bytes to write
778 * @cont: whether another operation will be chained
779 * @val: value to write
781 * Writes up to 4 bytes of data to the serial flash. The location of
782 * the write needs to be specified prior to calling this by issuing the
783 * appropriate commands to the serial flash.
785 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
786 u32 val)
788 if (!byte_cnt || byte_cnt > 4)
789 return -EINVAL;
790 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
791 return -EBUSY;
792 t3_write_reg(adapter, A_SF_DATA, val);
793 t3_write_reg(adapter, A_SF_OP,
794 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
795 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
799 * flash_wait_op - wait for a flash operation to complete
800 * @adapter: the adapter
801 * @attempts: max number of polls of the status register
802 * @delay: delay between polls in ms
804 * Wait for a flash operation to complete by polling the status register.
806 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
808 int ret;
809 u32 status;
811 while (1) {
812 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
813 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
814 return ret;
815 if (!(status & 1))
816 return 0;
817 if (--attempts == 0)
818 return -EAGAIN;
819 if (delay)
820 msleep(delay);
825 * t3_read_flash - read words from serial flash
826 * @adapter: the adapter
827 * @addr: the start address for the read
828 * @nwords: how many 32-bit words to read
829 * @data: where to store the read data
830 * @byte_oriented: whether to store data as bytes or as words
832 * Read the specified number of 32-bit words from the serial flash.
833 * If @byte_oriented is set the read data is stored as a byte array
834 * (i.e., big-endian), otherwise as 32-bit words in the platform's
835 * natural endianess.
837 int t3_read_flash(struct adapter *adapter, unsigned int addr,
838 unsigned int nwords, u32 *data, int byte_oriented)
840 int ret;
842 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
843 return -EINVAL;
845 addr = swab32(addr) | SF_RD_DATA_FAST;
847 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
848 (ret = sf1_read(adapter, 1, 1, data)) != 0)
849 return ret;
851 for (; nwords; nwords--, data++) {
852 ret = sf1_read(adapter, 4, nwords > 1, data);
853 if (ret)
854 return ret;
855 if (byte_oriented)
856 *data = htonl(*data);
858 return 0;
862 * t3_write_flash - write up to a page of data to the serial flash
863 * @adapter: the adapter
864 * @addr: the start address to write
865 * @n: length of data to write
866 * @data: the data to write
868 * Writes up to a page of data (256 bytes) to the serial flash starting
869 * at the given address.
871 static int t3_write_flash(struct adapter *adapter, unsigned int addr,
872 unsigned int n, const u8 *data)
874 int ret;
875 u32 buf[64];
876 unsigned int i, c, left, val, offset = addr & 0xff;
878 if (addr + n > SF_SIZE || offset + n > 256)
879 return -EINVAL;
881 val = swab32(addr) | SF_PROG_PAGE;
883 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
884 (ret = sf1_write(adapter, 4, 1, val)) != 0)
885 return ret;
887 for (left = n; left; left -= c) {
888 c = min(left, 4U);
889 for (val = 0, i = 0; i < c; ++i)
890 val = (val << 8) + *data++;
892 ret = sf1_write(adapter, c, c != left, val);
893 if (ret)
894 return ret;
896 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
897 return ret;
899 /* Read the page to verify the write succeeded */
900 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
901 if (ret)
902 return ret;
904 if (memcmp(data - n, (u8 *) buf + offset, n))
905 return -EIO;
906 return 0;
910 * t3_get_tp_version - read the tp sram version
911 * @adapter: the adapter
912 * @vers: where to place the version
914 * Reads the protocol sram version from sram.
916 int t3_get_tp_version(struct adapter *adapter, u32 *vers)
918 int ret;
920 /* Get version loaded in SRAM */
921 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
922 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
923 1, 1, 5, 1);
924 if (ret)
925 return ret;
927 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
929 return 0;
933 * t3_check_tpsram_version - read the tp sram version
934 * @adapter: the adapter
936 * Reads the protocol sram version from flash.
938 int t3_check_tpsram_version(struct adapter *adapter)
940 int ret;
941 u32 vers;
942 unsigned int major, minor;
944 if (adapter->params.rev == T3_REV_A)
945 return 0;
948 ret = t3_get_tp_version(adapter, &vers);
949 if (ret)
950 return ret;
952 major = G_TP_VERSION_MAJOR(vers);
953 minor = G_TP_VERSION_MINOR(vers);
955 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
956 return 0;
957 else {
958 CH_ERR(adapter, "found wrong TP version (%u.%u), "
959 "driver compiled for version %d.%d\n", major, minor,
960 TP_VERSION_MAJOR, TP_VERSION_MINOR);
962 return -EINVAL;
966 * t3_check_tpsram - check if provided protocol SRAM
967 * is compatible with this driver
968 * @adapter: the adapter
969 * @tp_sram: the firmware image to write
970 * @size: image size
972 * Checks if an adapter's tp sram is compatible with the driver.
973 * Returns 0 if the versions are compatible, a negative error otherwise.
975 int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
976 unsigned int size)
978 u32 csum;
979 unsigned int i;
980 const __be32 *p = (const __be32 *)tp_sram;
982 /* Verify checksum */
983 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
984 csum += ntohl(p[i]);
985 if (csum != 0xffffffff) {
986 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
987 csum);
988 return -EINVAL;
991 return 0;
/* Firmware build flavor encoded in the version word. */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
1000 * t3_get_fw_version - read the firmware version
1001 * @adapter: the adapter
1002 * @vers: where to place the version
1004 * Reads the FW version from flash.
1006 int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1008 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1012 * t3_check_fw_version - check if the FW is compatible with this driver
1013 * @adapter: the adapter
1015 * Checks if an adapter's FW is compatible with the driver. Returns 0
1016 * if the versions are compatible, a negative error otherwise.
1018 int t3_check_fw_version(struct adapter *adapter)
1020 int ret;
1021 u32 vers;
1022 unsigned int type, major, minor;
1024 ret = t3_get_fw_version(adapter, &vers);
1025 if (ret)
1026 return ret;
1028 type = G_FW_VERSION_TYPE(vers);
1029 major = G_FW_VERSION_MAJOR(vers);
1030 minor = G_FW_VERSION_MINOR(vers);
1032 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1033 minor == FW_VERSION_MINOR)
1034 return 0;
1035 else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1036 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1037 "driver compiled for version %u.%u\n", major, minor,
1038 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1039 else {
1040 CH_WARN(adapter, "found newer FW version(%u.%u), "
1041 "driver compiled for version %u.%u\n", major, minor,
1042 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1043 return 0;
1045 return -EINVAL;
1049 * t3_flash_erase_sectors - erase a range of flash sectors
1050 * @adapter: the adapter
1051 * @start: the first sector to erase
1052 * @end: the last sector to erase
1054 * Erases the sectors in the given range.
1056 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1058 while (start <= end) {
1059 int ret;
1061 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1062 (ret = sf1_write(adapter, 4, 0,
1063 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1064 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1065 return ret;
1066 start++;
1068 return 0;
/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 *
 *	Returns 0 on success or a negative errno on failure.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	/* Image must be word-aligned and at least the minimum size. */
	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	/* Image must fit within the flash region reserved for firmware. */
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	/* A valid image sums (1's complement) to all-ones over all words. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	/* Write the body of the image in flash-friendly 256-byte chunks. */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	/* Finally write the 4-byte FW version word at its fixed address. */
	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
/* Base address of the CIM control region within the CIM address space. */
#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 *	Returns 0 on success, -EBUSY if the CIM host interface is already
 *	busy, or the error from waiting for an individual word access.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Writing the address kicks off the access; poll HOSTBUSY. */
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
/*
 * Stop all ingress traffic on a MAC, saving the current Rx configuration
 * and hash-filter registers in *rx_cfg/*rx_hash_high/*rx_hash_low so that
 * t3_open_rx_traffic() can restore them later.
 */
static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}
/*
 * Re-enable ingress traffic on a MAC, restoring the Rx configuration and
 * hash-filter values previously saved by t3_gate_rx_traffic().
 */
static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}
/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC. After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (!lc->link_ok && link_ok) {
		/* Link coming up: gate Rx while re-enabling the MAC so no
		 * traffic flows through a half-configured datapath, and
		 * sample the link-fault status while Rx is quiesced. */
		u32 rx_cfg, rx_hash_high, rx_hash_low;
		u32 status;

		t3_xgm_intr_enable(adapter, port_id);
		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_mac_enable(mac, MAC_DIRECTION_RX);

		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		if (status & F_LINKFAULTCHANGE) {
			mac->stats.link_faults++;
			pi->link_fault = 1;
		}
		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
	}

	/* Resolve the flow-control setting to apply. */
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;                            /* nothing changed */

	/* On rev > 0 XAUI adapters, toggle the XAUI lanes with the link. */
	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
/*
 * Handle a link-fault interrupt for a port: re-check the fault status with
 * Rx traffic gated, then either report the fault or restore the link state
 * via the OS-specific handler.
 */
void t3_link_fault(struct adapter *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	struct cphy *phy = &pi->phy;
	struct link_config *lc = &pi->link_config;
	int link_ok, speed, duplex, fc, link_fault;
	u32 rx_cfg, rx_hash_high, rx_hash_low;

	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

	if (adapter->params.rev > 0 && uses_xaui(adapter))
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
	t3_mac_enable(mac, MAC_DIRECTION_RX);

	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	/* Sample the fault indication after the MAC Rx path was reset. */
	link_fault = t3_read_reg(adapter,
				 A_XGM_INT_STATUS + mac->offset);
	link_fault &= F_LINKFAULTCHANGE;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_fault) {
		lc->link_ok = 0;
		lc->speed = SPEED_INVALID;
		lc->duplex = DUPLEX_INVALID;

		t3_os_link_fault(adapter, port_id, 0);

		/* Account link faults only when the phy reports a link up */
		if (link_ok)
			mac->stats.link_faults++;
	} else {
		if (link_ok)
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);

		pi->link_fault = 0;
		lc->link_ok = (unsigned char)link_ok;
		lc->speed = speed < 0 ? SPEED_INVALID : speed;
		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
		t3_os_link_fault(adapter, port_id, link_ok);
	}
}
/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 *
 *	Always returns 0.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		/* Translate the requested pause modes into the standard
		 * Pause/Asym_Pause advertisement bits. */
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		/* PHY cannot autoneg: program the MAC and just reset PHY. */
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}
1348 * t3_set_vlan_accel - control HW VLAN extraction
1349 * @adapter: the adapter
1350 * @ports: bitmap of adapter ports to operate on
1351 * @on: enable (1) or disable (0) HW VLAN extraction
1353 * Enables or disables HW extraction of VLAN tags for the given port.
1355 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1357 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1358 ports << S_VLANEXTRACTIONENABLE,
1359 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
/* One row of a table-driven interrupt-cause description (see
 * t3_handle_intr_status()). */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};
/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions
 *	@stats: statistics counters tracking interrupt occurences
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occured.  The actions include
 *	optionally printing a warning or alert message, and optionally
 *	incrementing a stat counter.  The table is terminated by an entry
 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
/*
 * Per-module interrupt-enable masks.  Each mask selects the cause bits
 * that are enabled for the corresponding HW module; bits left in comments
 * are deliberately excluded from the enable mask.
 */
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	/* On a PEX error also log the detailed error code register. */
	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1536 * TP interrupt handler.
1538 static void tp_intr_handler(struct adapter *adapter)
1540 static const struct intr_info tp_intr_info[] = {
1541 {0xffffff, "TP parity error", -1, 1},
1542 {0x1000000, "TP out of Rx pages", -1, 1},
1543 {0x2000000, "TP out of Tx pages", -1, 1},
1547 static struct intr_info tp_intr_info_t3c[] = {
1548 {0x1fffffff, "TP parity error", -1, 1},
1549 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1550 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1554 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1555 adapter->params.rev < T3_REV_C ?
1556 tp_intr_info : tp_intr_info_t3c, NULL))
1557 t3_fatal_err(adapter);
/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
/* Aggregate PM TX SPI framing-error bits (ingress-core and output-engine). */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/* Aggregate PM RX SPI framing-error bits (ingress-engine and output-core). */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}
/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
/* MC7 cause bits that are treated as fatal (uncorrectable/parity/address). */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		/* Correctable ECC error: log address/data, not fatal. */
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		/* The error-address register only exists on rev > 0 parts. */
		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
/* XGMAC cause bits that are treated as fatal (Tx/Rx FIFO parity). */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))

/*
 * XGMAC interrupt handler.  Returns nonzero if any cause bit was set.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;

	/*
	 * We mask out interrupt causes for which we're not taking interrupts.
	 * This allows us to use polling logic to monitor some of the other
	 * conditions when taking interrupts would impose too much load on the
	 * system.
	 */
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
		    ~F_RXFIFO_OVERFLOW;

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;
	if (cause & F_XGM_INT) {
		/* Mask further XGM interrupts until the fault is handled. */
		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE + mac->offset,
				 F_XGM_INT, 0);
		mac->stats.link_faults++;

		t3_os_link_fault_handler(adap, idx);
	}

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);

	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);

	return cause != 0;
}
/*
 * Interrupt handler for PHY events.  Dispatches to each port's PHY
 * interrupt handler if that port's GPIO interrupt bit is set.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);

		/* Skip PHYs that do not signal interrupts. */
		if (!(p->phy.caps & SUPPORTED_IRQ))
			continue;

		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
			if (phy_cause & cphy_cause_module_change)
				t3_os_phymod_changed(adapter, i);
		}
	}

	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}
/*
 * T3 slow path (non-data) interrupt handler.  Reads the top-level cause
 * register and dispatches to the per-module handlers.  Returns 1 if any
 * enabled interrupt was handled, 0 otherwise.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	/* Only consider causes we have enabled. */
	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
	return 1;
}
1923 static unsigned int calc_gpio_intr(struct adapter *adap)
1925 unsigned int i, gpi_intr = 0;
1927 for_each_port(adap, i)
1928 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1929 adapter_info(adap)->gpio_intr[i])
1930 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1931 return gpi_intr;
/**
 *	t3_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable interrupts by setting the interrupt enable registers of the
 *	various HW modules and then enabling the top-level interrupt
 *	concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	/* TP enable mask differs between pre-T3C and T3C revisions. */
	t3_write_reg(adapter, A_TP_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);

	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}
/**
 *	t3_intr_disable - disable a card's interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	adapter->slow_intr_mask = 0;
}
/**
 *	t3_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts.
 */
void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	/* Write all-ones to each cause register to acknowledge everything. */
	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}
2041 void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2043 struct port_info *pi = adap2pinfo(adapter, idx);
2045 t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2046 XGM_EXTRA_INTR_MASK);
2049 void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2051 struct port_info *pi = adap2pinfo(adapter, idx);
2053 t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2054 0x7ff);
/**
 *	t3_port_intr_enable - enable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be enabled
 *
 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_enable(phy);	/* PHY interrupts via its own ops */
}
/**
 *	t3_port_intr_disable - disable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be disabled
 *
 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_disable(phy);
}
/**
 *	t3_port_intr_clear - clear port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts to clear
 *
 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	phy->ops->intr_clear(phy);
}
/* Poll iterations allowed for an SGE context command to complete. */
#define SG_CONTEXT_CMD_ATTEMPTS 100

/**
 *	t3_sge_write_context - write an SGE context
 *	@adapter: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Program an SGE context with the values already loaded in the
 *	CONTEXT_DATA? registers.  The MASK registers select which bits of
 *	the DATA registers actually reach the context.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	if (type == F_RESPONSEQ) {
		/*
		 * Can't write the Response Queue Context bits for
		 * Interrupt Armed or the Reserve bits after the chip
		 * has been initialized out of reset.  Writing to these
		 * bits can confuse the hardware.
		 */
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	} else {
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	}
	/* Kick off the write command, then poll for completion. */
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	clear_sge_ctxt - completely clear an SGE context
 *	@adap: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Completely clear an SGE context.  Used predominantly at post-reset
 *	initialization.  Note in particular that we don't skip writing to any
 *	"sensitive bits" in the contexts the way that t3_sge_write_context()
 *	does ...
 */
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
			  unsigned int type)
{
	/* Zero all data words and open every mask bit, then issue the write. */
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_init_ecntxt - initialize an SGE egress context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@type: the egress context type
 *	@respq: associated response queue
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@token: uP token
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE egress context and make it ready for use.  If the
 *	platform allows concurrent context operations, the caller is
 *	responsible for appropriate locking.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	/* Offload queues use no FW credits; others start with FW_WR_NUM. */
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* The 4K-aligned base address is split across DATA1..DATA3. */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
/**
 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@bsize: size of each buffer for this queue
 *	@cong_thres: threshold to signal congestion to upstream producers
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE free list context and make it ready for use.  The
 *	caller is responsible for ensuring only one context operation occurs
 *	at a time.
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
			unsigned int cidx)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	base_addr >>= 32;
	/* cidx and bsize each straddle a word boundary in the context. */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}
/**
 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@fl_thres: threshold for selecting the normal or jumbo free list
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE response queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	/* Interrupts only enabled when a vector index was supplied. */
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
/**
 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@rspq: response queue for async notifications
 *	@ovfl_mode: CQ overflow mode
 *	@credits: completion queue credits
 *	@credit_thres: the credit threshold
 *
 *	Initialize an SGE completion queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	/* NOTE(review): V_CQ_ERR is seeded from ovfl_mode too — presumably
	 * intentional; confirm against the SGE context layout. */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}
/**
 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
 *	@adapter: the adapter
 *	@id: the egress context id
 *	@enable: enable (1) or disable (0) the context
 *
 *	Enable or disable an SGE egress context.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Only the EC_VALID bit is unmasked, so only it is modified. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_disable_fl - disable an SGE free-buffer list
 *	@adapter: the adapter
 *	@id: the free list context id
 *
 *	Disable an SGE free-buffer list.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Zero only the FL_SIZE field; a zero-sized free list is disabled. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_disable_rspcntxt - disable an SGE response queue
 *	@adapter: the adapter
 *	@id: the response queue context id
 *
 *	Disable an SGE response queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Zero only the CQ_SIZE field; a zero-sized queue is disabled. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
 *	@adapter: the adapter
 *	@id: the completion queue context id
 *
 *	Disable an SGE completion queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Same approach as the response queue: zero the CQ_SIZE field only. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
 *	@adapter: the adapter
 *	@id: the context id
 *	@op: the operation to perform
 *	@credits: credit value written to DATA0 (shifted into the high 16 bits)
 *
 *	Perform the selected operation on an SGE completion queue context.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.  Returns the CQ index for ops in [2, 7), 0 for
 *	other ops, or a negative errno.
 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

	if (op >= 2 && op < 7) {
		/* Rev > 0 chips return the index in the command register;
		 * rev 0 needs an explicit context read to fetch it. */
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}
/**
 *	t3_sge_read_context - read an SGE context
 *	@type: the context type
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE context of the given type.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
			       unsigned int id, u32 data[4])
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Opcode 0 is a context read; the result lands in DATA0..DATA3. */
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
			    SG_CONTEXT_CMD_ATTEMPTS, 1))
		return -EIO;
	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
	return 0;
}
2500 * t3_sge_read_ecntxt - read an SGE egress context
2501 * @adapter: the adapter
2502 * @id: the context id
2503 * @data: holds the retrieved context
2505 * Read an SGE egress context. The caller is responsible for ensuring
2506 * only one context operation occurs at a time.
2508 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2510 if (id >= 65536)
2511 return -EINVAL;
2512 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2516 * t3_sge_read_cq - read an SGE CQ context
2517 * @adapter: the adapter
2518 * @id: the context id
2519 * @data: holds the retrieved context
2521 * Read an SGE CQ context. The caller is responsible for ensuring
2522 * only one context operation occurs at a time.
2524 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2526 if (id >= 65536)
2527 return -EINVAL;
2528 return t3_sge_read_context(F_CQ, adapter, id, data);
2532 * t3_sge_read_fl - read an SGE free-list context
2533 * @adapter: the adapter
2534 * @id: the context id
2535 * @data: holds the retrieved context
2537 * Read an SGE free-list context. The caller is responsible for ensuring
2538 * only one context operation occurs at a time.
2540 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2542 if (id >= SGE_QSETS * 2)
2543 return -EINVAL;
2544 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2548 * t3_sge_read_rspq - read an SGE response queue context
2549 * @adapter: the adapter
2550 * @id: the context id
2551 * @data: holds the retrieved context
2553 * Read an SGE response queue context. The caller is responsible for
2554 * ensuring only one context operation occurs at a time.
2556 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2558 if (id >= SGE_QSETS)
2559 return -EINVAL;
2560 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
/**
 *	t3_config_rss - configure Rx packet steering
 *	@adapter: the adapter
 *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
 *	@cpus: values for the CPU lookup table (0xff terminated)
 *	@rspq: values for the response queue lookup table (0xffff terminated)
 *
 *	Programs the receive packet steering logic.  @cpus and @rspq provide
 *	the values for the CPU and response queue lookup tables.  If they
 *	provide fewer values than the size of the tables the supplied values
 *	are used repeatedly until the tables are fully populated.
 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 *cpus, const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			u32 val = i << 16;	/* table index in bits 16+ */

			/* Each entry packs two 6-bit CPU values; when the
			 * NEXT source value is the 0xff terminator, wrap the
			 * source index to repeat the supplied values. */
			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				if (cpus[cpu_idx] == 0xff)
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}
	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			if (rspq[q_idx] == 0xffff)	/* terminator: wrap */
				q_idx = 0;
		}

	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
/**
 *	t3_read_rss - read the contents of the RSS tables
 *	@adapter: the adapter
 *	@lkup: holds the contents of the RSS lookup table
 *	@map: holds the contents of the RSS map table
 *
 *	Reads the contents of the receive packet steering tables.  Either
 *	destination may be NULL to skip that table.  Returns -EAGAIN if the
 *	hardware does not flag a read result as valid (bit 31).
 */
int t3_read_rss(struct adapter *adapter, u8 *lkup, u16 *map)
{
	int i;
	u32 val;

	if (lkup)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			/* 0xffff0000 | i selects entry i for readback. */
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
			if (!(val & 0x80000000))	/* result valid bit */
				return -EAGAIN;
			/* Unpack the two CPU bytes of the entry. */
			*lkup++ = val;
			*lkup++ = (val >> 8);
		}
	if (map)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*map++ = val;
		}
	return 0;
}
/**
 *	t3_tp_set_offload_mode - put TP in NIC/offload mode
 *	@adap: the adapter
 *	@enable: 1 to select offload mode, 0 for regular NIC
 *
 *	Switches TP to NIC/offload mode.  Enabling offload is only honored
 *	on offload-capable adapters; disabling always works.
 */
void t3_tp_set_offload_mode(struct adapter *adap, int enable)
{
	if (is_offload(adap) || !enable)
		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
				 V_NICMODE(!enable));
}
/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	unsigned int pages = mem_size / pg_size;

	/* Round down to the nearest multiple of 24 (HW requirement). */
	return (pages / 24) * 24;
}
/*
 * Assign a memory region: write @start into register A_<reg> and advance
 * @start past the region.  Wrapped in do { } while (0) so the two
 * statements behave as a single statement inside unbraced if/else bodies
 * (standard multi-statement-macro hardening; all current callers are
 * plain statements, so expansion is unchanged for them).
 */
#define mem_region(adap, start, size, reg) \
	do { \
		t3_write_reg((adap), A_ ## reg, (start)); \
		start += size; \
	} while (0)
/**
 *	partition_mem - partition memory and configure TP memory settings
 *	@adap: the adapter
 *	@p: the TP parameters
 *
 *	Partitions context and payload memory and configures TP's memory
 *	registers.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	/* Scale the timer region to the number of TIDs (rev > 0 only). */
	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and make multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	/* Lay out the CM regions one after another; mem_region() advances m. */
	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	/* CIM gets the rest of CM, starting at the next 4K boundary. */
	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	/* If fewer TIDs fit than the MC5 is configured for, give the
	 * surplus entries to the server region. */
	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}
/*
 * Write a TP register through the PIO indirect access window:
 * set the address register, then write the data register.
 */
static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
/*
 * Program TP's static configuration: global offload/checksum settings,
 * TCP option defaults, delayed-ACK behavior, pacing, and revision-specific
 * tweaks.  Called once during adapter initialization.
 */
static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	/* Rev 0 (T3A) uses a different bit position for ESND. */
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	/* NOTE(review): the register is deliberately written twice with
	 * different values — presumably a HW programming sequence; confirm
	 * before "simplifying". */
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}
/* Desired TP timer resolution in usec */
#define TP_TMR_RES 50

/* TCP timer values in ms */
#define TP_DACK_TIMER 50
#define TP_RTO_MIN 250

/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency in Hz
 *
 *	Set TP's timing parameters, such as the various timer resolutions and
 *	the TCP timer values.
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	/* Resolutions are log2 clock-divider exponents derived from core_clk. */
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
	unsigned int tps = core_clk >> tre;	/* timer ticks per second */

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

/* Readability macro: "N SECONDS" converts seconds to timer ticks. */
#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
/**
 *	t3_tp_set_coalescing_size - set receive coalescing size
 *	@adap: the adapter
 *	@size: the receive coalescing size
 *	@psh: whether a set PSH bit should deliver coalesced data
 *
 *	Set the receive coalescing size and PSH bit handling.  A @size of 0
 *	disables coalescing.  Returns -EINVAL if @size exceeds
 *	MAX_RX_COALESCING_LEN.
 */
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}
/**
 *	t3_tp_set_max_rxsize - set the max receive size
 *	@adap: the adapter
 *	@size: the max receive size
 *
 *	Set TP's max receive size.  This is the limit that applies when
 *	receive coalescing is disabled.
 */
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
/*
 * Fill in the default 16-entry MTU table.
 *
 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
 * it can accommodate max size TCP/IP headers when SACK and timestamps
 * are enabled and still have at least 8 bytes of payload.
 */
static void init_mtus(unsigned short mtus[])
{
	static const unsigned short default_mtus[] = {
		88, 88, 256, 512, 576, 1024, 1280, 1492,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned int i;

	for (i = 0; i < 16; ++i)
		mtus[i] = default_mtus[i];
}
/*
 * Initial congestion control parameters: fills the 32-entry alpha (a)
 * and beta (b) tables from fixed defaults.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short a_vals[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short b_vals[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int i;

	for (i = 0; i < 32; ++i) {
		a[i] = a_vals[i];
		b[i] = b_vals[i];
	}
}
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
 *	Update the high-speed congestion control table with the supplied alpha,
 *	beta, and MTUs.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* (mtu - 40) approximates the TCP payload per packet;
			 * never fall below the HW minimum increment. */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
3006 * t3_read_hw_mtus - returns the values in the HW MTU table
3007 * @adap: the adapter
3008 * @mtus: where to store the HW MTU values
3010 * Reads the HW MTU table.
3012 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
3014 int i;
3016 for (i = 0; i < NMTUS; ++i) {
3017 unsigned int val;
3019 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3020 val = t3_read_reg(adap, A_TP_MTU_TABLE);
3021 mtus[i] = val & 0x3fff;
3026 * t3_get_cong_cntl_tab - reads the congestion control table
3027 * @adap: the adapter
3028 * @incr: where to store the alpha values
3030 * Reads the additive increments programmed into the HW congestion
3031 * control table.
3033 void t3_get_cong_cntl_tab(struct adapter *adap,
3034 unsigned short incr[NMTUS][NCCTRL_WIN])
3036 unsigned int mtu, w;
3038 for (mtu = 0; mtu < NMTUS; ++mtu)
3039 for (w = 0; w < NCCTRL_WIN; ++w) {
3040 t3_write_reg(adap, A_TP_CCTRL_TABLE,
3041 0xffff0000 | (mtu << 5) | w);
3042 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
3043 0x1fff;
3048 * t3_tp_get_mib_stats - read TP's MIB counters
3049 * @adap: the adapter
3050 * @tps: holds the returned counter values
3052 * Returns the values of TP's MIB counters.
3054 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
	/*
	 * Bulk-read TP's MIB counter block through the indirect register
	 * pair (A_TP_MIB_INDEX / A_TP_MIB_RDATA) straight into *tps,
	 * one u32 at a time.
	 * NOTE(review): the trailing 0 is presumably the starting index --
	 * confirm against t3_read_indirect().
	 */
3056 	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
3057 			 sizeof(*tps) / sizeof(u32), 0);
/*
 * Program a ULP RX region's LLIMIT/ULIMIT registers to cover
 * [start, start + len - 1] and advance @start past the region so that
 * successive regions are laid out back-to-back.
 * Multi-statement macro: must not be used as the sole body of an
 * unbraced if/else.
 */
3060 #define ulp_region(adap, name, start, len) \
3061 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3062 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3063 (start) + (len) - 1); \
3064 start += len
/* Same limit programming for a ULP TX region; @start is NOT advanced. */
3066 #define ulptx_region(adap, name, start, len) \
3067 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3068 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3069 (start) + (len) - 1)
3071 static void ulp_config(struct adapter *adap, const struct tp_params *p)
	/*
	 * Carve the per-channel Rx memory into the ULP regions (iSCSI,
	 * TDDP, TPT, STAG, RQ, PBL).  'm' starts at chan_rx_size and is
	 * advanced by each ulp_region() invocation, so the regions are
	 * laid out consecutively; ulptx_region() mirrors the same limits
	 * on the TX side without advancing 'm'.
	 */
3073 	unsigned int m = p->chan_rx_size;
3075 	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3076 	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3077 	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3078 	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3079 	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3080 	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3081 	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	/* Set the TDDP tag mask to all ones. */
3082 	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3086 * t3_set_proto_sram - set the contents of the protocol sram
3087 * @adapter: the adapter
3088 * @data: the protocol image
3090 * Write the contents of the protocol SRAM.
3092 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
3094 int i;
3095 const __be32 *buf = (const __be32 *)data;
3097 for (i = 0; i < PROTO_SRAM_LINES; i++) {
3098 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
3099 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
3100 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
3101 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
3102 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
3104 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3105 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3106 return -EIO;
3108 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
3110 return 0;
3113 void t3_config_trace_filter(struct adapter *adapter,
3114 const struct trace_params *tp, int filter_index,
3115 int invert, int enable)
3117 u32 addr, key[4], mask[4];
3119 key[0] = tp->sport | (tp->sip << 16);
3120 key[1] = (tp->sip >> 16) | (tp->dport << 16);
3121 key[2] = tp->dip;
3122 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3124 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3125 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3126 mask[2] = tp->dip_mask;
3127 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3129 if (invert)
3130 key[3] |= (1 << 29);
3131 if (enable)
3132 key[3] |= (1 << 28);
3134 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3135 tp_wr_indirect(adapter, addr++, key[0]);
3136 tp_wr_indirect(adapter, addr++, mask[0]);
3137 tp_wr_indirect(adapter, addr++, key[1]);
3138 tp_wr_indirect(adapter, addr++, mask[1]);
3139 tp_wr_indirect(adapter, addr++, key[2]);
3140 tp_wr_indirect(adapter, addr++, mask[2]);
3141 tp_wr_indirect(adapter, addr++, key[3]);
3142 tp_wr_indirect(adapter, addr, mask[3]);
3143 t3_read_reg(adapter, A_TP_PIO_DATA);
3147 * t3_config_sched - configure a HW traffic scheduler
3148 * @adap: the adapter
3149 * @kbps: target rate in Kbps
3150 * @sched: the scheduler index
3152 * Configure a HW scheduler for the target rate
3154 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
	/*
	 * Exhaustively search for the (cycles-per-tick, bytes-per-tick)
	 * pair whose resulting rate bpt * (clk / cpt) is closest to the
	 * requested rate, then program scheduler @sched's half of the
	 * shared rate-limit register.
	 */
3156 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	/* NOTE(review): assumes vpd.cclk is in KHz, giving clk in Hz. */
3157 	unsigned int clk = adap->params.vpd.cclk * 1000;
3158 	unsigned int selected_cpt = 0, selected_bpt = 0;
3160 	if (kbps > 0) {
3161 		kbps *= 125;	/* -> bytes */
3162 		for (cpt = 1; cpt <= 255; cpt++) {
			/* ticks/sec for this divider, rounded bytes/tick */
3163 			tps = clk / cpt;
3164 			bpt = (kbps + tps / 2) / tps;
3165 			if (bpt > 0 && bpt <= 255) {
3166 				v = bpt * tps;
3167 				delta = v >= kbps ? v - kbps : kbps - v;
				/* <= prefers larger cpt at equal error */
3168 				if (delta <= mindelta) {
3169 					mindelta = delta;
3170 					selected_cpt = cpt;
3171 					selected_bpt = bpt;
			/* error is growing again; stop once a fit exists */
3173 			} else if (selected_cpt)
3174 				break;
3176 		if (!selected_cpt)
3177 			return -EINVAL;
	/* Two schedulers share each rate-limit register. */
3179 	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3180 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3181 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	/* Odd schedulers use the upper halfword, even ones the lower. */
3182 	if (sched & 1)
3183 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3184 	else
3185 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3186 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3187 	return 0;
3190 static int tp_init(struct adapter *adap, const struct tp_params *p)
	/*
	 * Basic TP (transport processor) bring-up: configure TP, enable
	 * VLAN acceleration, and for offload-capable adapters run the
	 * free-list initialization before releasing TP from reset.
	 * Returns 0 on success, non-zero if initialization timed out.
	 */
3192 	int busy = 0;
3194 	tp_config(adap, p);
3195 	t3_set_vlan_accel(adap, 3, 0);
3197 	if (is_offload(adap)) {
3198 		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		/* kick off free-list init and wait for the bit to clear */
3199 		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3200 		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3201 				       0, 1000, 5);
3202 		if (busy)
3203 			CH_ERR(adap, "TP initialization timed out\n");
	/* only release TP from reset if init completed */
3206 	if (!busy)
3207 		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3208 	return busy;
3211 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3213 if (port_mask & ~((1 << adap->params.nports) - 1))
3214 return -EINVAL;
3215 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3216 port_mask << S_PORT0ACTIVE);
3217 return 0;
3221 * Perform the bits of HW initialization that are dependent on the Tx
3222 * channels being used.
3224 static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
	/*
	 * Configure ULP arbitration, MPS port enables, and PM1 Tx memory
	 * split according to which Tx channels are in use (chan_map is a
	 * 2-bit mask; 3 means both channels).
	 */
3226 	int i;
3228 	if (chan_map != 3) {	/* one channel */
		/* no round-robin needed with a single channel */
3229 		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3230 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3231 		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3232 			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3233 			      F_TPTXPORT1EN | F_PORT1ACTIVE));
		/* give all PM1 Tx memory to the active channel */
3234 		t3_write_reg(adap, A_PM1_TX_CFG,
3235 			     chan_map == 1 ? 0xffffffff : 0);
3236 	} else {		/* two channels */
3237 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3238 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3239 		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3240 			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3241 		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3242 			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3243 			     F_ENFORCEPKT);
		/* split PM1 Tx memory evenly between the two channels */
3244 		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3245 		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3246 		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3247 			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		/* populate all 16 Tx modulation queue table entries */
3248 		for (i = 0; i < 16; i++)
3249 			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3250 				     (i << 16) | 0x1010);
3254 static int calibrate_xgm(struct adapter *adapter)
	/*
	 * Run XGMAC impedance calibration.  XAUI parts poll the
	 * auto-calibration result up to 5 times; RGMII parts just program
	 * fixed pull-up/pull-down values.  Returns 0 on success, -1 if
	 * XAUI calibration never completes cleanly.
	 */
3256 	if (uses_xaui(adapter)) {
3257 		unsigned int v, i;
3259 		for (i = 0; i < 5; ++i) {
3260 			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3261 			t3_read_reg(adapter, A_XGM_XAUI_IMP);	/* flush */
3262 			msleep(1);
3263 			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			/* done when neither fault nor busy is set */
3264 			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3265 				t3_write_reg(adapter, A_XGM_XAUI_IMP,
3266 					     V_XAUIIMP(G_CALIMP(v) >> 2));
3267 				return 0;
3270 		CH_ERR(adapter, "MAC calibration failed\n");
3271 		return -1;
3272 	} else {
3273 		t3_write_reg(adapter, A_XGM_RGMII_IMP,
3274 			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3275 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3276 				 F_XGM_IMPSETUPDATE);
3278 	return 0;
3281 static void calibrate_xgm_t3b(struct adapter *adapter)
3283 if (!uses_xaui(adapter)) {
3284 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3285 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3286 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3287 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3288 F_XGM_IMPSETUPDATE);
3289 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3291 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3292 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/*
 * MC7 (external memory controller) timing parameters; one set per memory
 * speed grade (see mc7_timings[] in mc7_init()).  Field names suggest
 * standard DRAM timings -- confirm against the MC7 register documentation.
 */
3296 struct mc7_timing_params {
3297 unsigned char ActToPreDly;	/* activate -> precharge delay */
3298 unsigned char ActToRdWrDly;	/* activate -> read/write delay */
3299 unsigned char PreCyc;	/* precharge cycles */
3300 unsigned char RefCyc[5];	/* refresh cycles, indexed by density */
3301 unsigned char BkCyc;	/* bank cycle */
3302 unsigned char WrToRdDly;	/* write -> read delay */
3303 unsigned char RdToWrDly;	/* read -> write delay */
3307 * Write a value to a register and check that the write completed. These
3308 * writes normally complete in a cycle or two, so one read should suffice.
3309 * The very first read exists to flush the posted write to the device.
3311 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3313 t3_write_reg(adapter, addr, val);
3314 t3_read_reg(adapter, addr); /* flush */
3315 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3316 return 0;
3317 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3318 return -EIO;
3321 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
	/*
	 * Bring up one MC7 memory controller: calibrate, program timing,
	 * enable clocks/DLL, issue the DRAM mode-register sequence, set the
	 * refresh interval, enable ECC, and run a full BIST pass before
	 * marking the memory ready.  Returns 0 on success, -1 on failure.
	 * The statement order follows the hardware init sequence; do not
	 * reorder.
	 */
	/* mode-register values, indexed by mem_type */
3323 	static const unsigned int mc7_mode[] = {
3324 		0x632, 0x642, 0x652, 0x432, 0x442
	/* timing parameter sets, also indexed by mem_type */
3326 	static const struct mc7_timing_params mc7_timings[] = {
3327 		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3328 		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3329 		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3330 		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3331 		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3334 	u32 val;
3335 	unsigned int width, density, slow, attempts;
3336 	struct adapter *adapter = mc7->adapter;
3337 	const struct mc7_timing_params *p = &mc7_timings[mem_type];
	/* no memory attached to this controller */
3339 	if (!mc7->size)
3340 		return 0;
3342 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3343 	slow = val & F_SLOW;
3344 	width = G_WIDTH(val);
3345 	density = G_DEN(val);
	/* enable the memory interface */
3347 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3348 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
3349 	msleep(1);
	/* single-shot calibration (fast parts only) */
3351 	if (!slow) {
3352 		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3353 		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3354 		msleep(1);
3355 		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3356 		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3357 			CH_ERR(adapter, "%s MC7 calibration timed out\n",
3358 			       mc7->name);
3359 			goto out_fail;
	/* program the timing parameters for this speed grade */
3363 	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3364 		     V_ACTTOPREDLY(p->ActToPreDly) |
3365 		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3366 		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3367 		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3369 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3370 		     val | F_CLKEN | F_TERM150);
3371 	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
3373 	if (!slow)
3374 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3375 				 F_DLLENB);
3376 	udelay(1);
	/* DRAM mode-register programming sequence */
3378 	val = slow ? 3 : 6;
3379 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3380 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3381 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3382 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3383 		goto out_fail;
3385 	if (!slow) {
3386 		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3387 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3388 		udelay(5);
3391 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3392 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3393 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3394 	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3395 		       mc7_mode[mem_type]) ||
3396 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3397 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3398 		goto out_fail;
3400 	/* clock value is in KHz */
3401 	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
3402 	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */
	/* enable periodic refresh with the computed divider */
3404 	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3405 		     F_PERREFEN | V_PREREFDIV(mc7_clock));
3406 	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */
	/* enable ECC and BIST the whole memory (write zeros, op 1) */
3408 	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3409 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3410 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3411 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3412 		     (mc7->size << width) - 1);
3413 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3414 	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */
	/* poll BIST completion for up to ~12.5s */
3416 	attempts = 50;
3417 	do {
3418 		msleep(250);
3419 		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3420 	} while ((val & F_BUSY) && --attempts);
3421 	if (val & F_BUSY) {
3422 		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3423 		goto out_fail;
3426 	/* Enable normal memory accesses. */
3427 	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3428 	return 0;
3430 out_fail:
3431 	return -1;
3434 static void config_pcie(struct adapter *adap)
	/*
	 * Tune PCIe ack latency and replay-timer limits based on the
	 * negotiated link width, the max payload size, and the fast-train
	 * sequence counts, then clear stale PEX errors and enable
	 * link-down reset handling.
	 */
	/* ack latency by [log2(link width)][payload size code] */
3436 	static const u16 ack_lat[4][6] = {
3437 		{237, 416, 559, 1071, 2095, 4143},
3438 		{128, 217, 289, 545, 1057, 2081},
3439 		{73, 118, 154, 282, 538, 1050},
3440 		{67, 107, 86, 150, 278, 534}
	/* replay-timer limit, same indexing */
3442 	static const u16 rpl_tmr[4][6] = {
3443 		{711, 1248, 1677, 3213, 6285, 12429},
3444 		{384, 651, 867, 1635, 3171, 6243},
3445 		{219, 354, 462, 846, 1614, 3150},
3446 		{201, 321, 258, 450, 834, 1602}
3449 	u16 val;
3450 	unsigned int log2_width, pldsize;
3451 	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3453 	pci_read_config_word(adap->pdev,
3454 			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3455 			     &val);
3456 	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3457 	pci_read_config_word(adap->pdev,
3458 			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3459 			     &val);
3461 	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	/* rev 0 has no separate Rx count; reuse the Tx value */
3462 	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3463 	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3464 	log2_width = fls(adap->params.pci.width) - 1;
3465 	acklat = ack_lat[log2_width][pldsize];
3466 	if (val & 1)		/* check LOsEnable */
3467 		acklat += fst_trn_tx * 4;
3468 	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
	/* rev 0 (T3A) uses a different ACKLAT field layout */
3470 	if (adap->params.rev == 0)
3471 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3472 				 V_T3A_ACKLAT(M_T3A_ACKLAT),
3473 				 V_T3A_ACKLAT(acklat));
3474 	else
3475 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3476 				 V_ACKLAT(acklat));
3478 	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3479 			 V_REPLAYLMT(rpllmt));
	/* clear any latched PEX errors, then enable link-down handling */
3481 	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3482 	t3_set_reg_field(adap, A_PCIE_CFG, 0,
3483 			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3484 			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3488 * Initialize and configure T3 HW modules. This performs the
3489 * initialization steps that need to be done once after a card is reset.
3490 * MAC and PHY initialization is handled separately whenever a port is enabled.
3492 * fw_params are passed to FW and their value is platform dependent. Only the
3493 * top 8 bits are available for use, the rest must be 0.
3495 int t3_init_hw(struct adapter *adapter, u32 fw_params)
	/*
	 * Full one-time HW init after reset: MAC calibration, external
	 * memory (MC7/MC5) bring-up, TP/SGE/ULP configuration, bus-specific
	 * tuning, and finally booting the embedded uP from flash.
	 * Returns 0 on success, -EIO on any failure.
	 */
3497 	int err = -EIO, attempts, i;
3498 	const struct vpd_params *vpd = &adapter->params.vpd;
	/* rev-dependent MAC impedance calibration */
3500 	if (adapter->params.rev > 0)
3501 		calibrate_xgm_t3b(adapter);
3502 	else if (calibrate_xgm(adapter))
3503 		goto out_err;
	/* external memory present only when a memory clock is specified */
3505 	if (vpd->mclk) {
3506 		partition_mem(adapter, &adapter->params.tp);
3508 		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3509 		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3510 		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3511 		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3512 				adapter->params.mc5.nfilters,
3513 				adapter->params.mc5.nroutes))
3514 			goto out_err;
3516 		for (i = 0; i < 32; i++)
3517 			if (clear_sge_ctxt(adapter, i, F_CQ))
3518 				goto out_err;
3521 	if (tp_init(adapter, &adapter->params.tp))
3522 		goto out_err;
3524 	t3_tp_set_coalescing_size(adapter,
3525 				  min(adapter->params.sge.max_pkt_size,
3526 				      MAX_RX_COALESCING_LEN), 1);
3527 	t3_tp_set_max_rxsize(adapter,
3528 			     min(adapter->params.sge.max_pkt_size, 16384U));
3529 	ulp_config(adapter, &adapter->params.tp);
	/* bus-specific tuning */
3531 	if (is_pcie(adapter))
3532 		config_pcie(adapter);
3533 	else
3534 		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3535 				 F_DMASTOPEN | F_CLIDECEN);
3537 	if (adapter->params.rev == T3_REV_C)
3538 		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3539 				 F_CFG_CQE_SOP_MASK);
3541 	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3542 	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3543 	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3544 	chan_init_hw(adapter, adapter->params.chan_map);
3545 	t3_sge_init(adapter, &adapter->params.sge);
3547 	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
	/* boot the embedded uP from flash and wait for it to come up */
3549 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3550 	t3_write_reg(adapter, A_CIM_BOOT_CFG,
3551 		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3552 	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */
3554 	attempts = 100;
3555 	do {			/* wait for uP to initialize */
3556 		msleep(20);
3557 	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3558 	if (!attempts) {
3559 		CH_ERR(adapter, "uP initialization timed out\n");
3560 		goto out_err;
3563 	err = 0;
3564 out_err:
3565 	return err;
3569 * get_pci_mode - determine a card's PCI mode
3570 * @adapter: the adapter
3571 * @p: where to store the PCI settings
3573 * Determines a card's PCI mode and associated parameters, such as speed
3574 * and width.
3576 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3578 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3579 u32 pci_mode, pcie_cap;
3581 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3582 if (pcie_cap) {
3583 u16 val;
3585 p->variant = PCI_VARIANT_PCIE;
3586 p->pcie_cap_addr = pcie_cap;
3587 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3588 &val);
3589 p->width = (val >> 4) & 0x3f;
3590 return;
3593 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3594 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3595 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3596 pci_mode = G_PCIXINITPAT(pci_mode);
3597 if (pci_mode == 0)
3598 p->variant = PCI_VARIANT_PCI;
3599 else if (pci_mode < 4)
3600 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3601 else if (pci_mode < 8)
3602 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3603 else
3604 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3608 * init_link_config - initialize a link's SW state
3609 * @lc: structure holding the link state
3610 * @caps: bit mask of the link's capabilities (SUPPORTED_* values)
3612 * Initializes the SW state maintained for each link, including the link's
3613 * capabilities and default speed/duplex/flow-control/autonegotiation
3614 * settings.
3616 static void init_link_config(struct link_config *lc, unsigned int caps)
3618 lc->supported = caps;
3619 lc->requested_speed = lc->speed = SPEED_INVALID;
3620 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3621 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3622 if (lc->supported & SUPPORTED_Autoneg) {
3623 lc->advertising = lc->supported;
3624 lc->autoneg = AUTONEG_ENABLE;
3625 lc->requested_fc |= PAUSE_AUTONEG;
3626 } else {
3627 lc->advertising = 0;
3628 lc->autoneg = AUTONEG_DISABLE;
3633 * mc7_calc_size - calculate MC7 memory size
3634 * @cfg: the MC7 configuration
3636 * Calculates the size of an MC7 memory in bytes from the value of its
3637 * configuration register.
3639 static unsigned int mc7_calc_size(u32 cfg)
3641 unsigned int width = G_WIDTH(cfg);
3642 unsigned int banks = !!(cfg & F_BKS) + 1;
3643 unsigned int org = !!(cfg & F_ORG) + 1;
3644 unsigned int density = G_DEN(cfg);
3645 unsigned int MBs = ((256 << density) * banks) / (org << width);
3647 return MBs << 20;
3650 static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3651 unsigned int base_addr, const char *name)
3653 u32 cfg;
3655 mc7->adapter = adapter;
3656 mc7->name = name;
3657 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3658 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3659 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3660 mc7->width = G_WIDTH(cfg);
3663 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3665 mac->adapter = adapter;
3666 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3667 mac->nucast = 1;
3669 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3670 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3671 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3672 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3673 F_ENRGMII, 0);
3677 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
	/*
	 * HW setup that must happen before ports are brought up: MDIO,
	 * I2C clock, GPIO directions, and enabling/resetting the XGMAC
	 * port clocks so MAC registers become accessible.
	 */
3679 	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3681 	mi1_init(adapter, ai);
3682 	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
3683 		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3684 	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3685 		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3686 	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3687 	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3689 	if (adapter->params.rev == 0 || !uses_xaui(adapter))
3690 		val |= F_ENRGMII;
3692 	/* Enable MAC clocks so we can access the registers */
3693 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3694 	t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */
	/* pulse the clock-divider reset on both MACs */
3696 	val |= F_CLKDIVRESET_;
3697 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3698 	t3_read_reg(adapter, A_XGM_PORT_CFG);
3699 	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3700 	t3_read_reg(adapter, A_XGM_PORT_CFG);
3704 * Reset the adapter.
3705 * Older PCIe cards lose their config space during reset, PCI-X
3706 * ones don't.
3708 int t3_reset_adapter(struct adapter *adapter)
	/*
	 * Warm-reset the chip via PL_RST and poll PCI config space until
	 * the Chelsio vendor ID (0x1425) reads back, i.e. the device has
	 * returned.  Pre-B2 PCIe parts lose config space across reset, so
	 * save/restore it around the reset.  Returns 0 on success, -1 if
	 * the device never reappears.
	 */
3710 	int i, save_and_restore_pcie =
3711 	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3712 	uint16_t devid = 0;
3714 	if (save_and_restore_pcie)
3715 		pci_save_state(adapter->pdev);
3716 	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3719 	 * Delay. Give Some time to device to reset fully.
3720 	 * XXX The delay time should be modified.
3722 	for (i = 0; i < 10; i++) {
3723 		msleep(50);
		/* offset 0x00 is the vendor ID register */
3724 		pci_read_config_word(adapter->pdev, 0x00, &devid);
3725 		if (devid == 0x1425)
3726 			break;
3729 	if (devid != 0x1425)
3730 		return -1;
3732 	if (save_and_restore_pcie)
3733 		pci_restore_state(adapter->pdev);
3734 	return 0;
3737 static int init_parity(struct adapter *adap)
	/*
	 * Seed SGE context memories and the CIM IBQs with known data so
	 * later reads do not trip parity errors.  Returns 0 on success,
	 * -EBUSY if the SGE context unit is busy, or a wait-op error.
	 */
3739 	int i, err, addr;
3741 	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3742 		return -EBUSY;
	/* clear the first 16 and last 16 egress contexts */
3744 	for (err = i = 0; !err && i < 16; i++)
3745 		err = clear_sge_ctxt(adap, i, F_EGRESS);
3746 	for (i = 0xfff0; !err && i <= 0xffff; i++)
3747 		err = clear_sge_ctxt(adap, i, F_EGRESS);
	/* clear one response-queue context per queue set */
3748 	for (i = 0; !err && i < SGE_QSETS; i++)
3749 		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3750 	if (err)
3751 		return err;
	/* write zeros through every address of each of the 4 CIM IBQs */
3753 	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3754 	for (i = 0; i < 4; i++)
3755 		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3756 			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3757 				     F_IBQDBGWR | V_IBQDBGQID(i) |
3758 				     V_IBQDBGADDR(addr));
3759 			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3760 					      F_IBQDBGBUSY, 0, 2, 1);
3761 			if (err)
3762 				return err;
3764 	return 0;
3768 * Initialize adapter SW state for the various HW modules, set initial values
3769 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3770 * interface.
3772 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3773 		    int reset)
	/*
	 * One-time SW preparation of the adapter: read PCI/VPD parameters,
	 * optionally reset the chip, size the external memories, derive TP
	 * memory-partitioning parameters, and prepare each port's PHY/MAC.
	 * Returns 0 on success or a negative errno.
	 */
3775 	int ret;
	/* j tracks the VPD port-type slot; starts at -1 so ++j begins at 0 */
3776 	unsigned int i, j = -1;
3778 	get_pci_mode(adapter, &adapter->params.pci);
3780 	adapter->params.info = ai;
3781 	adapter->params.nports = ai->nports0 + ai->nports1;
3782 	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
3783 	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3785 	 * We used to only run the "adapter check task" once a second if
3786 	 * we had PHYs which didn't support interrupts (we would check
3787 	 * their link status once a second). Now we check other conditions
3788 	 * in that routine which could potentially impose a very high
3789 	 * interrupt load on the system. As such, we now always scan the
3790 	 * adapter state once a second ...
3792 	adapter->params.linkpoll_period = 10;
3793 	adapter->params.stats_update_period = is_10G(adapter) ?
3794 	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3795 	adapter->params.pci.vpd_cap_addr =
3796 	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3797 	ret = get_vpd_params(adapter, &adapter->params.vpd);
3798 	if (ret < 0)
3799 		return ret;
3801 	if (reset && t3_reset_adapter(adapter))
3802 		return -1;
3804 	t3_sge_prep(adapter, &adapter->params.sge);
	/* external memory present only when a memory clock is specified */
3806 	if (adapter->params.vpd.mclk) {
3807 		struct tp_params *p = &adapter->params.tp;
3809 		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3810 		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3811 		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3813 		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3814 		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3815 		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3816 		p->cm_size = t3_mc7_size(&adapter->cm);
3817 		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
3818 		p->chan_tx_size = p->pmtx_size / p->nchan;
3819 		p->rx_pg_size = 64 * 1024;
3820 		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3821 		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3822 		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3823 		p->ntimer_qs = p->cm_size >= (128 << 20) ||
3824 		    adapter->params.rev > 0 ? 12 : 6;
	/* offload requires all three external memories to be present */
3827 	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3828 				  t3_mc7_size(&adapter->pmtx) &&
3829 				  t3_mc7_size(&adapter->cm);
3831 	if (is_offload(adapter)) {
3832 		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3833 		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3834 		    DEFAULT_NFILTERS : 0;
3835 		adapter->params.mc5.nroutes = 0;
3836 		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3838 		init_mtus(adapter->params.mtus);
3839 		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3842 	early_hw_init(adapter, ai);
3843 	ret = init_parity(adapter);
3844 	if (ret)
3845 		return ret;
3847 	for_each_port(adapter, i) {
3848 		u8 hw_addr[6];
3849 		const struct port_type_info *pti;
3850 		struct port_info *p = adap2pinfo(adapter, i);
		/* skip empty VPD port-type slots */
3852 		while (!adapter->params.vpd.port_type[++j])
3855 		pti = &port_types[adapter->params.vpd.port_type[j]];
3856 		if (!pti->phy_prep) {
3857 			CH_ALERT(adapter, "Invalid port type index %d\n",
3858 				 adapter->params.vpd.port_type[j]);
3859 			return -EINVAL;
3862 		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3863 				    ai->mdio_ops);
3864 		if (ret)
3865 			return ret;
3866 		mac_prep(&p->mac, adapter, j);
3869 		 * The VPD EEPROM stores the base Ethernet address for the
3870 		 * card. A port's address is derived from the base by adding
3871 		 * the port's index to the base's low octet.
3873 		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3874 		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3876 		memcpy(adapter->port[i]->dev_addr, hw_addr,
3877 		       ETH_ALEN);
3878 		memcpy(adapter->port[i]->perm_addr, hw_addr,
3879 		       ETH_ALEN);
3880 		init_link_config(&p->link_config, p->phy.caps);
		/* leave the PHY powered down until the port is opened */
3881 		p->phy.ops->power_down(&p->phy, 1);
3884 		 * If the PHY doesn't support interrupts for link status
3885 		 * changes, schedule a scan of the adapter links at least
3886 		 * once a second.
3888 		if (!(p->phy.caps & SUPPORTED_IRQ) &&
3889 		    adapter->params.linkpoll_period > 10)
3890 			adapter->params.linkpoll_period = 10;
3893 	return 0;
3896 void t3_led_ready(struct adapter *adapter)
	/* Drive GPIO0 high; early_hw_init() configured it as an output.
	 * NOTE(review): presumably this lights the "ready" LED -- confirm
	 * against the board documentation. */
3898 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3899 			 F_GPIO0_OUT_VAL);
3902 int t3_replay_prep_adapter(struct adapter *adapter)
	/*
	 * Re-run the early HW init and per-port PHY preparation using the
	 * parameters already cached in adapter->params (e.g. after a reset
	 * that preserved SW state).  Unlike t3_prep_adapter(), PHYs keep
	 * their existing addresses and no MAC addresses are re-derived.
	 */
3904 	const struct adapter_info *ai = adapter->params.info;
	/* j tracks the VPD port-type slot; starts at -1 so ++j begins at 0 */
3905 	unsigned int i, j = -1;
3906 	int ret;
3908 	early_hw_init(adapter, ai);
3909 	ret = init_parity(adapter);
3910 	if (ret)
3911 		return ret;
3913 	for_each_port(adapter, i) {
3914 		const struct port_type_info *pti;
3915 		struct port_info *p = adap2pinfo(adapter, i);
		/* skip empty VPD port-type slots */
3917 		while (!adapter->params.vpd.port_type[++j])
3920 		pti = &port_types[adapter->params.vpd.port_type[j]];
3921 		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
3922 		if (ret)
3923 			return ret;
		/* leave the PHY powered down until the port is opened */
3924 		p->phy.ops->power_down(&p->phy, 1);
3927 	return 0;