1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: cxgb_main.c,v 1.15 2009/05/06 09:25:14 cegger Exp $");
35 __FBSDID("$FreeBSD: src/sys/dev/cxgb/cxgb_main.c,v 1.36 2007/09/11 23:49:27 kmacy Exp $");
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
43 #include <sys/module.h>
44 #include <sys/pciio.h>
47 #include <machine/bus.h>
49 #include <machine/resource.h>
50 #include <sys/bus_dma.h>
53 #include <sys/ioccom.h>
56 #include <sys/linker.h>
57 #include <sys/firmware.h>
59 #include <sys/socket.h>
60 #include <sys/sockio.h>
64 #include <sys/sysctl.h>
65 #include <sys/queue.h>
67 #include <sys/taskqueue.h>
72 #include <net/ethernet.h>
75 #include <net/if_arp.h>
76 #include <net/if_dl.h>
77 #include <net/if_media.h>
78 #include <net/if_types.h>
80 #include <netinet/in_systm.h>
81 #include <netinet/in.h>
83 #include <netinet/if_ether.h>
85 #include <netinet/ip.h>
86 #include <netinet/ip.h>
87 #include <netinet/tcp.h>
88 #include <netinet/udp.h>
89 #include <netinet/if_inarp.h>
91 #include <dev/pci/pcireg.h>
92 #include <dev/pci/pcivar.h>
94 #include <dev/pci/pci_private.h>
98 #include <cxgb_include.h>
101 #include <dev/cxgb/cxgb_include.h>
104 #include <dev/pci/cxgb_include.h>
108 #ifdef PRIV_SUPPORTED
109 #include <sys/priv.h>
113 #include <altq/altq_conf.h>
116 static int cxgb_setup_msix(adapter_t
*, int);
117 static void cxgb_teardown_msix(adapter_t
*);
119 static void cxgb_init(void *);
122 static int cxgb_init(struct ifnet
*);
124 static void cxgb_init_locked(struct port_info
*);
125 static void cxgb_stop_locked(struct port_info
*);
126 static void cxgb_set_rxmode(struct port_info
*);
128 static int cxgb_ioctl(struct ifnet
*, unsigned long, caddr_t
);
131 static int cxgb_ioctl(struct ifnet
*, unsigned long, void *);
133 static void cxgb_start(struct ifnet
*);
135 static void cxgb_stop(struct ifnet
*, int);
138 static void cxgb_start_proc(void *, int ncount
);
141 static void cxgb_start_proc(struct work
*, void *);
143 static int cxgb_media_change(struct ifnet
*);
144 static void cxgb_media_status(struct ifnet
*, struct ifmediareq
*);
145 static int setup_sge_qsets(adapter_t
*);
147 static void cxgb_async_intr(void *);
148 static void cxgb_ext_intr_handler(void *, int);
149 static void cxgb_tick_handler(void *, int);
152 static int cxgb_async_intr(void *);
153 static void cxgb_ext_intr_handler(struct work
*, void *);
154 static void cxgb_tick_handler(struct work
*, void *);
156 static void cxgb_down_locked(struct adapter
*sc
);
157 static void cxgb_tick(void *);
158 static void setup_rss(adapter_t
*sc
);
160 /* Attachment glue for the PCI controller end of the device. Each port of
161 * the device is attached separately, as defined later.
164 static int cxgb_controller_probe(device_t
);
165 static int cxgb_controller_attach(device_t
);
166 static int cxgb_controller_detach(device_t
);
169 static int cxgb_controller_match(device_t dev
, cfdata_t match
, void *context
);
170 static void cxgb_controller_attach(device_t parent
, device_t dev
, void *context
);
171 static int cxgb_controller_detach(device_t dev
, int flags
);
173 static void cxgb_free(struct adapter
*);
174 static __inline
void reg_block_dump(struct adapter
*ap
, uint8_t *buf
, unsigned int start
,
177 static void cxgb_get_regs(adapter_t
*sc
, struct ifconf_regs
*regs
, uint8_t *buf
);
178 static int cxgb_get_regs_len(void);
179 static int offload_open(struct port_info
*pi
);
181 static void touch_bars(device_t dev
);
184 static int offload_close(struct toedev
*tdev
);
189 static device_method_t cxgb_controller_methods
[] = {
190 DEVMETHOD(device_probe
, cxgb_controller_probe
),
191 DEVMETHOD(device_attach
, cxgb_controller_attach
),
192 DEVMETHOD(device_detach
, cxgb_controller_detach
),
195 DEVMETHOD(bus_print_child
, bus_generic_print_child
),
196 DEVMETHOD(bus_driver_added
, bus_generic_driver_added
),
201 static driver_t cxgb_controller_driver
= {
203 cxgb_controller_methods
,
204 sizeof(struct adapter
)
207 static devclass_t cxgb_controller_devclass
;
208 DRIVER_MODULE(cxgbc
, pci
, cxgb_controller_driver
, cxgb_controller_devclass
, 0, 0);
212 CFATTACH_DECL(cxgbc
, sizeof(struct adapter
), cxgb_controller_match
, cxgb_controller_attach
, cxgb_controller_detach
, NULL
);
216 * Attachment glue for the ports. Attachment is done directly to the
220 static int cxgb_port_probe(device_t
);
221 static int cxgb_port_attach(device_t
);
222 static int cxgb_port_detach(device_t
);
226 static int cxgb_port_match(device_t dev
, cfdata_t match
, void *context
);
227 static void cxgb_port_attach(device_t dev
, device_t self
, void *context
);
228 static int cxgb_port_detach(device_t dev
, int flags
);
232 static device_method_t cxgb_port_methods
[] = {
233 DEVMETHOD(device_probe
, cxgb_port_probe
),
234 DEVMETHOD(device_attach
, cxgb_port_attach
),
235 DEVMETHOD(device_detach
, cxgb_port_detach
),
239 static driver_t cxgb_port_driver
= {
245 static d_ioctl_t cxgb_extension_ioctl
;
246 static d_open_t cxgb_extension_open
;
247 static d_close_t cxgb_extension_close
;
249 static struct cdevsw cxgb_cdevsw
= {
250 .d_version
= D_VERSION
,
252 .d_open
= cxgb_extension_open
,
253 .d_close
= cxgb_extension_close
,
254 .d_ioctl
= cxgb_extension_ioctl
,
258 static devclass_t cxgb_port_devclass
;
259 DRIVER_MODULE(cxgb
, cxgbc
, cxgb_port_driver
, cxgb_port_devclass
, 0, 0);
263 CFATTACH_DECL(cxgb
, sizeof(struct port_device
), cxgb_port_match
, cxgb_port_attach
, cxgb_port_detach
, NULL
);
266 #define SGE_MSIX_COUNT (SGE_QSETS + 1)
268 extern int collapse_mbufs
;
271 * The driver uses the best interrupt scheme available on a platform in the
272 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
273 * of these schemes the driver may consider as follows:
275 * msi = 2: choose from among all three options
276 * msi = 1 : only consider MSI and pin interrupts
277 * msi = 0: force pin interrupts
279 static int msi_allowed
= 2;
283 TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed
);
284 SYSCTL_NODE(_hw
, OID_AUTO
, cxgb
, CTLFLAG_RD
, 0, "CXGB driver parameters");
285 SYSCTL_UINT(_hw_cxgb
, OID_AUTO
, msi_allowed
, CTLFLAG_RDTUN
, &msi_allowed
, 0,
286 "MSI-X, MSI, INTx selector");
289 * The driver enables offload as a default.
290 * To disable it, use ofld_disable = 1.
292 static int ofld_disable
= 0;
293 TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable
);
294 SYSCTL_UINT(_hw_cxgb
, OID_AUTO
, ofld_disable
, CTLFLAG_RDTUN
, &ofld_disable
, 0,
295 "disable ULP offload");
298 * The driver uses an auto-queue algorithm by default.
299 * To disable it and force a single queue-set per port, use singleq = 1.
301 static int singleq
= 1;
302 TUNABLE_INT("hw.cxgb.singleq", &singleq
);
303 SYSCTL_UINT(_hw_cxgb
, OID_AUTO
, singleq
, CTLFLAG_RDTUN
, &singleq
, 0,
304 "use a single queue-set per port");
309 * The driver uses an auto-queue algorithm by default.
310 * To disable it and force a single queue-set per port, use singleq = 1.
312 static int singleq
= 1;
316 MAX_TXQ_ENTRIES
= 16384,
317 MAX_CTRL_TXQ_ENTRIES
= 1024,
318 MAX_RSPQ_ENTRIES
= 16384,
319 MAX_RX_BUFFERS
= 16384,
320 MAX_RX_JUMBO_BUFFERS
= 16384,
322 MIN_CTRL_TXQ_ENTRIES
= 4,
323 MIN_RSPQ_ENTRIES
= 32,
325 MIN_FL_JUMBO_ENTRIES
= 32
340 u32 report_filter_id
:1;
348 enum { FILTER_NO_VLAN_PRI
= 7 };
350 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
352 /* Table for probing the cards. The desc field isn't actually used */
358 } cxgb_identifiers
[] = {
359 {PCI_VENDOR_ID_CHELSIO
, 0x0020, 0, "PE9000"},
360 {PCI_VENDOR_ID_CHELSIO
, 0x0021, 1, "T302E"},
361 {PCI_VENDOR_ID_CHELSIO
, 0x0022, 2, "T310E"},
362 {PCI_VENDOR_ID_CHELSIO
, 0x0023, 3, "T320X"},
363 {PCI_VENDOR_ID_CHELSIO
, 0x0024, 1, "T302X"},
364 {PCI_VENDOR_ID_CHELSIO
, 0x0025, 3, "T320E"},
365 {PCI_VENDOR_ID_CHELSIO
, 0x0026, 2, "T310X"},
366 {PCI_VENDOR_ID_CHELSIO
, 0x0030, 2, "T3B10"},
367 {PCI_VENDOR_ID_CHELSIO
, 0x0031, 3, "T3B20"},
368 {PCI_VENDOR_ID_CHELSIO
, 0x0032, 1, "T3B02"},
369 {PCI_VENDOR_ID_CHELSIO
, 0x0033, 4, "T3B04"},
375 static int set_eeprom(struct port_info
*pi
, const uint8_t *data
, int len
, int offset
);
379 t3rev2char(struct adapter
*adapter
)
383 switch(adapter
->params
.rev
) {
399 static struct cxgb_ident
*
400 cxgb_get_ident(device_t dev
)
402 struct cxgb_ident
*id
;
404 for (id
= cxgb_identifiers
; id
->desc
!= NULL
; id
++) {
405 if ((id
->vendor
== pci_get_vendor(dev
)) &&
406 (id
->device
== pci_get_device(dev
))) {
413 static const struct adapter_info
*
414 cxgb_get_adapter_info(device_t dev
)
416 struct cxgb_ident
*id
;
417 const struct adapter_info
*ai
;
419 id
= cxgb_get_ident(dev
);
423 ai
= t3_get_adapter_info(id
->index
);
429 cxgb_controller_probe(device_t dev
)
431 const struct adapter_info
*ai
;
432 char *ports
, buf
[80];
435 ai
= cxgb_get_adapter_info(dev
);
439 nports
= ai
->nports0
+ ai
->nports1
;
445 snprintf(buf
, sizeof(buf
), "%s RNIC, %d %s", ai
->desc
, nports
, ports
);
446 device_set_desc_copy(dev
, buf
);
447 return (BUS_PROBE_DEFAULT
);
452 static struct cxgb_ident
*cxgb_get_ident(struct pci_attach_args
*pa
)
454 struct cxgb_ident
*id
;
455 int vendorid
, deviceid
;
457 vendorid
= PCI_VENDOR(pci_conf_read(pa
->pa_pc
, pa
->pa_tag
, PCI_ID_REG
));
458 deviceid
= PCI_PRODUCT(pci_conf_read(pa
->pa_pc
, pa
->pa_tag
, PCI_ID_REG
));
460 for (id
= cxgb_identifiers
; id
->desc
!= NULL
; id
++) {
461 if ((id
->vendor
== vendorid
) &&
462 (id
->device
== deviceid
)) {
469 static const struct adapter_info
*cxgb_get_adapter_info(struct pci_attach_args
*pa
)
471 struct cxgb_ident
*id
;
472 const struct adapter_info
*ai
;
474 id
= cxgb_get_ident(pa
);
478 ai
= t3_get_adapter_info(id
->index
);
482 static int cxgb_controller_match(device_t dev
, cfdata_t match
, void *context
)
484 struct pci_attach_args
*pa
= context
;
485 const struct adapter_info
*ai
;
487 ai
= cxgb_get_adapter_info(pa
);
491 return (100); // we ARE the best driver for this card!!
495 #define FW_FNAME "t3fw%d%d%d"
496 #define TPEEPROM_NAME "t3%ctpe%d%d%d"
497 #define TPSRAM_NAME "t3%cps%d%d%d"
501 upgrade_fw(adapter_t
*sc
)
504 #ifdef FIRMWARE_LATEST
505 const struct firmware
*fw
;
511 snprintf(&buf
[0], sizeof(buf
), FW_FNAME
, FW_VERSION_MAJOR
,
512 FW_VERSION_MINOR
, FW_VERSION_MICRO
);
515 fw
= firmware_get(buf
);
522 device_printf(sc
->dev
, "Could not find firmware image %s\n", buf
);
525 device_printf(sc
->dev
, "updating firmware on card with %s\n", buf
);
526 status
= t3_load_fw(sc
, (const uint8_t *)fw
->data
, fw
->datasize
);
528 device_printf(sc
->dev
, "firmware update returned %s %d\n", (status
== 0) ? "success" : "fail", status
);
530 firmware_put(fw
, FIRMWARE_UNLOAD
);
537 int cxgb_cfprint(void *aux
, const char *info
);
538 int cxgb_cfprint(void *aux
, const char *info
)
542 printf("cxgb_cfprint(%p, \"%s\")\n", aux
, info
);
549 void cxgb_make_task(void *context
)
551 struct cxgb_task
*w
= (struct cxgb_task
*)context
;
553 // we can only use workqueue_create() once the system is up and running
554 workqueue_create(&w
->wq
, w
->name
, w
->func
, w
->context
, PRIBIO
, IPL_NET
, 0);
555 // printf("======>> create workqueue for %s %p\n", w->name, w->wq);
561 cxgb_controller_attach(device_t dev
)
565 cxgb_controller_attach(device_t parent
, device_t dev
, void *context
)
569 const struct adapter_info
*ai
;
572 struct pci_attach_args
*pa
= context
;
573 struct cxgb_attach_args cxgb_args
;
585 sc
= device_get_softc(dev
);
588 sc
= device_private(dev
);
592 memcpy(&sc
->pa
, pa
, sizeof(struct pci_attach_args
));
596 ai
= cxgb_get_adapter_info(dev
);
599 ai
= cxgb_get_adapter_info(pa
);
603 * XXX not really related but a recent addition
606 /* find the PCIe link width and set max read request to 4KB*/
607 if (pci_find_extcap(dev
, PCIY_EXPRESS
, ®
) == 0) {
609 lnk
= pci_read_config(dev
, reg
+ 0x12, 2);
610 sc
->link_width
= (lnk
>> 4) & 0x3f;
612 pectl
= pci_read_config(dev
, reg
+ 0x8, 2);
613 pectl
= (pectl
& ~0x7000) | (5 << 12);
614 pci_write_config(dev
, reg
+ 0x8, pectl
, 2);
617 if (sc
->link_width
!= 0 && sc
->link_width
<= 4 &&
618 (ai
->nports0
+ ai
->nports1
) <= 2) {
619 device_printf(sc
->dev
,
620 "PCIe x%d Link, expect reduced performance\n",
627 pci_enable_busmaster(dev
);
630 * Allocate the registers and make them available to the driver.
631 * The registers that we care about for NIC mode are in BAR 0
634 sc
->regs_rid
= PCIR_BAR(0);
635 if ((sc
->regs_res
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
,
636 &sc
->regs_rid
, RF_ACTIVE
)) == NULL
) {
637 device_printf(dev
, "Cannot allocate BAR\n");
641 snprintf(sc
->lockbuf
, ADAPTER_LOCK_NAME_LEN
, "cxgb controller lock %d",
642 device_get_unit(dev
));
643 ADAPTER_LOCK_INIT(sc
, sc
->lockbuf
);
645 snprintf(sc
->reglockbuf
, ADAPTER_LOCK_NAME_LEN
, "SGE reg lock %d",
646 device_get_unit(dev
));
647 snprintf(sc
->mdiolockbuf
, ADAPTER_LOCK_NAME_LEN
, "cxgb mdio lock %d",
648 device_get_unit(dev
));
649 snprintf(sc
->elmerlockbuf
, ADAPTER_LOCK_NAME_LEN
, "cxgb elmer lock %d",
650 device_get_unit(dev
));
653 sc
->regs_rid
= PCI_MAPREG_START
;
654 t3_os_pci_read_config_4(sc
, PCI_MAPREG_START
, ®
);
656 // call bus_space_map
657 sc
->bar0
= reg
&0xFFFFF000;
658 bus_space_map(sc
->pa
.pa_memt
, sc
->bar0
, 4096, 0, &sc
->bar0_handle
);
661 MTX_INIT(&sc
->sge
.reg_lock
, sc
->reglockbuf
, NULL
, MTX_DEF
);
662 MTX_INIT(&sc
->mdio_lock
, sc
->mdiolockbuf
, NULL
, MTX_DEF
);
663 MTX_INIT(&sc
->elmer_lock
, sc
->elmerlockbuf
, NULL
, MTX_DEF
);
666 sc
->bt
= rman_get_bustag(sc
->regs_res
);
667 sc
->bh
= rman_get_bushandle(sc
->regs_res
);
668 sc
->mmio_len
= rman_get_size(sc
->regs_res
);
671 sc
->bt
= sc
->pa
.pa_memt
;
672 sc
->bh
= sc
->bar0_handle
;
676 if (t3_prep_adapter(sc
, ai
, 1) < 0) {
677 printf("prep adapter failed\n");
681 /* Allocate the BAR for doing MSI-X. If it succeeds, try to allocate
682 * enough messages for the queue sets. If that fails, try falling
683 * back to MSI. If that fails, then try falling back to the legacy
684 * interrupt pin model.
688 sc
->msix_regs_rid
= 0x20;
689 if ((msi_allowed
>= 2) &&
690 (sc
->msix_regs_res
= bus_alloc_resource_any(dev
, SYS_RES_MEMORY
,
691 &sc
->msix_regs_rid
, RF_ACTIVE
)) != NULL
) {
693 msi_needed
= sc
->msi_count
= SGE_MSIX_COUNT
;
695 if (((error
= pci_alloc_msix(dev
, &sc
->msi_count
)) != 0) ||
696 (sc
->msi_count
!= msi_needed
)) {
697 device_printf(dev
, "msix allocation failed - msi_count = %d"
698 " msi_needed=%d will try msi err=%d\n", sc
->msi_count
,
701 pci_release_msi(dev
);
702 bus_release_resource(dev
, SYS_RES_MEMORY
,
703 sc
->msix_regs_rid
, sc
->msix_regs_res
);
704 sc
->msix_regs_res
= NULL
;
706 sc
->flags
|= USING_MSIX
;
707 sc
->cxgb_intr
= t3_intr_msix
;
711 if ((msi_allowed
>= 1) && (sc
->msi_count
== 0)) {
713 if (pci_alloc_msi(dev
, &sc
->msi_count
)) {
714 device_printf(dev
, "alloc msi failed - will try INTx\n");
716 pci_release_msi(dev
);
718 sc
->flags
|= USING_MSI
;
720 sc
->cxgb_intr
= t3_intr_msi
;
724 if (sc
->msi_count
== 0) {
725 device_printf(dev
, "using line interrupts\n");
727 sc
->cxgb_intr
= t3b_intr
;
731 /* Create a private taskqueue thread for handling driver events */
732 #ifdef TASKQUEUE_CURRENT
733 sc
->tq
= taskqueue_create("cxgb_taskq", M_NOWAIT
,
734 taskqueue_thread_enqueue
, &sc
->tq
);
736 sc
->tq
= taskqueue_create_fast("cxgb_taskq", M_NOWAIT
,
737 taskqueue_thread_enqueue
, &sc
->tq
);
739 if (sc
->tq
== NULL
) {
740 device_printf(dev
, "failed to allocate controller task queue\n");
744 taskqueue_start_threads(&sc
->tq
, 1, PI_NET
, "%s taskq",
745 device_get_nameunit(dev
));
746 TASK_INIT(&sc
->ext_intr_task
, 0, cxgb_ext_intr_handler
, sc
);
747 TASK_INIT(&sc
->tick_task
, 0, cxgb_tick_handler
, sc
);
750 sc
->ext_intr_task
.name
= "cxgb_ext_intr_handler";
751 sc
->ext_intr_task
.func
= cxgb_ext_intr_handler
;
752 sc
->ext_intr_task
.context
= sc
;
753 kthread_create(PRI_NONE
, 0, NULL
, cxgb_make_task
, &sc
->ext_intr_task
, NULL
, "cxgb_make_task");
755 sc
->tick_task
.name
= "cxgb_tick_handler";
756 sc
->tick_task
.func
= cxgb_tick_handler
;
757 sc
->tick_task
.context
= sc
;
758 kthread_create(PRI_NONE
, 0, NULL
, cxgb_make_task
, &sc
->tick_task
, NULL
, "cxgb_make_task");
761 /* Create a periodic callout for checking adapter status */
763 callout_init(&sc
->cxgb_tick_ch
, TRUE
);
766 callout_init(&sc
->cxgb_tick_ch
, 0);
769 if (t3_check_fw_version(sc
) != 0) {
771 * Warn user that a firmware update will be attempted in init.
773 device_printf(dev
, "firmware needs to be updated to version %d.%d.%d\n",
774 FW_VERSION_MAJOR
, FW_VERSION_MINOR
, FW_VERSION_MICRO
);
775 sc
->flags
&= ~FW_UPTODATE
;
777 sc
->flags
|= FW_UPTODATE
;
780 if (t3_check_tpsram_version(sc
) != 0) {
782 * Warn user that a firmware update will be attempted in init.
784 device_printf(dev
, "SRAM needs to be updated to version %c-%d.%d.%d\n",
785 t3rev2char(sc
), TP_VERSION_MAJOR
, TP_VERSION_MINOR
, TP_VERSION_MICRO
);
786 sc
->flags
&= ~TPS_UPTODATE
;
788 sc
->flags
|= TPS_UPTODATE
;
791 if ((sc
->flags
& USING_MSIX
) && !singleq
)
793 port_qsets
= min((SGE_QSETS
/(sc
)->params
.nports
), mp_ncpus
);
796 port_qsets
= (SGE_QSETS
/(sc
)->params
.nports
);
800 * Create a child device for each MAC. The ethernet attachment
801 * will be done in these children.
803 for (i
= 0; i
< (sc
)->params
.nports
; i
++) {
804 struct port_info
*pi
;
807 if ((child
= device_add_child(dev
, "cxgb", -1)) == NULL
) {
808 device_printf(dev
, "failed to add child port\n");
815 pi
->nqsets
= port_qsets
;
816 pi
->first_qset
= i
*port_qsets
;
818 pi
->tx_chan
= i
>= ai
->nports0
;
819 pi
->txpkt_intf
= pi
->tx_chan
? 2 * (i
- ai
->nports0
) + 1 : 2 * i
;
820 sc
->rxpkt_map
[pi
->txpkt_intf
] = i
;
825 printf("\n"); // for cleaner formatting in dmesg
826 child
= config_found_sm_loc(dev
, "cxgbc", locs
, &cxgb_args
,
827 cxgb_cfprint
, config_stdsubmatch
);
828 printf("\n"); // for cleaner formatting in dmesg
830 sc
->portdev
[i
] = child
;
832 device_set_softc(child
, pi
);
837 if ((error
= bus_generic_attach(dev
)) != 0)
842 * XXX need to poll for link status
844 sc
->params
.stats_update_period
= 1;
846 /* initialize sge private state */
847 t3_sge_init_adapter(sc
);
853 if (is_offload(sc
)) {
854 setbit(&sc
->registered_device_map
, OFFLOAD_DEVMAP_BIT
);
855 cxgb_adapter_ofld(sc
);
858 error
= t3_get_fw_version(sc
, &vers
);
862 snprintf(&sc
->fw_version
[0], sizeof(sc
->fw_version
), "%d.%d.%d",
863 G_FW_VERSION_MAJOR(vers
), G_FW_VERSION_MINOR(vers
),
864 G_FW_VERSION_MICRO(vers
));
866 printf("******** firmware rev %s\n", sc
->fw_version
);
885 cxgb_controller_detach(device_t dev
)
888 cxgb_controller_detach(device_t dev
, int flags
)
894 sc
= device_get_softc(dev
);
897 sc
= device_private(dev
);
906 cxgb_free(struct adapter
*sc
)
914 cxgb_down_locked(sc
);
917 if (sc
->flags
& (USING_MSI
| USING_MSIX
)) {
918 device_printf(sc
->dev
, "releasing msi message(s)\n");
919 pci_release_msi(sc
->dev
);
921 device_printf(sc
->dev
, "no msi message to release\n");
923 if (sc
->msix_regs_res
!= NULL
) {
924 bus_release_resource(sc
->dev
, SYS_RES_MEMORY
, sc
->msix_regs_rid
,
930 if (sc
->tq
!= NULL
) {
931 taskqueue_drain(sc
->tq
, &sc
->ext_intr_task
);
932 taskqueue_drain(sc
->tq
, &sc
->tick_task
);
935 t3_sge_deinit_sw(sc
);
937 * Wait for last callout
940 tsleep(&sc
, 0, "cxgb unload", 3*hz
);
942 for (i
= 0; i
< (sc
)->params
.nports
; ++i
) {
943 if (sc
->portdev
[i
] != NULL
)
945 device_delete_child(sc
->dev
, sc
->portdev
[i
]);
955 bus_generic_detach(sc
->dev
);
957 taskqueue_free(sc
->tq
);
960 if (is_offload(sc
)) {
961 cxgb_adapter_unofld(sc
);
962 if (isset(&sc
->open_device_map
, OFFLOAD_DEVMAP_BIT
))
963 offload_close(&sc
->tdev
);
967 t3_free_sge_resources(sc
);
968 free(sc
->filters
, M_DEVBUF
);
974 if (sc
->regs_res
!= NULL
)
975 bus_release_resource(sc
->dev
, SYS_RES_MEMORY
, sc
->regs_rid
,
979 MTX_DESTROY(&sc
->mdio_lock
);
980 MTX_DESTROY(&sc
->sge
.reg_lock
);
981 MTX_DESTROY(&sc
->elmer_lock
);
982 ADAPTER_LOCK_DEINIT(sc
);
988 * setup_sge_qsets - configure SGE Tx/Rx/response queues
989 * @sc: the controller softc
991 * Determines how many sets of SGE queues to use and initializes them.
992 * We support multiple queue sets per port if we have MSI-X, otherwise
993 * just one queue set per port.
996 setup_sge_qsets(adapter_t
*sc
)
998 int i
, j
, err
, irq_idx
= 0, qset_idx
= 0;
999 u_int ntxq
= SGE_TXQ_PER_SET
;
1001 if ((err
= t3_sge_alloc(sc
)) != 0) {
1002 device_printf(sc
->dev
, "t3_sge_alloc returned %d\n", err
);
1006 if (sc
->params
.rev
> 0 && !(sc
->flags
& USING_MSI
))
1009 for (i
= 0; i
< (sc
)->params
.nports
; i
++) {
1010 struct port_info
*pi
= &sc
->port
[i
];
1012 for (j
= 0; j
< pi
->nqsets
; j
++, qset_idx
++) {
1013 err
= t3_sge_alloc_qset(sc
, qset_idx
, (sc
)->params
.nports
,
1014 (sc
->flags
& USING_MSIX
) ? qset_idx
+ 1 : irq_idx
,
1015 &sc
->params
.sge
.qset
[qset_idx
], ntxq
, pi
);
1017 t3_free_sge_resources(sc
);
1018 device_printf(sc
->dev
, "t3_sge_alloc_qset failed with %d\n",
1029 cxgb_teardown_msix(adapter_t
*sc
)
1033 for (nqsets
= i
= 0; i
< (sc
)->params
.nports
; i
++)
1034 nqsets
+= sc
->port
[i
].nqsets
;
1036 for (i
= 0; i
< nqsets
; i
++) {
1037 if (sc
->msix_intr_tag
[i
] != NULL
) {
1039 bus_teardown_intr(sc
->dev
, sc
->msix_irq_res
[i
],
1040 sc
->msix_intr_tag
[i
]);
1042 sc
->msix_intr_tag
[i
] = NULL
;
1044 if (sc
->msix_irq_res
[i
] != NULL
) {
1046 bus_release_resource(sc
->dev
, SYS_RES_IRQ
,
1047 sc
->msix_irq_rid
[i
], sc
->msix_irq_res
[i
]);
1049 sc
->msix_irq_res
[i
] = NULL
;
1055 cxgb_setup_msix(adapter_t
*sc
, int msix_count
)
1057 int i
, j
, k
, nqsets
, rid
;
1059 /* The first message indicates link changes and error conditions */
1062 if ((sc
->irq_res
= bus_alloc_resource_any(sc
->dev
, SYS_RES_IRQ
,
1063 &sc
->irq_rid
, RF_SHAREABLE
| RF_ACTIVE
)) == NULL
) {
1064 device_printf(sc
->dev
, "Cannot allocate msix interrupt\n");
1068 if (bus_setup_intr(sc
->dev
, sc
->irq_res
, INTR_MPSAFE
|INTR_TYPE_NET
,
1072 cxgb_async_intr
, sc
, &sc
->intr_tag
)) {
1073 device_printf(sc
->dev
, "Cannot set up interrupt\n");
1078 /* Allocate PCI interrupt resources. */
1079 if (pci_intr_map(&sc
->pa
, &sc
->intr_handle
))
1081 printf("cxgb_setup_msix(%d): pci_intr_map() failed\n", __LINE__
);
1084 sc
->intr_cookie
= pci_intr_establish(sc
->pa
.pa_pc
, sc
->intr_handle
,
1085 IPL_NET
, cxgb_async_intr
, sc
);
1086 if (sc
->intr_cookie
== NULL
)
1088 printf("cxgb_setup_msix(%d): pci_intr_establish() failed\n", __LINE__
);
1092 for (i
= k
= 0; i
< (sc
)->params
.nports
; i
++) {
1093 nqsets
= sc
->port
[i
].nqsets
;
1094 for (j
= 0; j
< nqsets
; j
++, k
++) {
1096 struct sge_qset
*qs
= &sc
->sge
.qs
[k
];
1101 printf("rid=%d ", rid
);
1103 if ((sc
->msix_irq_res
[k
] = bus_alloc_resource_any(
1104 sc
->dev
, SYS_RES_IRQ
, &rid
,
1105 RF_SHAREABLE
| RF_ACTIVE
)) == NULL
) {
1106 device_printf(sc
->dev
, "Cannot allocate "
1107 "interrupt for message %d\n", rid
);
1110 sc
->msix_irq_rid
[k
] = rid
;
1111 printf("setting up interrupt for port=%d\n",
1113 if (bus_setup_intr(sc
->dev
, sc
->msix_irq_res
[k
],
1114 INTR_MPSAFE
|INTR_TYPE_NET
,
1118 t3_intr_msix
, qs
, &sc
->msix_intr_tag
[k
])) {
1119 device_printf(sc
->dev
, "Cannot set up "
1120 "interrupt for message %d\n", rid
);
1136 cxgb_port_probe(device_t dev
)
1138 struct port_info
*p
;
1141 p
= device_get_softc(dev
);
1143 snprintf(buf
, sizeof(buf
), "Port %d %s", p
->port_id
, p
->port_type
->desc
);
1144 device_set_desc_copy(dev
, buf
);
1149 static int cxgb_port_match(device_t dev
, cfdata_t match
, void *context
)
1157 cxgb_makedev(struct port_info
*pi
)
1160 pi
->port_cdev
= make_dev(&cxgb_cdevsw
, pi
->ifp
->if_dunit
,
1161 UID_ROOT
, GID_WHEEL
, 0600, if_name(pi
->ifp
));
1163 if (pi
->port_cdev
== NULL
)
1166 pi
->port_cdev
->si_drv1
= (void *)pi
;
1171 #ifdef TSO_SUPPORTED
1172 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU)
1173 /* Don't enable TSO6 yet */
1174 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU)
1176 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
1177 /* Don't enable TSO6 yet */
1178 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
1179 #define IFCAP_TSO4 0x0
1180 #define IFCAP_TSO6 0x0
1181 #define CSUM_TSO 0x0
1186 #define IFCAP_HWCSUM (IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx)
1187 #define IFCAP_RXCSUM IFCAP_CSUM_IPv4_Rx
1188 #define IFCAP_TXCSUM IFCAP_CSUM_IPv4_Tx
1190 #ifdef TSO_SUPPORTED
1191 #define CXGB_CAP (IFCAP_HWCSUM | IFCAP_TSO)
1192 /* Don't enable TSO6 yet */
1193 #define CXGB_CAP_ENABLE (IFCAP_HWCSUM | IFCAP_TSO4)
1195 #define CXGB_CAP (IFCAP_HWCSUM)
1196 /* Don't enable TSO6 yet */
1197 #define CXGB_CAP_ENABLE (IFCAP_HWCSUM)
1198 #define IFCAP_TSO4 0x0
1199 #define IFCAP_TSO6 0x0
1200 #define CSUM_TSO 0x0
1206 cxgb_port_attach(device_t dev
)
1210 cxgb_port_attach(device_t dev
, device_t self
, void *context
)
1213 struct port_info
*p
;
1215 struct port_device
*pd
;
1216 int *port_number
= (int *)context
;
1224 p
= device_get_softc(dev
);
1227 pd
= (struct port_device
*)self
; // device is first element in port_device
1229 pd
->parent
= (struct adapter
*)dev
;
1230 pd
->port_number
= *port_number
;
1231 p
= &pd
->parent
->port
[*port_number
];
1236 snprintf(p
->lockbuf
, PORT_NAME_LEN
, "cxgb port lock %d:%d",
1237 device_get_unit(device_get_parent(dev
)), p
->port_id
);
1239 PORT_LOCK_INIT(p
, p
->lockbuf
);
1241 /* Allocate an ifnet object and set it up */
1243 ifp
= p
->ifp
= if_alloc(IFT_ETHER
);
1246 ifp
= p
->ifp
= (void *)malloc(sizeof (struct ifnet
), M_IFADDR
, M_WAITOK
);
1249 device_printf(dev
, "Cannot allocate ifnet\n");
1258 memset(ifp
, 0, sizeof(struct ifnet
));
1262 * Note that there is currently no watchdog timer.
1265 if_initname(ifp
, device_get_name(dev
), device_get_unit(dev
));
1268 snprintf(buf
, sizeof(buf
), "cxgb%d", p
->port
);
1269 strcpy(ifp
->if_xname
, buf
);
1271 ifp
->if_init
= cxgb_init
;
1273 ifp
->if_flags
= IFF_BROADCAST
| IFF_SIMPLEX
| IFF_MULTICAST
;
1274 ifp
->if_ioctl
= cxgb_ioctl
;
1275 ifp
->if_start
= cxgb_start
;
1277 ifp
->if_stop
= cxgb_stop
;
1279 ifp
->if_timer
= 0; /* Disable ifnet watchdog */
1280 ifp
->if_watchdog
= NULL
;
1283 ifp
->if_snd
.ifq_drv_maxlen
= TX_ETH_Q_SIZE
;
1284 IFQ_SET_MAXLEN(&ifp
->if_snd
, ifp
->if_snd
.ifq_drv_maxlen
);
1287 ifp
->if_snd
.ifq_maxlen
= TX_ETH_Q_SIZE
;
1288 IFQ_SET_MAXLEN(&ifp
->if_snd
, ifp
->if_snd
.ifq_maxlen
);
1291 IFQ_SET_READY(&ifp
->if_snd
);
1294 ifp
->if_hwassist
= ifp
->if_capabilities
= ifp
->if_capenable
= 0;
1295 ifp
->if_capabilities
|= CXGB_CAP
;
1296 ifp
->if_capenable
|= CXGB_CAP_ENABLE
;
1297 ifp
->if_hwassist
|= (CSUM_TCP
| CSUM_UDP
| CSUM_IP
| CSUM_TSO
);
1300 ifp
->if_capabilities
= ifp
->if_capenable
= 0;
1302 ifp
->if_baudrate
= 10000000000; // 10 Gbps
1304 * disable TSO on 4-port - it isn't supported by the firmware yet
1306 if (p
->adapter
->params
.nports
> 2) {
1307 ifp
->if_capabilities
&= ~(IFCAP_TSO4
| IFCAP_TSO6
);
1308 ifp
->if_capenable
&= ~(IFCAP_TSO4
| IFCAP_TSO6
);
1310 ifp
->if_hwassist
&= ~CSUM_TSO
;
1317 ether_ifattach(ifp
, p
->hw_addr
);
1319 * Only default to jumbo frames on 10GigE
1321 if (p
->adapter
->params
.nports
<= 2)
1324 if ((err
= cxgb_makedev(p
)) != 0) {
1325 printf("makedev failed %d\n", err
);
1329 ifmedia_init(&p
->media
, IFM_IMASK
, cxgb_media_change
,
1332 if (!strcmp(p
->port_type
->desc
, "10GBASE-CX4")) {
1333 media_flags
= IFM_ETHER
| IFM_10G_CX4
| IFM_FDX
;
1334 } else if (!strcmp(p
->port_type
->desc
, "10GBASE-SR")) {
1335 media_flags
= IFM_ETHER
| IFM_10G_SR
| IFM_FDX
;
1336 } else if (!strcmp(p
->port_type
->desc
, "10GBASE-XR")) {
1337 media_flags
= IFM_ETHER
| IFM_10G_LR
| IFM_FDX
;
1338 } else if (!strcmp(p
->port_type
->desc
, "10/100/1000BASE-T")) {
1339 ifmedia_add(&p
->media
, IFM_ETHER
| IFM_10_T
, 0, NULL
);
1340 ifmedia_add(&p
->media
, IFM_ETHER
| IFM_10_T
| IFM_FDX
,
1342 ifmedia_add(&p
->media
, IFM_ETHER
| IFM_100_TX
,
1344 ifmedia_add(&p
->media
, IFM_ETHER
| IFM_100_TX
| IFM_FDX
,
1346 ifmedia_add(&p
->media
, IFM_ETHER
| IFM_1000_T
| IFM_FDX
,
1350 printf("unsupported media type %s\n", p
->port_type
->desc
);
1359 ifmedia_add(&p
->media
, media_flags
, 0, NULL
);
1360 ifmedia_set(&p
->media
, media_flags
);
1362 ifmedia_add(&p
->media
, IFM_ETHER
| IFM_AUTO
, 0, NULL
);
1363 ifmedia_set(&p
->media
, IFM_ETHER
| IFM_AUTO
);
1366 snprintf(p
->taskqbuf
, TASKQ_NAME_LEN
, "cxgb_port_taskq%d", p
->port_id
);
1368 #ifdef TASKQUEUE_CURRENT
1369 /* Create a port for handling TX without starvation */
1370 p
->tq
= taskqueue_create(p
->taskqbuf
, M_NOWAIT
,
1371 taskqueue_thread_enqueue
, &p
->tq
);
1373 /* Create a port for handling TX without starvation */
1374 p
->tq
= taskqueue_create_fast(p
->taskqbuf
, M_NOWAIT
,
1375 taskqueue_thread_enqueue
, &p
->tq
);
1378 if (p
->tq
== NULL
) {
1379 device_printf(dev
, "failed to allocate port task queue\n");
1382 taskqueue_start_threads(&p
->tq
, 1, PI_NET
, "%s taskq",
1383 device_get_nameunit(dev
));
1385 TASK_INIT(&p
->start_task
, 0, cxgb_start_proc
, ifp
);
1388 p
->start_task
.name
= "cxgb_start_proc";
1389 p
->start_task
.func
= cxgb_start_proc
;
1390 p
->start_task
.context
= ifp
;
1391 kthread_create(PRI_NONE
, 0, NULL
, cxgb_make_task
, &p
->start_task
, NULL
, "cxgb_make_task");
1394 t3_sge_init_port(p
);
1403 cxgb_port_detach(device_t dev
)
1406 cxgb_port_detach(device_t dev
, int flags
)
1409 struct port_info
*p
;
1412 p
= device_get_softc(dev
);
1415 p
= (struct port_info
*)dev
; // device is first thing in adapter
1419 if (p
->ifp
->if_drv_flags
& IFF_DRV_RUNNING
)
1420 cxgb_stop_locked(p
);
1424 if (p
->tq
!= NULL
) {
1425 taskqueue_drain(p
->tq
, &p
->start_task
);
1426 taskqueue_free(p
->tq
);
1431 if (p
->start_task
.wq
!= NULL
) {
1432 workqueue_destroy(p
->start_task
.wq
);
1433 p
->start_task
.wq
= NULL
;
1437 ether_ifdetach(p
->ifp
);
1439 * the lock may be acquired in ifdetach
1441 PORT_LOCK_DEINIT(p
);
1445 if (p
->port_cdev
!= NULL
)
1446 destroy_dev(p
->port_cdev
);
1456 t3_fatal_err(struct adapter
*sc
)
1460 if (sc
->flags
& FULL_INIT_DONE
) {
1462 t3_write_reg(sc
, A_XGM_TX_CTRL
, 0);
1463 t3_write_reg(sc
, A_XGM_RX_CTRL
, 0);
1464 t3_write_reg(sc
, XGM_REG(A_XGM_TX_CTRL
, 1), 0);
1465 t3_write_reg(sc
, XGM_REG(A_XGM_RX_CTRL
, 1), 0);
1466 t3_intr_disable(sc
);
1468 device_printf(sc
->dev
,"encountered fatal error, operation suspended\n");
1469 if (!t3_cim_ctl_blk_read(sc
, 0xa0, 4, fw_status
))
1470 device_printf(sc
->dev
, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
1471 fw_status
[0], fw_status
[1], fw_status
[2], fw_status
[3]);
1475 t3_os_find_pci_capability(adapter_t
*sc
, int cap
)
1479 struct pci_devinfo
*dinfo
;
1485 dinfo
= device_get_ivars(dev
);
1488 status
= pci_read_config(dev
, PCIR_STATUS
, 2);
1489 if (!(status
& PCIM_STATUS_CAPPRESENT
))
1492 switch (cfg
->hdrtype
& PCIM_HDRTYPE
) {
1498 ptr
= PCIR_CAP_PTR_2
;
1504 ptr
= pci_read_config(dev
, ptr
, 1);
1507 if (pci_read_config(dev
, ptr
+ PCICAP_ID
, 1) == cap
)
1509 ptr
= pci_read_config(dev
, ptr
+ PCICAP_NEXTPTR
, 1);
1518 status
= pci_conf_read(sc
->pa
.pa_pc
, sc
->pa
.pa_tag
, PCI_COMMAND_STATUS_REG
);
1519 if (!(status
&PCI_STATUS_CAPLIST_SUPPORT
))
1521 bhlc
= pci_conf_read(sc
->pa
.pa_pc
, sc
->pa
.pa_tag
, PCI_BHLC_REG
);
1522 switch (PCI_HDRTYPE(bhlc
))
1526 ptr
= PCI_CAPLISTPTR_REG
;
1529 ptr
= PCI_CARDBUS_CAPLISTPTR_REG
;
1534 temp
= pci_conf_read(sc
->pa
.pa_pc
, sc
->pa
.pa_tag
, ptr
);
1535 ptr
= PCI_CAPLIST_PTR(temp
);
1537 temp
= pci_conf_read(sc
->pa
.pa_pc
, sc
->pa
.pa_tag
, ptr
);
1538 if (PCI_CAPLIST_CAP(temp
) == cap
)
1540 ptr
= PCI_CAPLIST_NEXT(temp
);
1548 t3_os_pci_save_state(struct adapter
*sc
)
1552 struct pci_devinfo
*dinfo
;
1555 dinfo
= device_get_ivars(dev
);
1557 pci_cfg_save(dev
, dinfo
, 0);
1566 t3_os_pci_restore_state(struct adapter
*sc
)
1570 struct pci_devinfo
*dinfo
;
1573 dinfo
= device_get_ivars(dev
);
1575 pci_cfg_restore(dev
, dinfo
);
1584 * t3_os_link_changed - handle link status changes
1585 * @adapter: the adapter associated with the link change
1586 * @port_id: the port index whose limk status has changed
1587 * @link_stat: the new status of the link
1588 * @speed: the new speed setting
1589 * @duplex: the new duplex setting
1590 * @fc: the new flow-control setting
1592 * This is the OS-dependent handler for link status changes. The OS
1593 * neutral handler takes care of most of the processing for these events,
1594 * then calls this handler for any OS-specific processing.
1597 t3_os_link_changed(adapter_t
*adapter
, int port_id
, int link_status
, int speed
,
1600 struct port_info
*pi
= &adapter
->port
[port_id
];
1601 struct cmac
*mac
= &adapter
->port
[port_id
].mac
;
1603 if ((pi
->ifp
->if_flags
& IFF_UP
) == 0)
1607 t3_mac_enable(mac
, MAC_DIRECTION_RX
);
1608 if_link_state_change(pi
->ifp
, LINK_STATE_UP
);
1610 if_link_state_change(pi
->ifp
, LINK_STATE_DOWN
);
1611 pi
->phy
.ops
->power_down(&pi
->phy
, 1);
1612 t3_mac_disable(mac
, MAC_DIRECTION_RX
);
1613 t3_link_start(&pi
->phy
, mac
, &pi
->link_config
);
1618 * Interrupt-context handler for external (PHY) interrupts.
1621 t3_os_ext_intr_handler(adapter_t
*sc
)
1624 printf("t3_os_ext_intr_handler\n");
1626 * Schedule a task to handle external interrupts as they may be slow
1627 * and we use a mutex to protect MDIO registers. We disable PHY
1628 * interrupts in the meantime and let the task reenable them when
1632 if (sc
->slow_intr_mask
) {
1633 sc
->slow_intr_mask
&= ~F_T3DBG
;
1634 t3_write_reg(sc
, A_PL_INT_ENABLE0
, sc
->slow_intr_mask
);
1636 taskqueue_enqueue(sc
->tq
, &sc
->ext_intr_task
);
1639 workqueue_enqueue(sc
->ext_intr_task
.wq
, &sc
->ext_intr_task
.w
, NULL
);
1646 t3_os_set_hw_addr(adapter_t
*adapter
, int port_idx
, u8 hw_addr
[])
1650 * The ifnet might not be allocated before this gets called,
1651 * as this is called early on in attach by t3_prep_adapter
1652 * save the address off in the port structure
1656 printf("set_hw_addr on idx %d addr %6D\n", port_idx
, hw_addr
, ":");
1659 printf("set_hw_addr on idx %d addr %02x:%02x:%02x:%02x:%02x:%02x\n",
1660 port_idx
, hw_addr
[0], hw_addr
[1], hw_addr
[2], hw_addr
[3], hw_addr
[4], hw_addr
[5]);
1662 memcpy(adapter
->port
[port_idx
].hw_addr
, hw_addr
, ETHER_ADDR_LEN
);
1666 * link_start - enable a port
1667 * @p: the port to enable
1669 * Performs the MAC and PHY actions needed to enable a port.
1672 cxgb_link_start(struct port_info
*p
)
1675 struct t3_rx_mode rm
;
1676 struct cmac
*mac
= &p
->mac
;
1680 t3_init_rx_mode(&rm
, p
);
1681 if (!mac
->multiport
)
1683 t3_mac_set_mtu(mac
, ifp
->if_mtu
+ ETHER_HDR_LEN
+ ETHER_VLAN_ENCAP_LEN
);
1684 t3_mac_set_address(mac
, 0, p
->hw_addr
);
1685 t3_mac_set_rx_mode(mac
, &rm
);
1686 t3_link_start(&p
->phy
, mac
, &p
->link_config
);
1687 t3_mac_enable(mac
, MAC_DIRECTION_RX
| MAC_DIRECTION_TX
);
1691 * setup_rss - configure Receive Side Steering (per-queue connection demux)
1692 * @adap: the adapter
1694 * Sets up RSS to distribute packets to multiple receive queues. We
1695 * configure the RSS CPU lookup table to distribute to the number of HW
1696 * receive queues, and the response queue lookup table to narrow that
1697 * down to the response queues actually configured for each port.
1698 * We always configure the RSS mapping for two ports since the mapping
1699 * table has plenty of entries.
1702 setup_rss(adapter_t
*adap
)
1706 uint8_t cpus
[SGE_QSETS
+ 1];
1707 uint16_t rspq_map
[RSS_TABLE_SIZE
];
1709 for (i
= 0; i
< SGE_QSETS
; ++i
)
1711 cpus
[SGE_QSETS
] = 0xff;
1714 for_each_port(adap
, i
) {
1715 const struct port_info
*pi
= adap2pinfo(adap
, i
);
1717 nq
[pi
->tx_chan
] += pi
->nqsets
;
1719 nq
[0] = max(nq
[0], 1U);
1720 nq
[1] = max(nq
[1], 1U);
1721 for (i
= 0; i
< RSS_TABLE_SIZE
/ 2; ++i
) {
1722 rspq_map
[i
] = i
% nq
[0];
1723 rspq_map
[i
+ RSS_TABLE_SIZE
/ 2] = (i
% nq
[1]) + nq
[0];
1725 /* Calculate the reverse RSS map table */
1726 for (i
= 0; i
< RSS_TABLE_SIZE
; ++i
)
1727 if (adap
->rrss_map
[rspq_map
[i
]] == 0xff)
1728 adap
->rrss_map
[rspq_map
[i
]] = i
;
1730 t3_config_rss(adap
, F_RQFEEDBACKENABLE
| F_TNLLKPEN
| F_TNLMAPEN
|
1731 F_TNLPRTEN
| F_TNL2TUPEN
| F_TNL4TUPEN
| F_OFDMAPEN
|
1732 V_RRCPLCPUSIZE(6), cpus
, rspq_map
);
1737 * Sends an mbuf to an offload queue driver
1738 * after dealing with any active network taps.
1741 offload_tx(struct toedev
*tdev
, struct mbuf
*m
)
1746 ret
= t3_offload_tx(tdev
, m
);
1753 write_smt_entry(struct adapter
*adapter
, int idx
)
1755 struct port_info
*pi
= &adapter
->port
[idx
];
1756 struct cpl_smt_write_req
*req
;
1759 if ((m
= m_gethdr(M_NOWAIT
, MT_DATA
)) == NULL
)
1762 req
= mtod(m
, struct cpl_smt_write_req
*);
1763 req
->wr
.wr_hi
= htonl(V_WR_OP(FW_WROPCODE_FORWARD
));
1764 OPCODE_TID(req
) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ
, idx
));
1765 req
->mtu_idx
= NMTUS
- 1; /* should be 0 but there's a T3 bug */
1767 memset(req
->src_mac1
, 0, sizeof(req
->src_mac1
));
1768 memcpy(req
->src_mac0
, pi
->hw_addr
, ETHER_ADDR_LEN
);
1770 m_set_priority(m
, 1);
1772 offload_tx(&adapter
->tdev
, m
);
1778 init_smt(struct adapter
*adapter
)
1782 for_each_port(adapter
, i
)
1783 write_smt_entry(adapter
, i
);
1788 init_port_mtus(adapter_t
*adapter
)
1790 unsigned int mtus
= adapter
->port
[0].ifp
->if_mtu
;
1792 if (adapter
->port
[1].ifp
)
1793 mtus
|= adapter
->port
[1].ifp
->if_mtu
<< 16;
1794 t3_write_reg(adapter
, A_TP_MTU_PORT_TABLE
, mtus
);
1799 send_pktsched_cmd(struct adapter
*adap
, int sched
, int qidx
, int lo
,
1803 struct mngt_pktsched_wr
*req
;
1805 m
= m_gethdr(M_DONTWAIT
, MT_DATA
);
1807 req
= mtod(m
, struct mngt_pktsched_wr
*);
1808 req
->wr_hi
= htonl(V_WR_OP(FW_WROPCODE_MNGT
));
1809 req
->mngt_opcode
= FW_MNGTOPCODE_PKTSCHED_SET
;
1814 req
->binding
= port
;
1815 m
->m_len
= m
->m_pkthdr
.len
= sizeof(*req
);
1816 t3_mgmt_tx(adap
, m
);
1821 bind_qsets(adapter_t
*sc
)
1825 for (i
= 0; i
< (sc
)->params
.nports
; ++i
) {
1826 const struct port_info
*pi
= adap2pinfo(sc
, i
);
1828 for (j
= 0; j
< pi
->nqsets
; ++j
) {
1829 send_pktsched_cmd(sc
, 1, pi
->first_qset
+ j
, -1,
1838 update_tpeeprom(struct adapter
*adap
)
1840 #ifdef FIRMWARE_LATEST
1841 const struct firmware
*tpeeprom
;
1843 struct firmware
*tpeeprom
;
1848 unsigned int major
, minor
;
1852 t3_seeprom_read(adap
, TP_SRAM_OFFSET
, &vers
);
1854 major
= G_TP_VERSION_MAJOR(vers
);
1855 minor
= G_TP_VERSION_MINOR(vers
);
1856 if (major
== TP_VERSION_MAJOR
&& minor
== TP_VERSION_MINOR
)
1859 rev
= t3rev2char(adap
);
1861 snprintf(buf
, sizeof(buf
), TPEEPROM_NAME
, rev
,
1862 TP_VERSION_MAJOR
, TP_VERSION_MINOR
, TP_VERSION_MICRO
);
1865 tpeeprom
= firmware_get(buf
);
1870 if (tpeeprom
== NULL
) {
1872 device_printf(adap
->dev
, "could not load TP EEPROM: unable to load %s\n",
1878 len
= tpeeprom
->datasize
- 4;
1880 ret
= t3_check_tpsram(adap
, tpeeprom
->data
, tpeeprom
->datasize
);
1882 goto release_tpeeprom
;
1884 if (len
!= TP_SRAM_LEN
) {
1886 device_printf(adap
->dev
, "%s length is wrong len=%d expected=%d\n", buf
, len
, TP_SRAM_LEN
);
1891 ret
= set_eeprom(&adap
->port
[0], tpeeprom
->data
, tpeeprom
->datasize
,
1896 device_printf(adap
->dev
,
1897 "Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1898 TP_VERSION_MAJOR
, TP_VERSION_MINOR
, TP_VERSION_MICRO
);
1900 device_printf(adap
->dev
, "Protocol SRAM image update in EEPROM failed\n");
1904 printf("Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1905 TP_VERSION_MAJOR
, TP_VERSION_MINOR
, TP_VERSION_MICRO
);
1907 printf("Protocol SRAM image update in EEPROM failed\n");
1911 firmware_put(tpeeprom
, FIRMWARE_UNLOAD
);
1917 update_tpsram(struct adapter
*adap
)
1919 #ifdef FIRMWARE_LATEST
1920 const struct firmware
*tpsram
;
1922 struct firmware
*tpsram
;
1928 rev
= t3rev2char(adap
);
1932 update_tpeeprom(adap
);
1934 snprintf(buf
, sizeof(buf
), TPSRAM_NAME
, rev
,
1935 TP_VERSION_MAJOR
, TP_VERSION_MINOR
, TP_VERSION_MICRO
);
1937 tpsram
= firmware_get(buf
);
1938 if (tpsram
== NULL
){
1940 device_printf(adap
->dev
, "could not load TP SRAM: unable to load %s\n",
1944 printf("could not load TP SRAM: unable to load %s\n", buf
);
1949 device_printf(adap
->dev
, "updating TP SRAM with %s\n", buf
);
1952 printf("updating TP SRAM with %s\n", buf
);
1955 ret
= t3_check_tpsram(adap
, tpsram
->data
, tpsram
->datasize
);
1957 goto release_tpsram
;
1959 ret
= t3_set_proto_sram(adap
, tpsram
->data
);
1961 device_printf(adap
->dev
, "loading protocol SRAM failed\n");
1964 firmware_put(tpsram
, FIRMWARE_UNLOAD
);
1971 * cxgb_up - enable the adapter
1972 * @adap: adapter being enabled
1974 * Called when the first port is enabled, this function performs the
1975 * actions necessary to make an adapter operational, such as completing
1976 * the initialization of HW modules, and enabling interrupts.
1980 cxgb_up(struct adapter
*sc
)
1984 if ((sc
->flags
& FULL_INIT_DONE
) == 0) {
1987 if ((sc
->flags
& FW_UPTODATE
) == 0)
1988 if ((err
= upgrade_fw(sc
)))
1990 if ((sc
->flags
& TPS_UPTODATE
) == 0)
1991 if ((err
= update_tpsram(sc
)))
1995 if ((sc
->flags
& FW_UPTODATE
) == 0)
1996 printf("SHOULD UPGRADE FIRMWARE!\n");
1997 if ((sc
->flags
& TPS_UPTODATE
) == 0)
1998 printf("SHOULD UPDATE TPSRAM\n");
2000 err
= t3_init_hw(sc
, 0);
2004 t3_write_reg(sc
, A_ULPRX_TDDP_PSZ
, V_HPZ0(PAGE_SHIFT
- 12));
2006 err
= setup_sge_qsets(sc
);
2011 sc
->flags
|= FULL_INIT_DONE
;
2017 /* If it's MSI or INTx, allocate a single interrupt for everything */
2018 if ((sc
->flags
& USING_MSIX
) == 0) {
2019 if ((sc
->irq_res
= bus_alloc_resource_any(sc
->dev
, SYS_RES_IRQ
,
2020 &sc
->irq_rid
, RF_SHAREABLE
| RF_ACTIVE
)) == NULL
) {
2021 device_printf(sc
->dev
, "Cannot allocate interrupt rid=%d\n",
2026 device_printf(sc
->dev
, "allocated irq_res=%p\n", sc
->irq_res
);
2028 if (bus_setup_intr(sc
->dev
, sc
->irq_res
, INTR_MPSAFE
|INTR_TYPE_NET
,
2032 sc
->cxgb_intr
, sc
, &sc
->intr_tag
)) {
2033 device_printf(sc
->dev
, "Cannot set up interrupt\n");
2038 cxgb_setup_msix(sc
, sc
->msi_count
);
2042 /* If it's MSI or INTx, allocate a single interrupt for everything */
2043 if ((sc
->flags
& USING_MSIX
) == 0) {
2044 if (pci_intr_map(&sc
->pa
, &sc
->intr_handle
))
2046 device_printf(sc
->dev
, "Cannot allocate interrupt\n");
2050 device_printf(sc
->dev
, "allocated intr_handle=%p\n", sc
->intr_handle
);
2051 sc
->intr_cookie
= pci_intr_establish(sc
->pa
.pa_pc
,
2052 sc
->intr_handle
, IPL_NET
,
2054 if (sc
->intr_cookie
== NULL
)
2056 device_printf(sc
->dev
, "Cannot establish interrupt\n");
2061 printf("Using MSIX?!?!?!\n");
2063 cxgb_setup_msix(sc
, sc
->msi_count
);
2070 if (!(sc
->flags
& QUEUES_BOUND
)) {
2072 sc
->flags
|= QUEUES_BOUND
;
2077 CH_ERR(sc
, "request_irq failed, err %d\n", err
);
2083 * Release resources when all the ports and offloading have been stopped.
2086 cxgb_down_locked(struct adapter
*sc
)
2093 t3_intr_disable(sc
);
2096 if (sc
->intr_tag
!= NULL
) {
2097 bus_teardown_intr(sc
->dev
, sc
->irq_res
, sc
->intr_tag
);
2098 sc
->intr_tag
= NULL
;
2100 if (sc
->irq_res
!= NULL
) {
2101 device_printf(sc
->dev
, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
2102 sc
->irq_rid
, sc
->irq_res
);
2103 bus_release_resource(sc
->dev
, SYS_RES_IRQ
, sc
->irq_rid
,
2109 INT3
; // XXXXXXXXXXXXXXXXXX
2112 if (sc
->flags
& USING_MSIX
)
2113 cxgb_teardown_msix(sc
);
2116 callout_drain(&sc
->cxgb_tick_ch
);
2117 callout_drain(&sc
->sge_timer_ch
);
2120 if (sc
->tq
!= NULL
) {
2121 taskqueue_drain(sc
->tq
, &sc
->slow_intr_task
);
2122 for (i
= 0; i
< sc
->params
.nports
; i
++)
2123 taskqueue_drain(sc
->tq
, &sc
->port
[i
].timer_reclaim_task
);
2128 if (sc
->port
[i
].tq
!= NULL
)
2135 offload_open(struct port_info
*pi
)
2137 struct adapter
*adapter
= pi
->adapter
;
2138 struct toedev
*tdev
= TOEDEV(pi
->ifp
);
2139 int adap_up
= adapter
->open_device_map
& PORT_MASK
;
2142 if (atomic_cmpset_int(&adapter
->open_device_map
,
2143 (adapter
->open_device_map
& ~OFFLOAD_DEVMAP_BIT
),
2144 (adapter
->open_device_map
| OFFLOAD_DEVMAP_BIT
)) == 0)
2147 ADAPTER_LOCK(pi
->adapter
);
2149 err
= cxgb_up(adapter
);
2150 ADAPTER_UNLOCK(pi
->adapter
);
2154 t3_tp_set_offload_mode(adapter
, 1);
2155 tdev
->lldev
= adapter
->port
[0].ifp
;
2156 err
= cxgb_offload_activate(adapter
);
2160 init_port_mtus(adapter
);
2161 t3_load_mtus(adapter
, adapter
->params
.mtus
, adapter
->params
.a_wnd
,
2162 adapter
->params
.b_wnd
,
2163 adapter
->params
.rev
== 0 ?
2164 adapter
->port
[0].ifp
->if_mtu
: 0xffff);
2167 /* Call back all registered clients */
2168 cxgb_add_clients(tdev
);
2171 /* restore them in case the offload module has changed them */
2173 t3_tp_set_offload_mode(adapter
, 0);
2174 clrbit(&adapter
->open_device_map
, OFFLOAD_DEVMAP_BIT
);
2175 cxgb_set_dummy_ops(tdev
);
2181 offload_close(struct toedev
*tdev
)
2183 struct adapter
*adapter
= tdev2adap(tdev
);
2185 if (!isset(&adapter
->open_device_map
, OFFLOAD_DEVMAP_BIT
))
2188 /* Call back all registered clients */
2189 cxgb_remove_clients(tdev
);
2191 cxgb_set_dummy_ops(tdev
);
2192 t3_tp_set_offload_mode(adapter
, 0);
2193 clrbit(&adapter
->open_device_map
, OFFLOAD_DEVMAP_BIT
);
2195 if (!adapter
->open_device_map
)
2198 cxgb_offload_deactivate(adapter
);
2206 cxgb_init(void *arg
)
2210 cxgb_init(struct ifnet
*ifp
)
2214 struct port_info
*p
= arg
;
2217 struct port_info
*p
= ifp
->if_softc
;
2221 cxgb_init_locked(p
);
2225 return (0); // ????????????
2230 cxgb_init_locked(struct port_info
*p
)
2233 adapter_t
*sc
= p
->adapter
;
2236 PORT_LOCK_ASSERT_OWNED(p
);
2239 ADAPTER_LOCK(p
->adapter
);
2240 if ((sc
->open_device_map
== 0) && (err
= cxgb_up(sc
))) {
2241 ADAPTER_UNLOCK(p
->adapter
);
2242 cxgb_stop_locked(p
);
2245 if (p
->adapter
->open_device_map
== 0) {
2247 t3_sge_init_adapter(sc
);
2249 setbit(&p
->adapter
->open_device_map
, p
->port_id
);
2250 ADAPTER_UNLOCK(p
->adapter
);
2253 if (is_offload(sc
) && !ofld_disable
) {
2254 err
= offload_open(p
);
2257 "Could not initialize offload capabilities\n");
2261 t3_link_changed(sc
, p
->port_id
);
2262 ifp
->if_baudrate
= p
->link_config
.speed
* 1000000;
2264 device_printf(sc
->dev
, "enabling interrupts on port=%d\n", p
->port_id
);
2265 t3_port_intr_enable(sc
, p
->port_id
);
2267 callout_reset(&sc
->cxgb_tick_ch
, sc
->params
.stats_update_period
* hz
,
2270 ifp
->if_drv_flags
|= IFF_DRV_RUNNING
;
2271 ifp
->if_drv_flags
&= ~IFF_DRV_OACTIVE
;
2275 cxgb_set_rxmode(struct port_info
*p
)
2277 struct t3_rx_mode rm
;
2278 struct cmac
*mac
= &p
->mac
;
2280 PORT_LOCK_ASSERT_OWNED(p
);
2282 t3_init_rx_mode(&rm
, p
);
2283 t3_mac_set_rx_mode(mac
, &rm
);
2287 cxgb_stop_locked(struct port_info
*p
)
2291 PORT_LOCK_ASSERT_OWNED(p
);
2292 ADAPTER_LOCK_ASSERT_NOTOWNED(p
->adapter
);
2296 t3_port_intr_disable(p
->adapter
, p
->port_id
);
2297 ifp
->if_drv_flags
&= ~(IFF_DRV_RUNNING
| IFF_DRV_OACTIVE
);
2298 p
->phy
.ops
->power_down(&p
->phy
, 1);
2299 t3_mac_disable(&p
->mac
, MAC_DIRECTION_TX
| MAC_DIRECTION_RX
);
2301 ADAPTER_LOCK(p
->adapter
);
2302 clrbit(&p
->adapter
->open_device_map
, p
->port_id
);
2305 if (p
->adapter
->open_device_map
== 0) {
2306 cxgb_down_locked(p
->adapter
);
2308 ADAPTER_UNLOCK(p
->adapter
);
2313 cxgb_set_mtu(struct port_info
*p
, int mtu
)
2315 struct ifnet
*ifp
= p
->ifp
;
2321 if ((mtu
< ETHERMIN
) || (mtu
> ETHER_MAX_LEN_JUMBO
))
2323 else if ((error
= ifioctl_common(ifp
, SIOCSIFMTU
, &ifr
)) == ENETRESET
) {
2326 if (ifp
->if_drv_flags
& IFF_DRV_RUNNING
) {
2327 callout_stop(&p
->adapter
->cxgb_tick_ch
);
2328 cxgb_stop_locked(p
);
2329 cxgb_init_locked(p
);
2338 cxgb_ioctl(struct ifnet
*ifp
, unsigned long command
, caddr_t data
)
2341 cxgb_ioctl(struct ifnet
*ifp
, unsigned long command
, void *data
)
2344 struct port_info
*p
= ifp
->if_softc
;
2345 struct ifaddr
*ifa
= (struct ifaddr
*)data
;
2346 struct ifreq
*ifr
= (struct ifreq
*)data
;
2347 int flags
, error
= 0;
2351 * XXX need to check that we aren't in the middle of an unload
2353 printf("cxgb_ioctl(%d): command=%08lx\n", __LINE__
, command
);
2356 error
= cxgb_set_mtu(p
, ifr
->ifr_mtu
);
2357 printf("SIOCSIFMTU: error=%d\n", error
);
2359 case SIOCINITIFADDR
:
2360 printf("SIOCINITIFADDR:\n");
2362 if (ifa
->ifa_addr
->sa_family
== AF_INET
) {
2363 ifp
->if_flags
|= IFF_UP
;
2364 if (!(ifp
->if_drv_flags
& IFF_DRV_RUNNING
))
2365 cxgb_init_locked(p
);
2366 arp_ifinit(ifp
, ifa
);
2368 error
= ether_ioctl(ifp
, command
, data
);
2372 printf("SIOCSIFFLAGS:\n");
2373 if ((error
= ifioctl_common(ifp
, cmd
, data
)) != 0)
2375 callout_drain(&p
->adapter
->cxgb_tick_ch
);
2377 if (ifp
->if_flags
& IFF_UP
) {
2378 if (ifp
->if_drv_flags
& IFF_DRV_RUNNING
) {
2379 flags
= p
->if_flags
;
2380 if (((ifp
->if_flags
^ flags
) & IFF_PROMISC
) ||
2381 ((ifp
->if_flags
^ flags
) & IFF_ALLMULTI
))
2384 cxgb_init_locked(p
);
2385 p
->if_flags
= ifp
->if_flags
;
2386 } else if (ifp
->if_drv_flags
& IFF_DRV_RUNNING
)
2387 cxgb_stop_locked(p
);
2389 if (ifp
->if_drv_flags
& IFF_DRV_RUNNING
) {
2390 adapter_t
*sc
= p
->adapter
;
2391 callout_reset(&sc
->cxgb_tick_ch
,
2392 sc
->params
.stats_update_period
* hz
,
2398 printf("SIOCSIFMEDIA:\n");
2400 error
= ifmedia_ioctl(ifp
, ifr
, &p
->media
, command
);
2401 printf("SIOCGIFMEDIA: error=%d\n", error
);
2405 printf("SIOCSIFCAP:\n");
2407 mask
= ifr
->ifr_reqcap
^ ifp
->if_capenable
;
2408 if (mask
& IFCAP_TXCSUM
) {
2409 if (IFCAP_TXCSUM
& ifp
->if_capenable
) {
2410 ifp
->if_capenable
&= ~(IFCAP_TXCSUM
|IFCAP_TSO4
);
2411 ifp
->if_hwassist
&= ~(CSUM_TCP
| CSUM_UDP
2414 ifp
->if_capenable
|= IFCAP_TXCSUM
;
2415 ifp
->if_hwassist
|= (CSUM_TCP
| CSUM_UDP
);
2417 } else if (mask
& IFCAP_RXCSUM
) {
2418 if (IFCAP_RXCSUM
& ifp
->if_capenable
) {
2419 ifp
->if_capenable
&= ~IFCAP_RXCSUM
;
2421 ifp
->if_capenable
|= IFCAP_RXCSUM
;
2424 if (mask
& IFCAP_TSO4
) {
2425 if (IFCAP_TSO4
& ifp
->if_capenable
) {
2426 ifp
->if_capenable
&= ~IFCAP_TSO4
;
2427 ifp
->if_hwassist
&= ~CSUM_TSO
;
2428 } else if (IFCAP_TXCSUM
& ifp
->if_capenable
) {
2429 ifp
->if_capenable
|= IFCAP_TSO4
;
2430 ifp
->if_hwassist
|= CSUM_TSO
;
2433 printf("cxgb requires tx checksum offload"
2434 " be enabled to use TSO\n");
2440 #endif /* __FreeBSD__ */
2442 printf("Dir = %x Len = %x Group = '%c' Num = %x\n",
2443 (unsigned int)(command
&0xe0000000)>>28, (unsigned int)(command
&0x1fff0000)>>16,
2444 (unsigned int)(command
&0xff00)>>8, (unsigned int)command
&0xff);
2445 if ((error
= ether_ioctl(ifp
, command
, data
)) != ENETRESET
)
2454 cxgb_start_tx(struct ifnet
*ifp
, uint32_t txmax
)
2456 struct sge_qset
*qs
;
2457 struct sge_txq
*txq
;
2458 struct port_info
*p
= ifp
->if_softc
;
2459 struct mbuf
*m
= NULL
;
2460 int err
, in_use_init
, free_it
;
2462 if (!p
->link_config
.link_ok
)
2468 if (IFQ_DRV_IS_EMPTY(&ifp
->if_snd
))
2471 if (IFQ_IS_EMPTY(&ifp
->if_snd
))
2477 qs
= &p
->adapter
->sge
.qs
[p
->first_qset
];
2478 txq
= &qs
->txq
[TXQ_ETH
];
2481 if (txq
->flags
& TXQ_TRANSMITTING
)
2483 return (EINPROGRESS
);
2486 mtx_lock(&txq
->lock
);
2487 txq
->flags
|= TXQ_TRANSMITTING
;
2488 in_use_init
= txq
->in_use
;
2489 while ((txq
->in_use
- in_use_init
< txmax
) &&
2490 (txq
->size
> txq
->in_use
+ TX_MAX_DESC
)) {
2493 IFQ_DRV_DEQUEUE(&ifp
->if_snd
, m
);
2496 IFQ_DEQUEUE(&ifp
->if_snd
, m
);
2501 * Convert chain to M_IOVEC
2504 KASSERT((m
->m_flags
& M_IOVEC
) == 0, ("IOVEC set too early"));
2507 KASSERT((m
->m_flags
& M_IOVEC
) == 0);
2511 if (collapse_mbufs
&& m
->m_pkthdr
.len
> MCLBYTES
&&
2512 m_collapse(m
, TX_MAX_SEGS
, &m0
) == EFBIG
) {
2513 if ((m0
= m_defrag(m
, M_NOWAIT
)) != NULL
) {
2515 m_collapse(m
, TX_MAX_SEGS
, &m0
);
2521 if ((err
= t3_encap(p
, &m
, &free_it
)) != 0)
2523 printf("t3_encap() returned %d\n", err
);
2530 // bpf_mtap(ifp, m);
2537 txq
->flags
&= ~TXQ_TRANSMITTING
;
2538 mtx_unlock(&txq
->lock
);
2540 if (__predict_false(err
)) {
2541 if (err
== ENOMEM
) {
2542 ifp
->if_drv_flags
|= IFF_DRV_OACTIVE
;
2544 IFQ_LOCK(&ifp
->if_snd
);
2545 IFQ_DRV_PREPEND(&ifp
->if_snd
, m
);
2546 IFQ_UNLOCK(&ifp
->if_snd
);
2549 // XXXXXXXXXX lock/unlock??
2550 IF_PREPEND(&ifp
->if_snd
, m
);
2554 if (err
== 0 && m
== NULL
)
2556 else if ((err
== 0) && (txq
->size
<= txq
->in_use
+ TX_MAX_DESC
) &&
2557 (ifp
->if_drv_flags
& IFF_DRV_OACTIVE
) == 0) {
2558 ifp
->if_drv_flags
|= IFF_DRV_OACTIVE
;
2566 cxgb_start_proc(void *arg
, int ncount
)
2569 cxgb_start_proc(struct work
*wk
, void *arg
)
2572 struct ifnet
*ifp
= arg
;
2573 struct port_info
*pi
= ifp
->if_softc
;
2574 struct sge_qset
*qs
;
2575 struct sge_txq
*txq
;
2578 qs
= &pi
->adapter
->sge
.qs
[pi
->first_qset
];
2579 txq
= &qs
->txq
[TXQ_ETH
];
2582 if (desc_reclaimable(txq
) > TX_CLEAN_MAX_DESC
>> 2)
2584 taskqueue_enqueue(pi
->tq
, &txq
->qreclaim_task
);
2587 workqueue_enqueue(pi
->timer_reclaim_task
.wq
, &pi
->timer_reclaim_task
.w
, NULL
);
2590 error
= cxgb_start_tx(ifp
, TX_START_MAX_DESC
);
2591 } while (error
== 0);
2595 cxgb_start(struct ifnet
*ifp
)
2597 struct port_info
*pi
= ifp
->if_softc
;
2598 struct sge_qset
*qs
;
2599 struct sge_txq
*txq
;
2602 qs
= &pi
->adapter
->sge
.qs
[pi
->first_qset
];
2603 txq
= &qs
->txq
[TXQ_ETH
];
2605 if (desc_reclaimable(txq
) > TX_CLEAN_MAX_DESC
>> 2)
2607 taskqueue_enqueue(pi
->tq
,
2608 &txq
->qreclaim_task
);
2611 workqueue_enqueue(pi
->timer_reclaim_task
.wq
, &pi
->timer_reclaim_task
.w
, NULL
);
2614 err
= cxgb_start_tx(ifp
, TX_START_MAX_DESC
);
2618 taskqueue_enqueue(pi
->tq
, &pi
->start_task
);
2621 workqueue_enqueue(pi
->start_task
.wq
, &pi
->start_task
.w
, NULL
);
2627 cxgb_stop(struct ifnet
*ifp
, int reason
)
2629 struct port_info
*pi
= ifp
->if_softc
;
2631 printf("cxgb_stop(): pi=%p, reason=%d\n", pi
, reason
);
2637 cxgb_media_change(struct ifnet
*ifp
)
2640 if_printf(ifp
, "media change not supported\n");
2643 printf("media change not supported: ifp=%p\n", ifp
);
2649 cxgb_media_status(struct ifnet
*ifp
, struct ifmediareq
*ifmr
)
2651 struct port_info
*p
;
2655 ifmr
->ifm_status
= IFM_AVALID
;
2656 ifmr
->ifm_active
= IFM_ETHER
;
2658 if (!p
->link_config
.link_ok
)
2661 ifmr
->ifm_status
|= IFM_ACTIVE
;
2663 switch (p
->link_config
.speed
) {
2665 ifmr
->ifm_active
|= IFM_10_T
;
2668 ifmr
->ifm_active
|= IFM_100_TX
;
2671 ifmr
->ifm_active
|= IFM_1000_T
;
2675 if (p
->link_config
.duplex
)
2676 ifmr
->ifm_active
|= IFM_FDX
;
2678 ifmr
->ifm_active
|= IFM_HDX
;
2687 cxgb_async_intr(void *data
)
2689 adapter_t
*sc
= data
;
2692 device_printf(sc
->dev
, "cxgb_async_intr\n");
2695 * May need to sleep - defer to taskqueue
2697 taskqueue_enqueue(sc
->tq
, &sc
->slow_intr_task
);
2701 * May need to sleep - defer to taskqueue
2703 workqueue_enqueue(sc
->slow_intr_task
.wq
, &sc
->slow_intr_task
.w
, NULL
);
2711 cxgb_ext_intr_handler(void *arg
, int count
)
2714 cxgb_ext_intr_handler(struct work
*wk
, void *arg
)
2717 adapter_t
*sc
= (adapter_t
*)arg
;
2720 printf("cxgb_ext_intr_handler\n");
2722 t3_phy_intr_handler(sc
);
2724 /* Now reenable external interrupts */
2726 if (sc
->slow_intr_mask
) {
2727 sc
->slow_intr_mask
|= F_T3DBG
;
2728 t3_write_reg(sc
, A_PL_INT_CAUSE0
, F_T3DBG
);
2729 t3_write_reg(sc
, A_PL_INT_ENABLE0
, sc
->slow_intr_mask
);
2735 check_link_status(adapter_t
*sc
)
2739 for (i
= 0; i
< (sc
)->params
.nports
; ++i
) {
2740 struct port_info
*p
= &sc
->port
[i
];
2742 if (!(p
->port_type
->caps
& SUPPORTED_IRQ
))
2743 t3_link_changed(sc
, i
);
2744 p
->ifp
->if_baudrate
= p
->link_config
.speed
* 1000000;
static void
check_t3b2_mac(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];
		struct ifnet *ifp = p->ifp;
		int status;

		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			continue;

		status = 0;
		PORT_LOCK(p);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN
			    + ETHER_VLAN_ENCAP_LEN);
			t3_mac_set_address(mac, 0, p->hw_addr);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
		PORT_UNLOCK(p);
	}
}
static void
cxgb_tick(void *arg)
{
	adapter_t *sc = (adapter_t *)arg;

#ifdef __FreeBSD__
	taskqueue_enqueue(sc->tq, &sc->tick_task);
#endif
#ifdef __NetBSD__
	workqueue_enqueue(sc->tick_task.wq, &sc->tick_task.w, NULL);
#endif

	if (sc->open_device_map != 0)
		callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
		    cxgb_tick, sc);
}
#ifdef __FreeBSD__
static void
cxgb_tick_handler(void *arg, int count)
#endif
#ifdef __NetBSD__
static void
cxgb_tick_handler(struct work *wk, void *arg)
#endif
{
	adapter_t *sc = (adapter_t *)arg;
	const struct adapter_params *p = &sc->params;

	ADAPTER_LOCK(sc);
	if (p->linkpoll_period)
		check_link_status(sc);

	/*
	 * adapter lock can currently only be acquired after the
	 * port lock
	 */
	ADAPTER_UNLOCK(sc);

	if (p->rev == T3_REV_B2 && p->nports < 4)
		check_t3b2_mac(sc);
}
static void
touch_bars(device_t dev)
{
	/*
	 * Don't enable yet
	 */
#if !defined(__LP64__) && 0
	u32 v;

	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
#endif
}
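
/*
 * EEPROM writes must be 32-bit aligned: the request is widened to a
 * 4-byte-aligned window and the bytes around it preserved with a
 * read-modify-write.  For example, offset 5 and len 6 yield
 * aligned_offset 4 and aligned_len (6 + 1 + 3) & ~3 == 8, covering
 * bytes 4..11.
 */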
static int
set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
{
	uint8_t *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = pi->adapter;

	aligned_offset = offset & ~3;
	aligned_len = (len + (offset & 3) + 3) & ~3;

	if (aligned_offset != offset || aligned_len != len) {
		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
		if (!buf)
			return (ENOMEM);
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
			    aligned_offset + aligned_len - 4,
			    (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (offset & 3), data, len);
	} else
		buf = (uint8_t *)(uintptr_t)data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		free(buf, M_DEVBUF);
	return err;
}
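
/*
 * Note that in_range() accepts any negative value: the ioctl parameter
 * blocks use -1 to mean "leave this setting unchanged", and each
 * negative field is skipped by an explicit ">= 0" test before it is
 * applied.
 */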
static __inline int
in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
static int
cxgb_extension_open(struct cdev *dev, int flags, int fmp, d_thread_t *td)
{
	return (0);
}

static int
cxgb_extension_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
{
	return (0);
}
static int
cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
    int fflag, struct thread *td)
{
	int mmd, error = 0;
	struct port_info *pi = dev->si_drv1;
	adapter_t *sc = pi->adapter;

#ifdef PRIV_SUPPORTED
	if (priv_check(td, PRIV_DRIVER)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#else
	if (suser(td)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#endif

	switch (cmd) {
	case SIOCGMIIREG: {
		uint32_t val;
		struct cphy *phy = &pi->phy;
		struct mii_data *mid = (struct mii_data *)data;

		if (!phy->mdio_read)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return (EINVAL);

			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
			    mid->reg_num, &val);
		} else
			error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
			    mid->reg_num & 0x1f, &val);
		if (error == 0)
			mid->val_out = val;
		break;
	}
	case SIOCSMIIREG: {
		struct cphy *phy = &pi->phy;
		struct mii_data *mid = (struct mii_data *)data;

		if (!phy->mdio_write)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return (EINVAL);

			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
			    mmd, mid->reg_num, mid->val_in);
		} else
			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
			    mid->reg_num & 0x1f,
			    mid->val_in);
		break;
	}
	case CHELSIO_SETREG: {
		struct ch_reg *edata = (struct ch_reg *)data;
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		t3_write_reg(sc, edata->addr, edata->val);
		break;
	}
	case CHELSIO_GETREG: {
		struct ch_reg *edata = (struct ch_reg *)data;
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		edata->val = t3_read_reg(sc, edata->addr);
		break;
	}
	case CHELSIO_GET_SGE_CONTEXT: {
		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
		mtx_lock(&sc->sge.reg_lock);
		switch (ecntxt->cntxt_type) {
		case CNTXT_TYPE_EGRESS:
			error = t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_FL:
			error = t3_sge_read_fl(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_RSP:
			error = t3_sge_read_rspq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_CQ:
			error = t3_sge_read_cq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		default:
			error = EINVAL;
			break;
		}
		mtx_unlock(&sc->sge.reg_lock);
		break;
	}
	case CHELSIO_GET_SGE_DESC: {
		struct ch_desc *edesc = (struct ch_desc *)data;
		int ret;
		if (edesc->queue_num >= SGE_QSETS * 6)
			return (EINVAL);
		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
		    edesc->queue_num % 6, edesc->idx, edesc->data);
		if (ret < 0)
			return (EINVAL);
		edesc->size = ret;
		break;
	}
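	/*
	 * Per-queue-set tunables.  Fields left at -1 mean "no change";
	 * the checks below reject sizing changes once FULL_INIT_DONE is
	 * set, since the rings are already allocated by then.
	 */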
	case CHELSIO_SET_QSET_PARAMS: {
		struct qset_params *q;
		struct ch_qset_params *t = (struct ch_qset_params *)data;

		if (t->qset_idx >= SGE_QSETS)
			return (EINVAL);
		if (!in_range(t->intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t->cong_thres, 0, 255) ||
		    !in_range(t->txq_size[0], MIN_TXQ_ENTRIES,
			MAX_TXQ_ENTRIES) ||
		    !in_range(t->txq_size[1], MIN_TXQ_ENTRIES,
			MAX_TXQ_ENTRIES) ||
		    !in_range(t->txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t->fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) ||
		    !in_range(t->fl_size[1], MIN_FL_ENTRIES,
			MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t->rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES))
			return (EINVAL);
		if ((sc->flags & FULL_INIT_DONE) &&
		    (t->rspq_size >= 0 || t->fl_size[0] >= 0 ||
		    t->fl_size[1] >= 0 || t->txq_size[0] >= 0 ||
		    t->txq_size[1] >= 0 || t->txq_size[2] >= 0 ||
		    t->polling >= 0 || t->cong_thres >= 0))
			return (EBUSY);

		q = &sc->params.sge.qset[t->qset_idx];

		if (t->rspq_size >= 0)
			q->rspq_size = t->rspq_size;
		if (t->fl_size[0] >= 0)
			q->fl_size = t->fl_size[0];
		if (t->fl_size[1] >= 0)
			q->jumbo_size = t->fl_size[1];
		if (t->txq_size[0] >= 0)
			q->txq_size[0] = t->txq_size[0];
		if (t->txq_size[1] >= 0)
			q->txq_size[1] = t->txq_size[1];
		if (t->txq_size[2] >= 0)
			q->txq_size[2] = t->txq_size[2];
		if (t->cong_thres >= 0)
			q->cong_thres = t->cong_thres;
		if (t->intr_lat >= 0) {
			struct sge_qset *qs = &sc->sge.qs[t->qset_idx];

			q->coalesce_nsecs = t->intr_lat * 1000;
			t3_update_qset_coalesce(qs, q);
		}
		break;
	}
	case CHELSIO_GET_QSET_PARAMS: {
		struct qset_params *q;
		struct ch_qset_params *t = (struct ch_qset_params *)data;

		if (t->qset_idx >= SGE_QSETS)
			return (EINVAL);

		q = &(sc)->params.sge.qset[t->qset_idx];
		t->rspq_size = q->rspq_size;
		t->txq_size[0] = q->txq_size[0];
		t->txq_size[1] = q->txq_size[1];
		t->txq_size[2] = q->txq_size[2];
		t->fl_size[0] = q->fl_size;
		t->fl_size[1] = q->jumbo_size;
		t->polling = q->polling;
		t->intr_lat = q->coalesce_nsecs / 1000;
		t->cong_thres = q->cong_thres;
		break;
	}
	case CHELSIO_SET_QSET_NUM: {
		struct ch_reg *edata = (struct ch_reg *)data;
		unsigned int port_idx = pi->port_id;

		if (sc->flags & FULL_INIT_DONE)
			return (EBUSY);
		if (edata->val < 1 ||
		    (edata->val > 1 && !(sc->flags & USING_MSIX)))
			return (EINVAL);
		if (edata->val + sc->port[!port_idx].nqsets > SGE_QSETS)
			return (EINVAL);
		sc->port[port_idx].nqsets = edata->val;
		sc->port[0].first_qset = 0;
		/*
		 * XXX hardcode ourselves to 2 ports just like LEEENUX
		 */
		sc->port[1].first_qset = sc->port[0].nqsets;
		break;
	}
	case CHELSIO_GET_QSET_NUM: {
		struct ch_reg *edata = (struct ch_reg *)data;
		edata->val = pi->nqsets;
		break;
	}
	case CHELSIO_LOAD_FW:
	case CHELSIO_GET_PM:
	case CHELSIO_SET_PM:
		return (EOPNOTSUPP);
	case CHELSIO_SETMTUTAB: {
		struct ch_mtus *m = (struct ch_mtus *)data;
		int i;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (offload_running(sc))
			return (EBUSY);
		if (m->nmtus != NMTUS)
			return (EINVAL);
		if (m->mtus[0] < 81)		/* accommodate SACK */
			return (EINVAL);

		/*
		 * MTUs must be in ascending order
		 */
		for (i = 1; i < NMTUS; ++i)
			if (m->mtus[i] < m->mtus[i - 1])
				return (EINVAL);

		memcpy(sc->params.mtus, m->mtus,
		    sizeof(sc->params.mtus));
		break;
	}
	case CHELSIO_GETMTUTAB: {
		struct ch_mtus *m = (struct ch_mtus *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);

		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
		m->nmtus = NMTUS;
		break;
	}
	case CHELSIO_DEVUP:
		if (!is_offload(sc))
			return (EOPNOTSUPP);
		return offload_open(pi);
	case CHELSIO_GET_MEM: {
		struct ch_mem_range *t = (struct ch_mem_range *)data;
		struct mc7 *mem;
		uint8_t *useraddr;
		u64 buf[32];

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);		/* need the memory controllers */
		if ((t->addr & 0x7) || (t->len & 0x7))
			return (EINVAL);
		if (t->mem_id == MEM_CM)
			mem = &sc->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &sc->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &sc->pmtx;
		else
			return (EINVAL);

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t->version = 3 | (sc->params.rev << 10);

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr = (uint8_t *)(t + 1);	/* advance to start of buffer */
		while (t->len) {
			unsigned int chunk = min(t->len, sizeof(buf));

			error = t3_mc7_bd_read(mem, t->addr / 8, chunk / 8, buf);
			if (error)
				return (-error);
			if (copyout(buf, useraddr, chunk))
				return (EFAULT);
			useraddr += chunk;
			t->addr += chunk;
			t->len -= chunk;
		}
		break;
	}
	case CHELSIO_READ_TCAM_WORD: {
		struct ch_tcam_word *t = (struct ch_tcam_word *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);		/* need MC5 */
		return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
	}
	case CHELSIO_SET_TRACE_FILTER: {
		struct ch_trace *t = (struct ch_trace *)data;
		const struct trace_params *tp;

		tp = (const struct trace_params *)&t->sip;
		if (t->config_tx)
			t3_config_trace_filter(sc, tp, 0, t->invert_match,
			    t->trace_tx);
		if (t->config_rx)
			t3_config_trace_filter(sc, tp, 1, t->invert_match,
			    t->trace_rx);
		break;
	}
	case CHELSIO_SET_PKTSCHED: {
		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
		if (sc->open_device_map == 0)
			return (EAGAIN);
		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
		    p->binding);
		break;
	}
	case CHELSIO_IFCONF_GETREGS: {
		struct ifconf_regs *regs = (struct ifconf_regs *)data;
		int reglen = cxgb_get_regs_len();
		uint8_t *buf = malloc(REGDUMP_SIZE, M_DEVBUF, M_NOWAIT);
		if (buf == NULL)
			return (ENOMEM);
		if (regs->len > reglen)
			regs->len = reglen;
		else if (regs->len < reglen) {
			error = E2BIG;
			goto done;
		}
		cxgb_get_regs(sc, regs, buf);
		error = copyout(buf, regs->data, reglen);
	done:
		free(buf, M_DEVBUF);
		break;
	}
	case CHELSIO_SET_HW_SCHED: {
		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
		unsigned int ticks_per_usec = core_ticks_per_usec(sc);

		if ((sc->flags & FULL_INIT_DONE) == 0)
			return (EAGAIN);	/* need TP to be initialized */
		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
		    !in_range(t->channel, 0, 1) ||
		    !in_range(t->kbps, 0, 10000000) ||
		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
		    !in_range(t->flow_ipg, 0,
			dack_ticks_to_usec(sc, 0x7ff)))
			return (EINVAL);

		if (t->kbps >= 0) {
			error = t3_config_sched(sc, t->kbps, t->sched);
			if (error < 0)
				return (-error);
		}
		if (t->class_ipg >= 0)
			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
		if (t->flow_ipg >= 0) {
			t->flow_ipg *= 1000;	/* us -> ns */
			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
		}
		if (t->mode >= 0) {
			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);

			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
			    bit, t->mode ? bit : 0);
		}
		if (t->channel >= 0)
			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
			    1 << t->sched, t->channel << t->sched);
		break;
	}
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}
static __inline void
reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
    unsigned int end)
{
	/* start and end are byte offsets into the register map */
	uint32_t *p = (uint32_t *)(buf + start);

	for ( ; start <= end; start += sizeof(uint32_t))
		*p++ = t3_read_reg(ap, start);
}
#define T3_REGMAP_SIZE (3 * 1024)

static int
cxgb_get_regs_len(void)
{
	return T3_REGMAP_SIZE;
}
#undef T3_REGMAP_SIZE
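
/*
 * Register dump for the CHELSIO_IFCONF_GETREGS ioctl.  The version
 * word packs the chip identity, e.g. with params.rev == 2 on a PCIe
 * card: 3 | (2 << 10) | (1 << 31) == 0x80000803.
 */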
static void
cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf)
{
	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, REGDUMP_SIZE);
	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
	    XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
	    XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}