2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
13 * This file is part of the Chelsio T4 support code.
15 * Copyright (C) 2010-2013 Chelsio Communications. All rights reserved.
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
20 * release for licensing terms and conditions.
24 #include <sys/sunddi.h>
25 #include <sys/sunndi.h>
26 #include <sys/modctl.h>
28 #include <sys/devops.h>
30 #include <sys/atomic.h>
31 #include <sys/types.h>
33 #include <sys/errno.h>
37 #include <sys/mkdev.h>
38 #include <sys/queue.h>
39 #include <sys/containerof.h>
42 #include "common/common.h"
43 #include "common/t4_msg.h"
44 #include "common/t4_regs.h"
45 #include "firmware/t4_fw.h"
46 #include "firmware/t4_cfg.h"
47 #include "firmware/t5_fw.h"
48 #include "firmware/t5_cfg.h"
49 #include "firmware/t6_fw.h"
50 #include "firmware/t6_cfg.h"
53 static int t4_cb_open(dev_t
*devp
, int flag
, int otyp
, cred_t
*credp
);
54 static int t4_cb_close(dev_t dev
, int flag
, int otyp
, cred_t
*credp
);
55 static int t4_cb_ioctl(dev_t dev
, int cmd
, intptr_t d
, int mode
, cred_t
*credp
,
57 struct cb_ops t4_cb_ops
= {
58 .cb_open
= t4_cb_open
,
59 .cb_close
= t4_cb_close
,
65 .cb_ioctl
= t4_cb_ioctl
,
69 .cb_chpoll
= nochpoll
,
70 .cb_prop_op
= ddi_prop_op
,
77 static int t4_bus_ctl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_ctl_enum_t op
,
78 void *arg
, void *result
);
79 static int t4_bus_config(dev_info_t
*dip
, uint_t flags
, ddi_bus_config_op_t op
,
80 void *arg
, dev_info_t
**cdipp
);
81 static int t4_bus_unconfig(dev_info_t
*dip
, uint_t flags
,
82 ddi_bus_config_op_t op
, void *arg
);
83 struct bus_ops t4_bus_ops
= {
84 .busops_rev
= BUSO_REV
,
85 .bus_ctl
= t4_bus_ctl
,
86 .bus_prop_op
= ddi_bus_prop_op
,
87 .bus_config
= t4_bus_config
,
88 .bus_unconfig
= t4_bus_unconfig
,
91 static int t4_devo_getinfo(dev_info_t
*dip
, ddi_info_cmd_t cmd
, void *arg
,
93 static int t4_devo_probe(dev_info_t
*dip
);
94 static int t4_devo_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
);
95 static int t4_devo_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
);
96 static int t4_devo_quiesce(dev_info_t
*dip
);
97 struct dev_ops t4_dev_ops
= {
99 .devo_getinfo
= t4_devo_getinfo
,
100 .devo_identify
= nulldev
,
101 .devo_probe
= t4_devo_probe
,
102 .devo_attach
= t4_devo_attach
,
103 .devo_detach
= t4_devo_detach
,
105 .devo_cb_ops
= &t4_cb_ops
,
106 .devo_bus_ops
= &t4_bus_ops
,
107 .devo_quiesce
= &t4_devo_quiesce
,
110 static struct modldrv modldrv
= {
111 .drv_modops
= &mod_driverops
,
112 .drv_linkinfo
= "Chelsio T4 nexus " DRV_VERSION
,
113 .drv_dev_ops
= &t4_dev_ops
116 static struct modlinkage modlinkage
= {
118 .ml_linkage
= {&modldrv
, NULL
},
123 struct intrs_and_queues
{
124 int intr_type
; /* DDI_INTR_TYPE_* */
125 int nirq
; /* Number of vectors */
126 int intr_fwd
; /* Interrupts forwarded */
127 int ntxq10g
; /* # of NIC txq's for each 10G port */
128 int nrxq10g
; /* # of NIC rxq's for each 10G port */
129 int ntxq1g
; /* # of NIC txq's for each 1G port */
130 int nrxq1g
; /* # of NIC rxq's for each 1G port */
131 #ifdef TCP_OFFLOAD_ENABLE
132 int nofldtxq10g
; /* # of TOE txq's for each 10G port */
133 int nofldrxq10g
; /* # of TOE rxq's for each 10G port */
134 int nofldtxq1g
; /* # of TOE txq's for each 1G port */
135 int nofldrxq1g
; /* # of TOE rxq's for each 1G port */
139 struct fw_info fi
[3];
141 static int cpl_not_handled(struct sge_iq
*iq
, const struct rss_header
*rss
,
143 static int fw_msg_not_handled(struct adapter
*, const __be64
*);
144 int t4_register_cpl_handler(struct adapter
*sc
, int opcode
, cpl_handler_t h
);
145 static unsigned int getpf(struct adapter
*sc
);
146 static int prep_firmware(struct adapter
*sc
);
147 static int upload_config_file(struct adapter
*sc
, uint32_t *mt
, uint32_t *ma
);
148 static int partition_resources(struct adapter
*sc
);
149 static int adap__pre_init_tweaks(struct adapter
*sc
);
150 static int get_params__pre_init(struct adapter
*sc
);
151 static int get_params__post_init(struct adapter
*sc
);
152 static int set_params__post_init(struct adapter
*);
153 static void setup_memwin(struct adapter
*sc
);
154 static int validate_mt_off_len(struct adapter
*, int, uint32_t, int,
156 void memwin_info(struct adapter
*, int, uint32_t *, uint32_t *);
157 uint32_t position_memwin(struct adapter
*, int, uint32_t);
158 static int prop_lookup_int_array(struct adapter
*sc
, char *name
, int *data
,
160 static int prop_lookup_int_array(struct adapter
*sc
, char *name
, int *data
,
162 static int init_driver_props(struct adapter
*sc
, struct driver_properties
*p
);
163 static int remove_extra_props(struct adapter
*sc
, int n10g
, int n1g
);
164 static int cfg_itype_and_nqueues(struct adapter
*sc
, int n10g
, int n1g
,
165 struct intrs_and_queues
*iaq
);
166 static int add_child_node(struct adapter
*sc
, int idx
);
167 static int remove_child_node(struct adapter
*sc
, int idx
);
168 static kstat_t
*setup_kstats(struct adapter
*sc
);
169 static kstat_t
*setup_wc_kstats(struct adapter
*);
170 static int update_wc_kstats(kstat_t
*, int);
171 #ifdef TCP_OFFLOAD_ENABLE
172 static int toe_capability(struct port_info
*pi
, int enable
);
173 static int activate_uld(struct adapter
*sc
, int id
, struct uld_softc
*usc
);
174 static int deactivate_uld(struct uld_softc
*usc
);
176 static kmutex_t t4_adapter_list_lock
;
177 static SLIST_HEAD(, adapter
) t4_adapter_list
;
178 #ifdef TCP_OFFLOAD_ENABLE
179 static kmutex_t t4_uld_list_lock
;
180 static SLIST_HEAD(, uld_info
) t4_uld_list
;
188 rc
= ddi_soft_state_init(&t4_list
, sizeof (struct adapter
), 0);
192 rc
= mod_install(&modlinkage
);
194 ddi_soft_state_fini(&t4_list
);
196 mutex_init(&t4_adapter_list_lock
, NULL
, MUTEX_DRIVER
, NULL
);
197 SLIST_INIT(&t4_adapter_list
);
199 #ifdef TCP_OFFLOAD_ENABLE
200 mutex_init(&t4_uld_list_lock
, NULL
, MUTEX_DRIVER
, NULL
);
201 SLIST_INIT(&t4_uld_list
);
212 rc
= mod_remove(&modlinkage
);
216 ddi_soft_state_fini(&t4_list
);
221 _info(struct modinfo
*mi
)
223 return (mod_info(&modlinkage
, mi
));
228 t4_devo_getinfo(dev_info_t
*dip
, ddi_info_cmd_t cmd
, void *arg
, void **rp
)
233 minor
= getminor((dev_t
)arg
); /* same as instance# in our case */
235 if (cmd
== DDI_INFO_DEVT2DEVINFO
) {
236 sc
= ddi_get_soft_state(t4_list
, minor
);
238 return (DDI_FAILURE
);
240 ASSERT(sc
->dev
== (dev_t
)arg
);
241 *rp
= (void *)sc
->dip
;
242 } else if (cmd
== DDI_INFO_DEVT2INSTANCE
)
243 *rp
= (void *) (unsigned long) minor
;
247 return (DDI_SUCCESS
);
251 t4_devo_probe(dev_info_t
*dip
)
256 id
= ddi_prop_get_int(DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
,
257 "device-id", 0xffff);
259 return (DDI_PROBE_DONTCARE
);
261 rc
= ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
,
263 if (rc
!= DDI_SUCCESS
)
264 return (DDI_PROBE_DONTCARE
);
266 pf
= PCI_REG_FUNC_G(reg
[0]);
269 /* Prevent driver attachment on any PF except 0 on the FPGA */
270 if (id
== 0xa000 && pf
!= 0)
271 return (DDI_PROBE_FAILURE
);
273 return (DDI_PROBE_DONTCARE
);
277 t4_devo_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
279 struct adapter
*sc
= NULL
;
281 int i
, instance
, rc
= DDI_SUCCESS
, rqidx
, tqidx
, q
;
282 int irq
= 0, nxg
, n100g
, n40g
, n25g
, n10g
, n1g
;
283 #ifdef TCP_OFFLOAD_ENABLE
284 int ofld_rqidx
, ofld_tqidx
;
287 struct driver_properties
*prp
;
288 struct intrs_and_queues iaq
;
289 ddi_device_acc_attr_t da
= {
290 .devacc_attr_version
= DDI_DEVICE_ATTR_V0
,
291 .devacc_attr_endian_flags
= DDI_STRUCTURE_LE_ACC
,
292 .devacc_attr_dataorder
= DDI_UNORDERED_OK_ACC
294 ddi_device_acc_attr_t da1
= {
295 .devacc_attr_version
= DDI_DEVICE_ATTR_V0
,
296 .devacc_attr_endian_flags
= DDI_STRUCTURE_LE_ACC
,
297 .devacc_attr_dataorder
= DDI_MERGING_OK_ACC
300 if (cmd
!= DDI_ATTACH
)
301 return (DDI_FAILURE
);
304 * Allocate space for soft state.
306 instance
= ddi_get_instance(dip
);
307 rc
= ddi_soft_state_zalloc(t4_list
, instance
);
308 if (rc
!= DDI_SUCCESS
) {
309 cxgb_printf(dip
, CE_WARN
,
310 "failed to allocate soft state: %d", rc
);
311 return (DDI_FAILURE
);
314 sc
= ddi_get_soft_state(t4_list
, instance
);
316 sc
->dev
= makedevice(ddi_driver_major(dip
), instance
);
317 mutex_init(&sc
->lock
, NULL
, MUTEX_DRIVER
, NULL
);
318 cv_init(&sc
->cv
, NULL
, CV_DRIVER
, NULL
);
319 mutex_init(&sc
->sfl_lock
, NULL
, MUTEX_DRIVER
, NULL
);
320 TAILQ_INIT(&sc
->sfl
);
322 mutex_enter(&t4_adapter_list_lock
);
323 SLIST_INSERT_HEAD(&t4_adapter_list
, sc
, link
);
324 mutex_exit(&t4_adapter_list_lock
);
329 cxgb_printf(dip
, CE_WARN
,
330 "failed to determine PCI PF# of device");
335 /* Initialize the driver properties */
337 (void)init_driver_props(sc
, prp
);
340 * Enable access to the PCI config space.
342 rc
= pci_config_setup(dip
, &sc
->pci_regh
);
343 if (rc
!= DDI_SUCCESS
) {
344 cxgb_printf(dip
, CE_WARN
,
345 "failed to enable PCI config space access: %d", rc
);
349 /* TODO: Set max read request to 4K */
352 * Enable MMIO access.
354 rc
= ddi_regs_map_setup(dip
, 1, &sc
->regp
, 0, 0, &da
, &sc
->regh
);
355 if (rc
!= DDI_SUCCESS
) {
356 cxgb_printf(dip
, CE_WARN
,
357 "failed to map device registers: %d", rc
);
361 (void) memset(sc
->chan_map
, 0xff, sizeof (sc
->chan_map
));
364 * Initialize cpl handler.
366 for (i
= 0; i
< ARRAY_SIZE(sc
->cpl_handler
); i
++) {
367 sc
->cpl_handler
[i
] = cpl_not_handled
;
370 for (i
= 0; i
< ARRAY_SIZE(sc
->fw_msg_handler
); i
++) {
371 sc
->fw_msg_handler
[i
] = fw_msg_not_handled
;
374 for (i
= 0; i
< NCHAN
; i
++) {
375 (void) snprintf(name
, sizeof (name
), "%s-%d",
377 sc
->tq
[i
] = ddi_taskq_create(sc
->dip
,
378 name
, 1, TASKQ_DEFAULTPRI
, 0);
380 if (sc
->tq
[i
] == NULL
) {
381 cxgb_printf(dip
, CE_WARN
,
382 "failed to create task queues");
389 * Prepare the adapter for operation.
391 rc
= -t4_prep_adapter(sc
, false);
393 cxgb_printf(dip
, CE_WARN
, "failed to prepare adapter: %d", rc
);
398 * Enable BAR1 access.
400 sc
->doorbells
|= DOORBELL_KDB
;
401 rc
= ddi_regs_map_setup(dip
, 2, &sc
->reg1p
, 0, 0, &da1
, &sc
->reg1h
);
402 if (rc
!= DDI_SUCCESS
) {
403 cxgb_printf(dip
, CE_WARN
,
404 "failed to map BAR1 device registers: %d", rc
);
407 if (is_t5(sc
->params
.chip
)) {
408 sc
->doorbells
|= DOORBELL_UDB
;
411 * Enable write combining on BAR2. This is the
412 * userspace doorbell BAR and is split into 128B
413 * (UDBS_SEG_SIZE) doorbell regions, each associated
414 * with an egress queue. The first 64B has the doorbell
415 * and the second 64B can be used to submit a tx work
416 * request with an implicit doorbell.
418 sc
->doorbells
&= ~DOORBELL_UDB
;
419 sc
->doorbells
|= (DOORBELL_WCWR
|
421 t4_write_reg(sc
, A_SGE_STAT_CFG
,
422 V_STATSOURCE_T5(7) | V_STATMODE(0));
428 * Do this really early. Note that minor number = instance.
430 (void) snprintf(name
, sizeof (name
), "%s,%d", T4_NEXUS_NAME
, instance
);
431 rc
= ddi_create_minor_node(dip
, name
, S_IFCHR
, instance
,
433 if (rc
!= DDI_SUCCESS
) {
434 cxgb_printf(dip
, CE_WARN
,
435 "failed to create device node: %d", rc
);
436 rc
= DDI_SUCCESS
; /* carry on */
439 /* Do this early. Memory window is required for loading config file. */
442 /* Prepare the firmware for operation */
443 rc
= prep_firmware(sc
);
445 goto done
; /* error message displayed already */
447 rc
= adap__pre_init_tweaks(sc
);
451 rc
= get_params__pre_init(sc
);
453 goto done
; /* error message displayed already */
457 if (sc
->flags
& MASTER_PF
) {
458 /* get basic stuff going */
459 rc
= -t4_fw_initialize(sc
, sc
->mbox
);
461 cxgb_printf(sc
->dip
, CE_WARN
,
462 "early init failed: %d.\n", rc
);
467 rc
= get_params__post_init(sc
);
469 goto done
; /* error message displayed already */
471 rc
= set_params__post_init(sc
);
473 goto done
; /* error message displayed already */
476 * TODO: This is the place to call t4_set_filter_mode()
479 /* tweak some settings */
480 t4_write_reg(sc
, A_TP_SHIFT_CNT
, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
481 V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
482 V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
483 t4_write_reg(sc
, A_ULP_RX_TDDP_PSZ
, V_HPZ0(PAGE_SHIFT
- 12));
486 * Work-around for bug 2619
487 * Set DisableVlan field in TP_RSS_CONFIG_VRT register so that the
488 * VLAN tag extraction is disabled.
490 t4_set_reg_field(sc
, A_TP_RSS_CONFIG_VRT
, F_DISABLEVLAN
, F_DISABLEVLAN
);
492 /* Store filter mode */
493 t4_read_indirect(sc
, A_TP_PIO_ADDR
, A_TP_PIO_DATA
, &sc
->filter_mode
, 1,
497 * First pass over all the ports - allocate VIs and initialize some
498 * basic parameters like mac address, port type, etc. We also figure
499 * out whether a port is 10G or 1G and use that information when
500 * calculating how many interrupts to attempt to allocate.
502 n100g
= n40g
= n25g
= n10g
= n1g
= 0;
503 for_each_port(sc
, i
) {
504 struct port_info
*pi
;
506 pi
= kmem_zalloc(sizeof (*pi
), KM_SLEEP
);
509 /* These must be set before t4_port_init */
511 /* LINTED: E_ASSIGN_NARROW_CONV */
515 /* Allocate the vi and initialize parameters like mac addr */
516 rc
= -t4_port_init(sc
, sc
->mbox
, sc
->pf
, 0);
518 cxgb_printf(dip
, CE_WARN
,
519 "unable to initialize port: %d", rc
);
523 for_each_port(sc
, i
) {
524 struct port_info
*pi
= sc
->port
[i
];
526 mutex_init(&pi
->lock
, NULL
, MUTEX_DRIVER
, NULL
);
529 if (is_100G_port(pi
)) {
531 pi
->tmr_idx
= prp
->tmr_idx_10g
;
532 pi
->pktc_idx
= prp
->pktc_idx_10g
;
533 } else if (is_40G_port(pi
)) {
535 pi
->tmr_idx
= prp
->tmr_idx_10g
;
536 pi
->pktc_idx
= prp
->pktc_idx_10g
;
537 } else if (is_25G_port(pi
)) {
539 pi
->tmr_idx
= prp
->tmr_idx_10g
;
540 pi
->pktc_idx
= prp
->pktc_idx_10g
;
541 } else if (is_10G_port(pi
)) {
543 pi
->tmr_idx
= prp
->tmr_idx_10g
;
544 pi
->pktc_idx
= prp
->pktc_idx_10g
;
547 pi
->tmr_idx
= prp
->tmr_idx_1g
;
548 pi
->pktc_idx
= prp
->pktc_idx_1g
;
551 pi
->xact_addr_filt
= -1;
554 setbit(&sc
->registered_device_map
, i
);
557 nxg
= n10g
+ n25g
+ n40g
+ n100g
;
558 (void) remove_extra_props(sc
, nxg
, n1g
);
560 if (sc
->registered_device_map
== 0) {
561 cxgb_printf(dip
, CE_WARN
, "no usable ports");
566 rc
= cfg_itype_and_nqueues(sc
, nxg
, n1g
, &iaq
);
568 goto done
; /* error message displayed already */
570 sc
->intr_type
= iaq
.intr_type
;
571 sc
->intr_count
= iaq
.nirq
;
573 if (sc
->props
.multi_rings
&& (sc
->intr_type
!= DDI_INTR_TYPE_MSIX
)) {
574 sc
->props
.multi_rings
= 0;
575 cxgb_printf(dip
, CE_WARN
,
576 "Multiple rings disabled as interrupt type is not MSI-X");
579 if (sc
->props
.multi_rings
&& iaq
.intr_fwd
) {
580 sc
->props
.multi_rings
= 0;
581 cxgb_printf(dip
, CE_WARN
,
582 "Multiple rings disabled as interrupts are forwarded");
585 if (!sc
->props
.multi_rings
) {
590 s
->nrxq
= nxg
* iaq
.nrxq10g
+ n1g
* iaq
.nrxq1g
;
591 s
->ntxq
= nxg
* iaq
.ntxq10g
+ n1g
* iaq
.ntxq1g
;
592 s
->neq
= s
->ntxq
+ s
->nrxq
; /* the fl in an rxq is an eq */
593 #ifdef TCP_OFFLOAD_ENABLE
594 /* control queues, 1 per port + 1 mgmtq */
595 s
->neq
+= sc
->params
.nports
+ 1;
597 s
->niq
= s
->nrxq
+ 1; /* 1 extra for firmware event queue */
598 if (iaq
.intr_fwd
!= 0)
599 sc
->flags
|= INTR_FWD
;
600 #ifdef TCP_OFFLOAD_ENABLE
601 if (is_offload(sc
) != 0) {
603 s
->nofldrxq
= nxg
* iaq
.nofldrxq10g
+ n1g
* iaq
.nofldrxq1g
;
604 s
->nofldtxq
= nxg
* iaq
.nofldtxq10g
+ n1g
* iaq
.nofldtxq1g
;
605 s
->neq
+= s
->nofldtxq
+ s
->nofldrxq
;
606 s
->niq
+= s
->nofldrxq
;
608 s
->ofld_rxq
= kmem_zalloc(s
->nofldrxq
*
609 sizeof (struct sge_ofld_rxq
), KM_SLEEP
);
610 s
->ofld_txq
= kmem_zalloc(s
->nofldtxq
*
611 sizeof (struct sge_wrq
), KM_SLEEP
);
612 s
->ctrlq
= kmem_zalloc(sc
->params
.nports
*
613 sizeof (struct sge_wrq
), KM_SLEEP
);
617 s
->rxq
= kmem_zalloc(s
->nrxq
* sizeof (struct sge_rxq
), KM_SLEEP
);
618 s
->txq
= kmem_zalloc(s
->ntxq
* sizeof (struct sge_txq
), KM_SLEEP
);
619 s
->iqmap
= kmem_zalloc(s
->niq
* sizeof (struct sge_iq
*), KM_SLEEP
);
620 s
->eqmap
= kmem_zalloc(s
->neq
* sizeof (struct sge_eq
*), KM_SLEEP
);
622 sc
->intr_handle
= kmem_zalloc(sc
->intr_count
*
623 sizeof (ddi_intr_handle_t
), KM_SLEEP
);
626 * Second pass over the ports. This time we know the number of rx and
627 * tx queues that each port should get.
630 #ifdef TCP_OFFLOAD_ENABLE
631 ofld_rqidx
= ofld_tqidx
= 0;
633 for_each_port(sc
, i
) {
634 struct port_info
*pi
= sc
->port
[i
];
640 /* LINTED: E_ASSIGN_NARROW_CONV */
641 pi
->first_rxq
= rqidx
;
642 /* LINTED: E_ASSIGN_NARROW_CONV */
643 pi
->nrxq
= (is_10XG_port(pi
)) ? iaq
.nrxq10g
645 /* LINTED: E_ASSIGN_NARROW_CONV */
646 pi
->first_txq
= tqidx
;
647 /* LINTED: E_ASSIGN_NARROW_CONV */
648 pi
->ntxq
= (is_10XG_port(pi
)) ? iaq
.ntxq10g
654 #ifdef TCP_OFFLOAD_ENABLE
655 if (is_offload(sc
) != 0) {
656 /* LINTED: E_ASSIGN_NARROW_CONV */
657 pi
->first_ofld_rxq
= ofld_rqidx
;
658 pi
->nofldrxq
= max(1, pi
->nrxq
/ 4);
660 /* LINTED: E_ASSIGN_NARROW_CONV */
661 pi
->first_ofld_txq
= ofld_tqidx
;
662 pi
->nofldtxq
= max(1, pi
->ntxq
/ 2);
664 ofld_rqidx
+= pi
->nofldrxq
;
665 ofld_tqidx
+= pi
->nofldtxq
;
670 * Enable hw checksumming and LSO for all ports by default.
671 * They can be disabled using ndd (hw_csum and hw_lso).
673 pi
->features
|= (CXGBE_HW_CSUM
| CXGBE_HW_LSO
);
676 #ifdef TCP_OFFLOAD_ENABLE
677 sc
->l2t
= t4_init_l2t(sc
);
685 rc
= ddi_intr_alloc(dip
, sc
->intr_handle
, sc
->intr_type
, 0,
686 sc
->intr_count
, &i
, DDI_INTR_ALLOC_STRICT
);
687 if (rc
!= DDI_SUCCESS
) {
688 cxgb_printf(dip
, CE_WARN
,
689 "failed to allocate %d interrupt(s) of type %d: %d, %d",
690 sc
->intr_count
, sc
->intr_type
, rc
, i
);
693 ASSERT(sc
->intr_count
== i
); /* allocation was STRICT */
694 (void) ddi_intr_get_cap(sc
->intr_handle
[0], &sc
->intr_cap
);
695 (void) ddi_intr_get_pri(sc
->intr_handle
[0], &sc
->intr_pri
);
696 if (sc
->intr_count
== 1) {
697 ASSERT(sc
->flags
& INTR_FWD
);
698 (void) ddi_intr_add_handler(sc
->intr_handle
[0], t4_intr_all
, sc
,
701 /* Multiple interrupts. The first one is always error intr */
702 (void) ddi_intr_add_handler(sc
->intr_handle
[0], t4_intr_err
, sc
,
706 /* The second one is always the firmware event queue */
707 (void) ddi_intr_add_handler(sc
->intr_handle
[1], t4_intr
, sc
,
711 * Note that if INTR_FWD is set then either the NIC rx
712 * queues or (exclusive or) the TOE rx queueus will be taking
715 * There is no need to check for is_offload(sc) as nofldrxq
716 * will be 0 if offload is disabled.
718 for_each_port(sc
, i
) {
719 struct port_info
*pi
= sc
->port
[i
];
721 #ifdef TCP_OFFLOAD_ENABLE
722 struct sge_ofld_rxq
*ofld_rxq
;
725 * Skip over the NIC queues if they aren't taking direct
728 if ((sc
->flags
& INTR_FWD
) &&
729 pi
->nofldrxq
> pi
->nrxq
)
732 rxq
= &s
->rxq
[pi
->first_rxq
];
733 for (q
= 0; q
< pi
->nrxq
; q
++, rxq
++) {
734 (void) ddi_intr_add_handler(
735 sc
->intr_handle
[irq
], t4_intr
, sc
,
740 #ifdef TCP_OFFLOAD_ENABLE
742 * Skip over the offload queues if they aren't taking
745 if ((sc
->flags
& INTR_FWD
))
748 ofld_rxq
= &s
->ofld_rxq
[pi
->first_ofld_rxq
];
749 for (q
= 0; q
< pi
->nofldrxq
; q
++, ofld_rxq
++) {
750 (void) ddi_intr_add_handler(
751 sc
->intr_handle
[irq
], t4_intr
, sc
,
759 sc
->flags
|= INTR_ALLOCATED
;
761 ASSERT(rc
== DDI_SUCCESS
);
765 * Hardware/Firmware/etc. Version/Revision IDs.
767 t4_dump_version_info(sc
);
770 cxgb_printf(dip
, CE_NOTE
,
771 "%dx100G (%d rxq, %d txq total) %d %s.",
772 n100g
, rqidx
, tqidx
, sc
->intr_count
,
773 sc
->intr_type
== DDI_INTR_TYPE_MSIX
? "MSI-X interrupts" :
774 sc
->intr_type
== DDI_INTR_TYPE_MSI
? "MSI interrupts" :
777 cxgb_printf(dip
, CE_NOTE
,
778 "%dx40G (%d rxq, %d txq total) %d %s.",
779 n40g
, rqidx
, tqidx
, sc
->intr_count
,
780 sc
->intr_type
== DDI_INTR_TYPE_MSIX
? "MSI-X interrupts" :
781 sc
->intr_type
== DDI_INTR_TYPE_MSI
? "MSI interrupts" :
784 cxgb_printf(dip
, CE_NOTE
,
785 "%dx25G (%d rxq, %d txq total) %d %s.",
786 n25g
, rqidx
, tqidx
, sc
->intr_count
,
787 sc
->intr_type
== DDI_INTR_TYPE_MSIX
? "MSI-X interrupts" :
788 sc
->intr_type
== DDI_INTR_TYPE_MSI
? "MSI interrupts" :
790 } else if (n10g
&& n1g
) {
791 cxgb_printf(dip
, CE_NOTE
,
792 "%dx10G %dx1G (%d rxq, %d txq total) %d %s.",
793 n10g
, n1g
, rqidx
, tqidx
, sc
->intr_count
,
794 sc
->intr_type
== DDI_INTR_TYPE_MSIX
? "MSI-X interrupts" :
795 sc
->intr_type
== DDI_INTR_TYPE_MSI
? "MSI interrupts" :
798 cxgb_printf(dip
, CE_NOTE
,
799 "%dx%sG (%d rxq, %d txq per port) %d %s.",
802 n10g
? iaq
.nrxq10g
: iaq
.nrxq1g
,
803 n10g
? iaq
.ntxq10g
: iaq
.ntxq1g
,
805 sc
->intr_type
== DDI_INTR_TYPE_MSIX
? "MSI-X interrupts" :
806 sc
->intr_type
== DDI_INTR_TYPE_MSI
? "MSI interrupts" :
810 sc
->ksp
= setup_kstats(sc
);
811 sc
->ksp_stat
= setup_wc_kstats(sc
);
812 sc
->params
.drv_memwin
= MEMWIN_NIC
;
815 if (rc
!= DDI_SUCCESS
) {
816 (void) t4_devo_detach(dip
, DDI_DETACH
);
818 /* rc may have errno style errors or DDI errors */
826 t4_devo_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
)
830 struct port_info
*pi
;
833 if (cmd
!= DDI_DETACH
)
834 return (DDI_FAILURE
);
836 instance
= ddi_get_instance(dip
);
837 sc
= ddi_get_soft_state(t4_list
, instance
);
839 return (DDI_SUCCESS
);
841 if (sc
->flags
& FULL_INIT_DONE
) {
843 for_each_port(sc
, i
) {
845 if (pi
&& pi
->flags
& PORT_INIT_DONE
)
846 (void) port_full_uninit(pi
);
848 (void) adapter_full_uninit(sc
);
851 /* Safe to call no matter what */
852 ddi_prop_remove_all(dip
);
853 ddi_remove_minor_node(dip
, NULL
);
855 for (i
= 0; i
< NCHAN
; i
++) {
857 ddi_taskq_wait(sc
->tq
[i
]);
858 ddi_taskq_destroy(sc
->tq
[i
]);
863 kstat_delete(sc
->ksp
);
864 if (sc
->ksp_stat
!= NULL
)
865 kstat_delete(sc
->ksp_stat
);
869 kmem_free(s
->rxq
, s
->nrxq
* sizeof (struct sge_rxq
));
870 #ifdef TCP_OFFLOAD_ENABLE
871 if (s
->ofld_txq
!= NULL
)
872 kmem_free(s
->ofld_txq
, s
->nofldtxq
* sizeof (struct sge_wrq
));
873 if (s
->ofld_rxq
!= NULL
)
874 kmem_free(s
->ofld_rxq
,
875 s
->nofldrxq
* sizeof (struct sge_ofld_rxq
));
876 if (s
->ctrlq
!= NULL
)
878 sc
->params
.nports
* sizeof (struct sge_wrq
));
881 kmem_free(s
->txq
, s
->ntxq
* sizeof (struct sge_txq
));
882 if (s
->iqmap
!= NULL
)
883 kmem_free(s
->iqmap
, s
->niq
* sizeof (struct sge_iq
*));
884 if (s
->eqmap
!= NULL
)
885 kmem_free(s
->eqmap
, s
->neq
* sizeof (struct sge_eq
*));
887 if (s
->rxbuf_cache
!= NULL
)
888 rxbuf_cache_destroy(s
->rxbuf_cache
);
890 if (sc
->flags
& INTR_ALLOCATED
) {
891 for (i
= 0; i
< sc
->intr_count
; i
++) {
892 (void) ddi_intr_remove_handler(sc
->intr_handle
[i
]);
893 (void) ddi_intr_free(sc
->intr_handle
[i
]);
895 sc
->flags
&= ~INTR_ALLOCATED
;
898 if (sc
->intr_handle
!= NULL
) {
899 kmem_free(sc
->intr_handle
,
900 sc
->intr_count
* sizeof (*sc
->intr_handle
));
903 for_each_port(sc
, i
) {
906 mutex_destroy(&pi
->lock
);
907 kmem_free(pi
, sizeof (*pi
));
908 clrbit(&sc
->registered_device_map
, i
);
912 if (sc
->flags
& FW_OK
)
913 (void) t4_fw_bye(sc
, sc
->mbox
);
915 if (sc
->reg1h
!= NULL
)
916 ddi_regs_map_free(&sc
->reg1h
);
918 if (sc
->regh
!= NULL
)
919 ddi_regs_map_free(&sc
->regh
);
921 if (sc
->pci_regh
!= NULL
)
922 pci_config_teardown(&sc
->pci_regh
);
924 mutex_enter(&t4_adapter_list_lock
);
925 SLIST_REMOVE_HEAD(&t4_adapter_list
, link
);
926 mutex_exit(&t4_adapter_list_lock
);
928 mutex_destroy(&sc
->lock
);
930 mutex_destroy(&sc
->sfl_lock
);
933 bzero(sc
, sizeof (*sc
));
935 ddi_soft_state_free(t4_list
, instance
);
937 return (DDI_SUCCESS
);
941 t4_devo_quiesce(dev_info_t
*dip
)
946 instance
= ddi_get_instance(dip
);
947 sc
= ddi_get_soft_state(t4_list
, instance
);
949 return (DDI_SUCCESS
);
951 t4_set_reg_field(sc
, A_SGE_CONTROL
, F_GLOBALENABLE
, 0);
953 t4_write_reg(sc
, A_PL_RST
, F_PIORSTMODE
| F_PIORST
);
955 return (DDI_SUCCESS
);
959 t4_bus_ctl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_ctl_enum_t op
, void *arg
,
963 struct port_info
*pi
;
964 dev_info_t
*child
= (dev_info_t
*)arg
;
967 case DDI_CTLOPS_REPORTDEV
:
968 pi
= ddi_get_parent_data(rdip
);
969 pi
->instance
= ddi_get_instance(dip
);
970 pi
->child_inst
= ddi_get_instance(rdip
);
971 cmn_err(CE_CONT
, "?%s%d is port %s on %s%d\n",
972 ddi_node_name(rdip
), ddi_get_instance(rdip
),
973 ddi_get_name_addr(rdip
), ddi_driver_name(dip
),
974 ddi_get_instance(dip
));
975 return (DDI_SUCCESS
);
977 case DDI_CTLOPS_INITCHILD
:
978 pi
= ddi_get_parent_data(child
);
980 return (DDI_NOT_WELL_FORMED
);
981 (void) snprintf(s
, sizeof (s
), "%d", pi
->port_id
);
982 ddi_set_name_addr(child
, s
);
983 return (DDI_SUCCESS
);
985 case DDI_CTLOPS_UNINITCHILD
:
986 ddi_set_name_addr(child
, NULL
);
987 return (DDI_SUCCESS
);
989 case DDI_CTLOPS_ATTACH
:
990 case DDI_CTLOPS_DETACH
:
991 return (DDI_SUCCESS
);
994 return (ddi_ctlops(dip
, rdip
, op
, arg
, result
));
999 t4_bus_config(dev_info_t
*dip
, uint_t flags
, ddi_bus_config_op_t op
, void *arg
,
1005 instance
= ddi_get_instance(dip
);
1006 sc
= ddi_get_soft_state(t4_list
, instance
);
1008 if (op
== BUS_CONFIG_ONE
) {
1012 * arg is something like "cxgb@0" where 0 is the port_id hanging
1020 /* There should be exactly 1 digit after '@' */
1021 if (*(c
- 1) != '@')
1022 return (NDI_FAILURE
);
1026 if (add_child_node(sc
, i
) != 0)
1027 return (NDI_FAILURE
);
1029 flags
|= NDI_ONLINE_ATTACH
;
1031 } else if (op
== BUS_CONFIG_ALL
|| op
== BUS_CONFIG_DRIVER
) {
1032 /* Allocate and bind all child device nodes */
1033 for_each_port(sc
, i
)
1034 (void) add_child_node(sc
, i
);
1035 flags
|= NDI_ONLINE_ATTACH
;
1038 return (ndi_busop_bus_config(dip
, flags
, op
, arg
, cdipp
, 0));
1042 t4_bus_unconfig(dev_info_t
*dip
, uint_t flags
, ddi_bus_config_op_t op
,
1045 int instance
, i
, rc
;
1048 instance
= ddi_get_instance(dip
);
1049 sc
= ddi_get_soft_state(t4_list
, instance
);
1051 if (op
== BUS_CONFIG_ONE
|| op
== BUS_UNCONFIG_ALL
||
1052 op
== BUS_UNCONFIG_DRIVER
)
1053 flags
|= NDI_UNCONFIG
;
1055 rc
= ndi_busop_bus_unconfig(dip
, flags
, op
, arg
);
1059 if (op
== BUS_UNCONFIG_ONE
) {
1066 if (*(c
- 1) != '@')
1067 return (NDI_SUCCESS
);
1071 rc
= remove_child_node(sc
, i
);
1073 } else if (op
== BUS_UNCONFIG_ALL
|| op
== BUS_UNCONFIG_DRIVER
) {
1075 for_each_port(sc
, i
)
1076 (void) remove_child_node(sc
, i
);
1084 t4_cb_open(dev_t
*devp
, int flag
, int otyp
, cred_t
*credp
)
1088 if (otyp
!= OTYP_CHR
)
1091 sc
= ddi_get_soft_state(t4_list
, getminor(*devp
));
1095 return (atomic_cas_uint(&sc
->open
, 0, EBUSY
));
1100 t4_cb_close(dev_t dev
, int flag
, int otyp
, cred_t
*credp
)
1104 sc
= ddi_get_soft_state(t4_list
, getminor(dev
));
1108 (void) atomic_swap_uint(&sc
->open
, 0);
1114 t4_cb_ioctl(dev_t dev
, int cmd
, intptr_t d
, int mode
, cred_t
*credp
, int *rp
)
1118 void *data
= (void *)d
;
1120 if (crgetuid(credp
) != 0)
1123 instance
= getminor(dev
);
1124 sc
= ddi_get_soft_state(t4_list
, instance
);
1128 return (t4_ioctl(sc
, cmd
, data
, mode
));
1132 getpf(struct adapter
*sc
)
1137 rc
= ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, sc
->dip
,
1138 DDI_PROP_DONTPASS
, "reg", &data
, &n
);
1139 if (rc
!= DDI_SUCCESS
) {
1140 cxgb_printf(sc
->dip
, CE_WARN
,
1141 "failed to lookup \"reg\" property: %d", rc
);
1145 pf
= PCI_REG_FUNC_G(data
[0]);
1146 ddi_prop_free(data
);
1152 static struct fw_info
*
1153 find_fw_info(int chip
)
1157 fi
[0].chip
= CHELSIO_T4
;
1158 fi
[0].fw_hdr
.chip
= FW_HDR_CHIP_T4
;
1159 fi
[0].fw_hdr
.fw_ver
= cpu_to_be32(FW_VERSION(T4
));
1160 fi
[0].fw_hdr
.intfver_nic
= FW_INTFVER(T4
, NIC
);
1161 fi
[0].fw_hdr
.intfver_vnic
= FW_INTFVER(T4
, VNIC
);
1162 fi
[0].fw_hdr
.intfver_ofld
= FW_INTFVER(T4
, OFLD
);
1163 fi
[0].fw_hdr
.intfver_ri
= FW_INTFVER(T4
, RI
);
1164 fi
[0].fw_hdr
.intfver_iscsipdu
= FW_INTFVER(T4
, ISCSIPDU
);
1165 fi
[0].fw_hdr
.intfver_iscsi
= FW_INTFVER(T4
, ISCSI
);
1166 fi
[0].fw_hdr
.intfver_fcoepdu
= FW_INTFVER(T4
, FCOEPDU
);
1167 fi
[0].fw_hdr
.intfver_fcoe
= FW_INTFVER(T4
, FCOE
);
1169 fi
[1].chip
= CHELSIO_T5
;
1170 fi
[1].fw_hdr
.chip
= FW_HDR_CHIP_T5
;
1171 fi
[1].fw_hdr
.fw_ver
= cpu_to_be32(FW_VERSION(T5
));
1172 fi
[1].fw_hdr
.intfver_nic
= FW_INTFVER(T5
, NIC
);
1173 fi
[1].fw_hdr
.intfver_vnic
= FW_INTFVER(T5
, VNIC
);
1174 fi
[1].fw_hdr
.intfver_ofld
= FW_INTFVER(T5
, OFLD
);
1175 fi
[1].fw_hdr
.intfver_ri
= FW_INTFVER(T5
, RI
);
1176 fi
[1].fw_hdr
.intfver_iscsipdu
= FW_INTFVER(T5
, ISCSIPDU
);
1177 fi
[1].fw_hdr
.intfver_iscsi
= FW_INTFVER(T5
, ISCSI
);
1178 fi
[1].fw_hdr
.intfver_fcoepdu
= FW_INTFVER(T5
, FCOEPDU
);
1179 fi
[1].fw_hdr
.intfver_fcoe
= FW_INTFVER(T5
, FCOE
);
1181 fi
[2].chip
= CHELSIO_T6
;
1182 fi
[2].fw_hdr
.chip
= FW_HDR_CHIP_T6
;
1183 fi
[2].fw_hdr
.fw_ver
= cpu_to_be32(FW_VERSION(T6
));
1184 fi
[2].fw_hdr
.intfver_nic
= FW_INTFVER(T6
, NIC
);
1185 fi
[2].fw_hdr
.intfver_vnic
= FW_INTFVER(T6
, VNIC
);
1186 fi
[2].fw_hdr
.intfver_ofld
= FW_INTFVER(T6
, OFLD
);
1187 fi
[2].fw_hdr
.intfver_ri
= FW_INTFVER(T6
, RI
);
1188 fi
[2].fw_hdr
.intfver_iscsipdu
= FW_INTFVER(T6
, ISCSIPDU
);
1189 fi
[2].fw_hdr
.intfver_iscsi
= FW_INTFVER(T6
, ISCSI
);
1190 fi
[2].fw_hdr
.intfver_fcoepdu
= FW_INTFVER(T6
, FCOEPDU
);
1191 fi
[2].fw_hdr
.intfver_fcoe
= FW_INTFVER(T6
, FCOE
);
1193 for (i
= 0; i
< ARRAY_SIZE(fi
); i
++) {
1194 if (fi
[i
].chip
== chip
)
1202 * Install a compatible firmware (if required), establish contact with it,
1203 * become the master, and reset the device.
1206 prep_firmware(struct adapter
*sc
)
1211 enum dev_state state
;
1212 unsigned char *fw_data
;
1213 struct fw_info
*fw_info
;
1214 struct fw_hdr
*card_fw
;
1216 struct driver_properties
*p
= &sc
->props
;
1218 /* Contact firmware, request master */
1219 rc
= t4_fw_hello(sc
, sc
->mbox
, sc
->mbox
, MASTER_MUST
, &state
);
1222 cxgb_printf(sc
->dip
, CE_WARN
,
1223 "failed to connect to the firmware: %d.", rc
);
1228 sc
->flags
|= MASTER_PF
;
1230 /* We may need FW version info for later reporting */
1231 t4_get_version_info(sc
);
1232 fw_info
= find_fw_info(CHELSIO_CHIP_VERSION(sc
->params
.chip
));
1233 /* allocate memory to read the header of the firmware on the
1237 cxgb_printf(sc
->dip
, CE_WARN
,
1238 "unable to look up firmware information for chip %d.\n",
1239 CHELSIO_CHIP_VERSION(sc
->params
.chip
));
1242 card_fw
= kmem_zalloc(sizeof(*card_fw
), KM_SLEEP
);
1244 cxgb_printf(sc
->dip
, CE_WARN
,
1245 "Memory allocation for card FW header failed\n");
1248 switch(CHELSIO_CHIP_VERSION(sc
->params
.chip
)) {
1250 fw_data
= t4fw_data
;
1251 fw_size
= t4fw_size
;
1254 fw_data
= t5fw_data
;
1255 fw_size
= t5fw_size
;
1258 fw_data
= t6fw_data
;
1259 fw_size
= t6fw_size
;
1262 cxgb_printf(sc
->dip
, CE_WARN
, "Adapter type not supported\n");
1263 kmem_free(card_fw
, sizeof(*card_fw
));
1267 rc
= -t4_prep_fw(sc
, fw_info
, fw_data
, fw_size
, card_fw
,
1268 p
->t4_fw_install
, state
, &reset
);
1270 kmem_free(card_fw
, sizeof(*card_fw
));
1273 cxgb_printf(sc
->dip
, CE_WARN
,
1274 "failed to install firmware: %d", rc
);
1278 (void) t4_check_fw_version(sc
);
1282 rc
= -t4_fw_reset(sc
, sc
->mbox
, F_PIORSTMODE
| F_PIORST
);
1284 cxgb_printf(sc
->dip
, CE_WARN
,
1285 "firmware reset failed: %d.", rc
);
1286 if (rc
!= ETIMEDOUT
&& rc
!= EIO
)
1287 (void) t4_fw_bye(sc
, sc
->mbox
);
1291 /* Partition adapter resources as specified in the config file. */
1292 if (sc
->flags
& MASTER_PF
) {
1293 /* Handle default vs special T4 config file */
1295 rc
= partition_resources(sc
);
1297 goto err
; /* error message displayed already */
1307 static const struct memwin t4_memwin
[] = {
1308 { MEMWIN0_BASE
, MEMWIN0_APERTURE
},
1309 { MEMWIN1_BASE
, MEMWIN1_APERTURE
},
1310 { MEMWIN2_BASE
, MEMWIN2_APERTURE
}
1313 static const struct memwin t5_memwin
[] = {
1314 { MEMWIN0_BASE
, MEMWIN0_APERTURE
},
1315 { MEMWIN1_BASE
, MEMWIN1_APERTURE
},
1316 { MEMWIN2_BASE_T5
, MEMWIN2_APERTURE_T5
},
1319 #define FW_PARAM_DEV(param) \
1320 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
1321 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
1322 #define FW_PARAM_PFVF(param) \
1323 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
1324 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
1327 * Verify that the memory range specified by the memtype/offset/len pair is
1328 * valid and lies entirely within the memtype specified. The global address of
1329 * the start of the range is returned in addr.
1332 validate_mt_off_len(struct adapter
*sc
, int mtype
, uint32_t off
, int len
,
1335 uint32_t em
, addr_len
, maddr
, mlen
;
1337 /* Memory can only be accessed in naturally aligned 4 byte units */
1338 if (off
& 3 || len
& 3 || len
== 0)
1341 em
= t4_read_reg(sc
, A_MA_TARGET_MEM_ENABLE
);
1344 if (!(em
& F_EDRAM0_ENABLE
))
1346 addr_len
= t4_read_reg(sc
, A_MA_EDRAM0_BAR
);
1347 maddr
= G_EDRAM0_BASE(addr_len
) << 20;
1348 mlen
= G_EDRAM0_SIZE(addr_len
) << 20;
1351 if (!(em
& F_EDRAM1_ENABLE
))
1353 addr_len
= t4_read_reg(sc
, A_MA_EDRAM1_BAR
);
1354 maddr
= G_EDRAM1_BASE(addr_len
) << 20;
1355 mlen
= G_EDRAM1_SIZE(addr_len
) << 20;
1358 if (!(em
& F_EXT_MEM_ENABLE
))
1360 addr_len
= t4_read_reg(sc
, A_MA_EXT_MEMORY_BAR
);
1361 maddr
= G_EXT_MEM_BASE(addr_len
) << 20;
1362 mlen
= G_EXT_MEM_SIZE(addr_len
) << 20;
1365 if (is_t4(sc
->params
.chip
) || !(em
& F_EXT_MEM1_ENABLE
))
1367 addr_len
= t4_read_reg(sc
, A_MA_EXT_MEMORY1_BAR
);
1368 maddr
= G_EXT_MEM1_BASE(addr_len
) << 20;
1369 mlen
= G_EXT_MEM1_SIZE(addr_len
) << 20;
1375 if (mlen
> 0 && off
< mlen
&& off
+ len
<= mlen
) {
1376 *addr
= maddr
+ off
; /* global address */
1384 memwin_info(struct adapter
*sc
, int win
, uint32_t *base
, uint32_t *aperture
)
1386 const struct memwin
*mw
;
1388 if (is_t4(sc
->params
.chip
)) {
1389 mw
= &t4_memwin
[win
];
1391 mw
= &t5_memwin
[win
];
1396 if (aperture
!= NULL
)
1397 *aperture
= mw
->aperture
;
1401 * Upload configuration file to card's memory.
1404 upload_config_file(struct adapter
*sc
, uint32_t *mt
, uint32_t *ma
)
1408 uint32_t param
, val
, addr
, mtype
, maddr
;
1409 uint32_t off
, mw_base
, mw_aperture
;
1410 const uint32_t *cfdata
;
1412 /* Figure out where the firmware wants us to upload it. */
1413 param
= FW_PARAM_DEV(CF
);
1414 rc
= -t4_query_params(sc
, sc
->mbox
, sc
->pf
, 0, 1, ¶m
, &val
);
1416 /* Firmwares without config file support will fail this way */
1417 cxgb_printf(sc
->dip
, CE_WARN
,
1418 "failed to query config file location: %d.\n", rc
);
1421 *mt
= mtype
= G_FW_PARAMS_PARAM_Y(val
);
1422 *ma
= maddr
= G_FW_PARAMS_PARAM_Z(val
) << 16;
1424 switch (CHELSIO_CHIP_VERSION(sc
->params
.chip
)) {
1426 cflen
= t4cfg_size
& ~3;
1427 /* LINTED: E_BAD_PTR_CAST_ALIGN */
1428 cfdata
= (const uint32_t *)t4cfg_data
;
1431 cflen
= t5cfg_size
& ~3;
1432 /* LINTED: E_BAD_PTR_CAST_ALIGN */
1433 cfdata
= (const uint32_t *)t5cfg_data
;
1436 cflen
= t6cfg_size
& ~3;
1437 /* LINTED: E_BAD_PTR_CAST_ALIGN */
1438 cfdata
= (const uint32_t *)t6cfg_data
;
1441 cxgb_printf(sc
->dip
, CE_WARN
,
1442 "Invalid Adapter detected\n");
1446 if (cflen
> FLASH_CFG_MAX_SIZE
) {
1447 cxgb_printf(sc
->dip
, CE_WARN
,
1448 "config file too long (%d, max allowed is %d). ",
1449 cflen
, FLASH_CFG_MAX_SIZE
);
1453 rc
= validate_mt_off_len(sc
, mtype
, maddr
, cflen
, &addr
);
1456 cxgb_printf(sc
->dip
, CE_WARN
,
1457 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
1458 "Will try to use the config on the card, if any.\n",
1459 __func__
, mtype
, maddr
, cflen
, rc
);
1463 memwin_info(sc
, 2, &mw_base
, &mw_aperture
);
1465 off
= position_memwin(sc
, 2, addr
);
1466 n
= min(cflen
, mw_aperture
- off
);
1467 for (i
= 0; i
< n
; i
+= 4)
1468 t4_write_reg(sc
, mw_base
+ off
+ i
, *cfdata
++);
1477 * Partition chip resources for use between various PFs, VFs, etc. This is done
1478 * by uploading the firmware configuration file to the adapter and instructing
1479 * the firmware to process it.
1482 partition_resources(struct adapter
*sc
)
1485 struct fw_caps_config_cmd caps
;
1486 uint32_t mtype
, maddr
, finicsum
, cfcsum
;
1488 rc
= upload_config_file(sc
, &mtype
, &maddr
);
1490 mtype
= FW_MEMTYPE_CF_FLASH
;
1491 maddr
= t4_flash_cfg_addr(sc
);
1494 bzero(&caps
, sizeof (caps
));
1495 caps
.op_to_write
= BE_32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD
) |
1496 F_FW_CMD_REQUEST
| F_FW_CMD_READ
);
1497 caps
.cfvalid_to_len16
= BE_32(F_FW_CAPS_CONFIG_CMD_CFVALID
|
1498 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype
) |
1499 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr
>> 16) | FW_LEN16(caps
));
1500 rc
= -t4_wr_mbox(sc
, sc
->mbox
, &caps
, sizeof (caps
), &caps
);
1502 cxgb_printf(sc
->dip
, CE_WARN
,
1503 "failed to pre-process config file: %d.\n", rc
);
1507 finicsum
= ntohl(caps
.finicsum
);
1508 cfcsum
= ntohl(caps
.cfcsum
);
1509 if (finicsum
!= cfcsum
) {
1510 cxgb_printf(sc
->dip
, CE_WARN
,
1511 "WARNING: config file checksum mismatch: %08x %08x\n",
1514 sc
->cfcsum
= cfcsum
;
1516 /* TODO: Need to configure this correctly */
1517 caps
.toecaps
= htons(FW_CAPS_CONFIG_TOE
);
1521 /* TODO: Disable VNIC cap for now */
1522 caps
.niccaps
^= htons(FW_CAPS_CONFIG_NIC_VM
);
1524 caps
.op_to_write
= htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD
) |
1525 F_FW_CMD_REQUEST
| F_FW_CMD_WRITE
);
1526 caps
.cfvalid_to_len16
= htonl(FW_LEN16(caps
));
1527 rc
= -t4_wr_mbox(sc
, sc
->mbox
, &caps
, sizeof (caps
), NULL
);
1529 cxgb_printf(sc
->dip
, CE_WARN
,
1530 "failed to process config file: %d.\n", rc
);
1538 * Tweak configuration based on module parameters, etc. Most of these have
1539 * defaults assigned to them by Firmware Configuration Files (if we're using
1540 * them) but need to be explicitly set if we're using hard-coded
1541 * initialization. But even in the case of using Firmware Configuration
1542 * Files, we'd like to expose the ability to change these via module
1543 * parameters so these are essentially common tweaks/settings for
1544 * Configuration Files and hard-coded initialization ...
1547 adap__pre_init_tweaks(struct adapter
*sc
)
1549 int rx_dma_offset
= 2; /* Offset of RX packets into DMA buffers */
1552 * Fix up various Host-Dependent Parameters like Page Size, Cache
1553 * Line Size, etc. The firmware default is for a 4KB Page Size and
1554 * 64B Cache Line Size ...
1556 (void) t4_fixup_host_params_compat(sc
, PAGE_SIZE
, CACHE_LINE
, T5_LAST_REV
);
1558 t4_set_reg_field(sc
, A_SGE_CONTROL
,
1559 V_PKTSHIFT(M_PKTSHIFT
), V_PKTSHIFT(rx_dma_offset
));
1564 * Retrieve parameters that are needed (or nice to have) prior to calling
1565 * t4_sge_init and t4_fw_initialize.
1568 get_params__pre_init(struct adapter
*sc
)
1571 uint32_t param
[2], val
[2];
1572 struct fw_devlog_cmd cmd
;
1573 struct devlog_params
*dlog
= &sc
->params
.devlog
;
1576 * Grab the raw VPD parameters.
1578 rc
= -t4_get_raw_vpd_params(sc
, &sc
->params
.vpd
);
1580 cxgb_printf(sc
->dip
, CE_WARN
,
1581 "failed to query VPD parameters (pre_init): %d.\n", rc
);
1585 param
[0] = FW_PARAM_DEV(PORTVEC
);
1586 param
[1] = FW_PARAM_DEV(CCLK
);
1587 rc
= -t4_query_params(sc
, sc
->mbox
, sc
->pf
, 0, 2, param
, val
);
1589 cxgb_printf(sc
->dip
, CE_WARN
,
1590 "failed to query parameters (pre_init): %d.\n", rc
);
1594 sc
->params
.portvec
= val
[0];
1595 sc
->params
.nports
= 0;
1597 sc
->params
.nports
++;
1598 val
[0] &= val
[0] - 1;
1601 sc
->params
.vpd
.cclk
= val
[1];
1603 /* Read device log parameters. */
1604 bzero(&cmd
, sizeof (cmd
));
1605 cmd
.op_to_write
= htonl(V_FW_CMD_OP(FW_DEVLOG_CMD
) |
1606 F_FW_CMD_REQUEST
| F_FW_CMD_READ
);
1607 cmd
.retval_len16
= htonl(FW_LEN16(cmd
));
1608 rc
= -t4_wr_mbox(sc
, sc
->mbox
, &cmd
, sizeof (cmd
), &cmd
);
1610 cxgb_printf(sc
->dip
, CE_WARN
,
1611 "failed to get devlog parameters: %d.\n", rc
);
1612 bzero(dlog
, sizeof (*dlog
));
1613 rc
= 0; /* devlog isn't critical for device operation */
1615 val
[0] = ntohl(cmd
.memtype_devlog_memaddr16_devlog
);
1616 dlog
->memtype
= G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val
[0]);
1617 dlog
->start
= G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val
[0]) << 4;
1618 dlog
->size
= ntohl(cmd
.memsize_devlog
);
1625 * Retrieve various parameters that are of interest to the driver. The device
1626 * has been initialized by the firmware at this point.
1629 get_params__post_init(struct adapter
*sc
)
1632 uint32_t param
[7], val
[7];
1633 struct fw_caps_config_cmd caps
;
1635 param
[0] = FW_PARAM_PFVF(IQFLINT_START
);
1636 param
[1] = FW_PARAM_PFVF(EQ_START
);
1637 param
[2] = FW_PARAM_PFVF(FILTER_START
);
1638 param
[3] = FW_PARAM_PFVF(FILTER_END
);
1639 param
[4] = FW_PARAM_PFVF(L2T_START
);
1640 param
[5] = FW_PARAM_PFVF(L2T_END
);
1641 rc
= -t4_query_params(sc
, sc
->mbox
, sc
->pf
, 0, 6, param
, val
);
1643 cxgb_printf(sc
->dip
, CE_WARN
,
1644 "failed to query parameters (post_init): %d.\n", rc
);
1648 /* LINTED: E_ASSIGN_NARROW_CONV */
1649 sc
->sge
.iq_start
= val
[0];
1650 sc
->sge
.eq_start
= val
[1];
1651 sc
->tids
.ftid_base
= val
[2];
1652 sc
->tids
.nftids
= val
[3] - val
[2] + 1;
1653 sc
->vres
.l2t
.start
= val
[4];
1654 sc
->vres
.l2t
.size
= val
[5] - val
[4] + 1;
1656 /* get capabilites */
1657 bzero(&caps
, sizeof (caps
));
1658 caps
.op_to_write
= htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD
) |
1659 F_FW_CMD_REQUEST
| F_FW_CMD_READ
);
1660 caps
.cfvalid_to_len16
= htonl(FW_LEN16(caps
));
1661 rc
= -t4_wr_mbox(sc
, sc
->mbox
, &caps
, sizeof (caps
), &caps
);
1663 cxgb_printf(sc
->dip
, CE_WARN
,
1664 "failed to get card capabilities: %d.\n", rc
);
1668 if (caps
.toecaps
!= 0) {
1669 /* query offload-related parameters */
1670 param
[0] = FW_PARAM_DEV(NTID
);
1671 param
[1] = FW_PARAM_PFVF(SERVER_START
);
1672 param
[2] = FW_PARAM_PFVF(SERVER_END
);
1673 param
[3] = FW_PARAM_PFVF(TDDP_START
);
1674 param
[4] = FW_PARAM_PFVF(TDDP_END
);
1675 param
[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ
);
1676 rc
= -t4_query_params(sc
, sc
->mbox
, sc
->pf
, 0, 6, param
, val
);
1678 cxgb_printf(sc
->dip
, CE_WARN
,
1679 "failed to query TOE parameters: %d.\n", rc
);
1682 sc
->tids
.ntids
= val
[0];
1683 sc
->tids
.natids
= min(sc
->tids
.ntids
/ 2, MAX_ATIDS
);
1684 sc
->tids
.stid_base
= val
[1];
1685 sc
->tids
.nstids
= val
[2] - val
[1] + 1;
1686 sc
->vres
.ddp
.start
= val
[3];
1687 sc
->vres
.ddp
.size
= val
[4] - val
[3] + 1;
1688 sc
->params
.ofldq_wr_cred
= val
[5];
1689 sc
->params
.offload
= 1;
1692 /* These are finalized by FW initialization, load their values now */
1693 val
[0] = t4_read_reg(sc
, A_TP_TIMER_RESOLUTION
);
1694 sc
->params
.tp
.tre
= G_TIMERRESOLUTION(val
[0]);
1695 sc
->params
.tp
.dack_re
= G_DELAYEDACKRESOLUTION(val
[0]);
1696 t4_read_mtu_tbl(sc
, sc
->params
.mtus
, NULL
);
1702 set_params__post_init(struct adapter
*sc
)
1704 uint32_t param
, val
;
1706 /* ask for encapsulated CPLs */
1707 param
= FW_PARAM_PFVF(CPLFW4MSG_ENCAP
);
1709 (void)t4_set_params(sc
, sc
->mbox
, sc
->pf
, 0, 1, ¶m
, &val
);
1716 setup_memwin(struct adapter
*sc
)
1718 pci_regspec_t
*data
;
1722 uintptr_t mem_win0_base
, mem_win1_base
, mem_win2_base
;
1723 uintptr_t mem_win2_aperture
;
1725 rc
= ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, sc
->dip
,
1726 DDI_PROP_DONTPASS
, "assigned-addresses", (int **)&data
, &n
);
1727 if (rc
!= DDI_SUCCESS
) {
1728 cxgb_printf(sc
->dip
, CE_WARN
,
1729 "failed to lookup \"assigned-addresses\" property: %d", rc
);
1732 n
/= sizeof (*data
);
1734 bar0
= ((uint64_t)data
[0].pci_phys_mid
<< 32) | data
[0].pci_phys_low
;
1735 ddi_prop_free(data
);
1737 if (is_t4(sc
->params
.chip
)) {
1738 mem_win0_base
= bar0
+ MEMWIN0_BASE
;
1739 mem_win1_base
= bar0
+ MEMWIN1_BASE
;
1740 mem_win2_base
= bar0
+ MEMWIN2_BASE
;
1741 mem_win2_aperture
= MEMWIN2_APERTURE
;
1743 /* For T5, only relative offset inside the PCIe BAR is passed */
1744 mem_win0_base
= MEMWIN0_BASE
;
1745 mem_win1_base
= MEMWIN1_BASE
;
1746 mem_win2_base
= MEMWIN2_BASE_T5
;
1747 mem_win2_aperture
= MEMWIN2_APERTURE_T5
;
1750 t4_write_reg(sc
, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN
, 0),
1751 mem_win0_base
| V_BIR(0) |
1752 V_WINDOW(ilog2(MEMWIN0_APERTURE
) - 10));
1754 t4_write_reg(sc
, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN
, 1),
1755 mem_win1_base
| V_BIR(0) |
1756 V_WINDOW(ilog2(MEMWIN1_APERTURE
) - 10));
1758 t4_write_reg(sc
, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN
, 2),
1759 mem_win2_base
| V_BIR(0) |
1760 V_WINDOW(ilog2(mem_win2_aperture
) - 10));
1763 (void)t4_read_reg(sc
, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN
, 2));
1767 * Positions the memory window such that it can be used to access the specified
1768 * address in the chip's address space. The return value is the offset of addr
1769 * from the start of the window.
1772 position_memwin(struct adapter
*sc
, int n
, uint32_t addr
)
1778 cxgb_printf(sc
->dip
, CE_WARN
,
1779 "addr (0x%x) is not at a 4B boundary.\n", addr
);
1783 if (is_t4(sc
->params
.chip
)) {
1785 start
= addr
& ~0xf; /* start must be 16B aligned */
1787 pf
= V_PFNUM(sc
->pf
);
1788 start
= addr
& ~0x7f; /* start must be 128B aligned */
1790 reg
= PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET
, n
);
1792 t4_write_reg(sc
, reg
, start
| pf
);
1793 (void) t4_read_reg(sc
, reg
);
1795 return (addr
- start
);
1800 * Reads the named property and fills up the "data" array (which has at least
1801 * "count" elements). We first try and lookup the property for our dev_t and
1802 * then retry with DDI_DEV_T_ANY if it's not found.
1804 * Returns non-zero if the property was found and "data" has been updated.
1807 prop_lookup_int_array(struct adapter
*sc
, char *name
, int *data
, uint_t count
)
1809 dev_info_t
*dip
= sc
->dip
;
1810 dev_t dev
= sc
->dev
;
1814 rc
= ddi_prop_lookup_int_array(dev
, dip
, DDI_PROP_DONTPASS
,
1816 if (rc
== DDI_PROP_SUCCESS
)
1819 if (rc
!= DDI_PROP_NOT_FOUND
) {
1820 cxgb_printf(dip
, CE_WARN
,
1821 "failed to lookup property %s for minor %d: %d.",
1822 name
, getminor(dev
), rc
);
1826 rc
= ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
,
1828 if (rc
== DDI_PROP_SUCCESS
)
1831 if (rc
!= DDI_PROP_NOT_FOUND
) {
1832 cxgb_printf(dip
, CE_WARN
,
1833 "failed to lookup property %s: %d.", name
, rc
);
1841 cxgb_printf(dip
, CE_NOTE
,
1842 "property %s has too many elements (%d), ignoring extras",
1846 for (i
= 0; i
< n
&& i
< count
; i
++)
1854 prop_lookup_int(struct adapter
*sc
, char *name
, int defval
)
1858 rc
= ddi_prop_get_int(sc
->dev
, sc
->dip
, DDI_PROP_DONTPASS
, name
, -1);
1862 return (ddi_prop_get_int(DDI_DEV_T_ANY
, sc
->dip
, DDI_PROP_DONTPASS
,
1867 init_driver_props(struct adapter
*sc
, struct driver_properties
*p
)
1869 dev_t dev
= sc
->dev
;
1870 dev_info_t
*dip
= sc
->dip
;
1872 uint_t tmr
[SGE_NTIMERS
] = {5, 10, 20, 50, 100, 200};
1873 uint_t cnt
[SGE_NCOUNTERS
] = {1, 8, 16, 32}; /* 63 max */
1878 data
= &p
->timer_val
[0];
1879 for (i
= 0; i
< SGE_NTIMERS
; i
++)
1881 (void) prop_lookup_int_array(sc
, "holdoff-timer-values", data
,
1883 for (i
= 0; i
< SGE_NTIMERS
; i
++) {
1885 if (data
[i
] > limit
) {
1886 cxgb_printf(dip
, CE_WARN
,
1887 "holdoff timer %d is too high (%d), lowered to %d.",
1892 (void) ddi_prop_update_int_array(dev
, dip
, "holdoff-timer-values",
1896 * Holdoff packet counter
1898 data
= &p
->counter_val
[0];
1899 for (i
= 0; i
< SGE_NCOUNTERS
; i
++)
1901 (void) prop_lookup_int_array(sc
, "holdoff-pkt-counter-values", data
,
1903 for (i
= 0; i
< SGE_NCOUNTERS
; i
++) {
1904 int limit
= M_THRESHOLD_0
;
1905 if (data
[i
] > limit
) {
1906 cxgb_printf(dip
, CE_WARN
,
1907 "holdoff pkt-counter %d is too high (%d), "
1908 "lowered to %d.", i
, data
[i
], limit
);
1912 (void) ddi_prop_update_int_array(dev
, dip
, "holdoff-pkt-counter-values",
1913 data
, SGE_NCOUNTERS
);
1916 * Maximum # of tx and rx queues to use for each
1917 * 100G, 40G, 25G, 10G and 1G port.
1919 p
->max_ntxq_10g
= prop_lookup_int(sc
, "max-ntxq-10G-port", 8);
1920 (void) ddi_prop_update_int(dev
, dip
, "max-ntxq-10G-port",
1923 p
->max_nrxq_10g
= prop_lookup_int(sc
, "max-nrxq-10G-port", 8);
1924 (void) ddi_prop_update_int(dev
, dip
, "max-nrxq-10G-port",
1927 p
->max_ntxq_1g
= prop_lookup_int(sc
, "max-ntxq-1G-port", 2);
1928 (void) ddi_prop_update_int(dev
, dip
, "max-ntxq-1G-port",
1931 p
->max_nrxq_1g
= prop_lookup_int(sc
, "max-nrxq-1G-port", 2);
1932 (void) ddi_prop_update_int(dev
, dip
, "max-nrxq-1G-port",
1935 #ifdef TCP_OFFLOAD_ENABLE
1936 p
->max_nofldtxq_10g
= prop_lookup_int(sc
, "max-nofldtxq-10G-port", 8);
1937 (void) ddi_prop_update_int(dev
, dip
, "max-ntxq-10G-port",
1938 p
->max_nofldtxq_10g
);
1940 p
->max_nofldrxq_10g
= prop_lookup_int(sc
, "max-nofldrxq-10G-port", 2);
1941 (void) ddi_prop_update_int(dev
, dip
, "max-nrxq-10G-port",
1942 p
->max_nofldrxq_10g
);
1944 p
->max_nofldtxq_1g
= prop_lookup_int(sc
, "max-nofldtxq-1G-port", 2);
1945 (void) ddi_prop_update_int(dev
, dip
, "max-ntxq-1G-port",
1946 p
->max_nofldtxq_1g
);
1948 p
->max_nofldrxq_1g
= prop_lookup_int(sc
, "max-nofldrxq-1G-port", 1);
1949 (void) ddi_prop_update_int(dev
, dip
, "max-nrxq-1G-port",
1950 p
->max_nofldrxq_1g
);
1954 * Holdoff parameters for 10G and 1G ports.
1956 p
->tmr_idx_10g
= prop_lookup_int(sc
, "holdoff-timer-idx-10G", 0);
1957 (void) ddi_prop_update_int(dev
, dip
, "holdoff-timer-idx-10G",
1960 p
->pktc_idx_10g
= prop_lookup_int(sc
, "holdoff-pktc-idx-10G", 2);
1961 (void) ddi_prop_update_int(dev
, dip
, "holdoff-pktc-idx-10G",
1964 p
->tmr_idx_1g
= prop_lookup_int(sc
, "holdoff-timer-idx-1G", 0);
1965 (void) ddi_prop_update_int(dev
, dip
, "holdoff-timer-idx-1G",
1968 p
->pktc_idx_1g
= prop_lookup_int(sc
, "holdoff-pktc-idx-1G", 2);
1969 (void) ddi_prop_update_int(dev
, dip
, "holdoff-pktc-idx-1G",
1973 * Size (number of entries) of each tx and rx queue.
1975 i
= prop_lookup_int(sc
, "qsize-txq", TX_EQ_QSIZE
);
1976 p
->qsize_txq
= max(i
, 128);
1977 if (p
->qsize_txq
!= i
) {
1978 cxgb_printf(dip
, CE_WARN
,
1979 "using %d instead of %d as the tx queue size",
1982 (void) ddi_prop_update_int(dev
, dip
, "qsize-txq", p
->qsize_txq
);
1984 i
= prop_lookup_int(sc
, "qsize-rxq", RX_IQ_QSIZE
);
1985 p
->qsize_rxq
= max(i
, 128);
1986 while (p
->qsize_rxq
& 7)
1988 if (p
->qsize_rxq
!= i
) {
1989 cxgb_printf(dip
, CE_WARN
,
1990 "using %d instead of %d as the rx queue size",
1993 (void) ddi_prop_update_int(dev
, dip
, "qsize-rxq", p
->qsize_rxq
);
1996 * Interrupt types allowed.
1997 * Bits 0, 1, 2 = INTx, MSI, MSI-X respectively. See sys/ddi_intr.h
1999 p
->intr_types
= prop_lookup_int(sc
, "interrupt-types",
2000 DDI_INTR_TYPE_MSIX
| DDI_INTR_TYPE_MSI
| DDI_INTR_TYPE_FIXED
);
2001 (void) ddi_prop_update_int(dev
, dip
, "interrupt-types", p
->intr_types
);
2004 * Forwarded interrupt queues. Create this property to force the driver
2005 * to use forwarded interrupt queues.
2007 if (ddi_prop_exists(dev
, dip
, DDI_PROP_DONTPASS
,
2008 "interrupt-forwarding") != 0 ||
2009 ddi_prop_exists(DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
,
2010 "interrupt-forwarding") != 0) {
2012 (void) ddi_prop_create(dev
, dip
, DDI_PROP_CANSLEEP
,
2013 "interrupt-forwarding", NULL
, 0);
2018 * 0 to disable, 1 to enable
2020 p
->wc
= prop_lookup_int(sc
, "write-combine", 1);
2021 cxgb_printf(dip
, CE_WARN
, "write-combine: using of %d", p
->wc
);
2022 if (p
->wc
!= 0 && p
->wc
!= 1) {
2023 cxgb_printf(dip
, CE_WARN
,
2024 "write-combine: using 1 instead of %d", p
->wc
);
2027 (void) ddi_prop_update_int(dev
, dip
, "write-combine", p
->wc
);
2029 p
->t4_fw_install
= prop_lookup_int(sc
, "t4_fw_install", 1);
2030 if (p
->t4_fw_install
!= 0 && p
->t4_fw_install
!= 2)
2031 p
->t4_fw_install
= 1;
2032 (void) ddi_prop_update_int(dev
, dip
, "t4_fw_install", p
->t4_fw_install
);
2034 /* Multiple Rings */
2035 p
->multi_rings
= prop_lookup_int(sc
, "multi-rings", 1);
2036 if (p
->multi_rings
!= 0 && p
->multi_rings
!= 1) {
2037 cxgb_printf(dip
, CE_NOTE
,
2038 "multi-rings: using value 1 instead of %d", p
->multi_rings
);
2042 (void) ddi_prop_update_int(dev
, dip
, "multi-rings", p
->multi_rings
);
2048 remove_extra_props(struct adapter
*sc
, int n10g
, int n1g
)
2051 (void) ddi_prop_remove(sc
->dev
, sc
->dip
, "max-ntxq-10G-port");
2052 (void) ddi_prop_remove(sc
->dev
, sc
->dip
, "max-nrxq-10G-port");
2053 (void) ddi_prop_remove(sc
->dev
, sc
->dip
,
2054 "holdoff-timer-idx-10G");
2055 (void) ddi_prop_remove(sc
->dev
, sc
->dip
,
2056 "holdoff-pktc-idx-10G");
2060 (void) ddi_prop_remove(sc
->dev
, sc
->dip
, "max-ntxq-1G-port");
2061 (void) ddi_prop_remove(sc
->dev
, sc
->dip
, "max-nrxq-1G-port");
2062 (void) ddi_prop_remove(sc
->dev
, sc
->dip
,
2063 "holdoff-timer-idx-1G");
2064 (void) ddi_prop_remove(sc
->dev
, sc
->dip
, "holdoff-pktc-idx-1G");
2071 cfg_itype_and_nqueues(struct adapter
*sc
, int n10g
, int n1g
,
2072 struct intrs_and_queues
*iaq
)
2074 struct driver_properties
*p
= &sc
->props
;
2075 int rc
, itype
, itypes
, navail
, nc
, nrxq10g
, nrxq1g
, n
;
2076 int nofldrxq10g
= 0, nofldrxq1g
= 0;
2078 bzero(iaq
, sizeof (*iaq
));
2079 nc
= ncpus
; /* our snapshot of the number of CPUs */
2080 iaq
->ntxq10g
= min(nc
, p
->max_ntxq_10g
);
2081 iaq
->ntxq1g
= min(nc
, p
->max_ntxq_1g
);
2082 iaq
->nrxq10g
= nrxq10g
= min(nc
, p
->max_nrxq_10g
);
2083 iaq
->nrxq1g
= nrxq1g
= min(nc
, p
->max_nrxq_1g
);
2084 #ifdef TCP_OFFLOAD_ENABLE
2085 iaq
->nofldtxq10g
= min(nc
, p
->max_nofldtxq_10g
);
2086 iaq
->nofldtxq1g
= min(nc
, p
->max_nofldtxq_1g
);
2087 iaq
->nofldrxq10g
= nofldrxq10g
= min(nc
, p
->max_nofldrxq_10g
);
2088 iaq
->nofldrxq1g
= nofldrxq1g
= min(nc
, p
->max_nofldrxq_1g
);
2091 rc
= ddi_intr_get_supported_types(sc
->dip
, &itypes
);
2092 if (rc
!= DDI_SUCCESS
) {
2093 cxgb_printf(sc
->dip
, CE_WARN
,
2094 "failed to determine supported interrupt types: %d", rc
);
2098 for (itype
= DDI_INTR_TYPE_MSIX
; itype
; itype
>>= 1) {
2099 ASSERT(itype
== DDI_INTR_TYPE_MSIX
||
2100 itype
== DDI_INTR_TYPE_MSI
||
2101 itype
== DDI_INTR_TYPE_FIXED
);
2103 if ((itype
& itypes
& p
->intr_types
) == 0)
2104 continue; /* not supported or not allowed */
2107 rc
= ddi_intr_get_navail(sc
->dip
, itype
, &navail
);
2108 if (rc
!= DDI_SUCCESS
|| navail
== 0) {
2109 cxgb_printf(sc
->dip
, CE_WARN
,
2110 "failed to get # of interrupts for type %d: %d",
2112 continue; /* carry on */
2115 iaq
->intr_type
= itype
;
2120 * Best option: an interrupt vector for errors, one for the
2121 * firmware event queue, and one each for each rxq (NIC as well
2124 iaq
->nirq
= T4_EXTRA_INTR
;
2125 iaq
->nirq
+= n10g
* (nrxq10g
+ nofldrxq10g
);
2126 iaq
->nirq
+= n1g
* (nrxq1g
+ nofldrxq1g
);
2128 if (iaq
->nirq
<= navail
&&
2129 (itype
!= DDI_INTR_TYPE_MSI
|| ISP2(iaq
->nirq
))) {
2135 * Second best option: an interrupt vector for errors, one for
2136 * the firmware event queue, and one each for either NIC or
2139 iaq
->nirq
= T4_EXTRA_INTR
;
2140 iaq
->nirq
+= n10g
* max(nrxq10g
, nofldrxq10g
);
2141 iaq
->nirq
+= n1g
* max(nrxq1g
, nofldrxq1g
);
2142 if (iaq
->nirq
<= navail
&&
2143 (itype
!= DDI_INTR_TYPE_MSI
|| ISP2(iaq
->nirq
))) {
2149 * Next best option: an interrupt vector for errors, one for the
2150 * firmware event queue, and at least one per port. At this
2151 * point we know we'll have to downsize nrxq or nofldrxq to fit
2152 * what's available to us.
2154 iaq
->nirq
= T4_EXTRA_INTR
;
2155 iaq
->nirq
+= n10g
+ n1g
;
2156 if (iaq
->nirq
<= navail
) {
2157 int leftover
= navail
- iaq
->nirq
;
2160 int target
= max(nrxq10g
, nofldrxq10g
);
2163 while (n
< target
&& leftover
>= n10g
) {
2168 iaq
->nrxq10g
= min(n
, nrxq10g
);
2169 #ifdef TCP_OFFLOAD_ENABLE
2170 iaq
->nofldrxq10g
= min(n
, nofldrxq10g
);
2175 int target
= max(nrxq1g
, nofldrxq1g
);
2178 while (n
< target
&& leftover
>= n1g
) {
2183 iaq
->nrxq1g
= min(n
, nrxq1g
);
2184 #ifdef TCP_OFFLOAD_ENABLE
2185 iaq
->nofldrxq1g
= min(n
, nofldrxq1g
);
2189 /* We have arrived at a minimum value required to enable
2190 * per queue irq(either NIC or offload). Thus for non-
2191 * offload case, we will get a vector per queue, while
2192 * offload case, we will get a vector per offload/NIC q.
2193 * Hence enable Interrupt forwarding only for offload
2196 #ifdef TCP_OFFLOAD_ENABLE
2197 if (itype
!= DDI_INTR_TYPE_MSI
|| ISP2(iaq
->nirq
)) {
2200 if (itype
!= DDI_INTR_TYPE_MSI
) {
2207 * Least desirable option: one interrupt vector for everything.
2209 iaq
->nirq
= iaq
->nrxq10g
= iaq
->nrxq1g
= 1;
2210 #ifdef TCP_OFFLOAD_ENABLE
2211 iaq
->nofldrxq10g
= iaq
->nofldrxq1g
= 1;
2219 cxgb_printf(sc
->dip
, CE_WARN
,
2220 "failed to find a usable interrupt type. supported=%d, allowed=%d",
2221 itypes
, p
->intr_types
);
2222 return (DDI_FAILURE
);
2226 add_child_node(struct adapter
*sc
, int idx
)
2229 struct port_info
*pi
;
2231 if (idx
< 0 || idx
>= sc
->params
.nports
)
2236 return (ENODEV
); /* t4_port_init failed earlier */
2239 if (pi
->dip
!= NULL
) {
2240 rc
= 0; /* EEXIST really, but then bus_config fails */
2244 rc
= ndi_devi_alloc(sc
->dip
, T4_PORT_NAME
, DEVI_SID_NODEID
, &pi
->dip
);
2245 if (rc
!= DDI_SUCCESS
|| pi
->dip
== NULL
) {
2250 (void) ddi_set_parent_data(pi
->dip
, pi
);
2251 (void) ndi_devi_bind_driver(pi
->dip
, 0);
static int
remove_child_node(struct adapter *sc, int idx)
{
	int rc;
	struct port_info *pi;

	if (idx < 0 || idx >= sc->params.nports)
		return (EINVAL);

	pi = sc->port[idx];
	if (pi == NULL)
		return (ENODEV);

	if (pi->dip == NULL) {
		rc = ENODEV;
		goto done;
	}

	rc = ndi_devi_free(pi->dip);
	if (rc == 0)
		pi->dip = NULL;
done:
	return (rc);
}
#define	KS_UINIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG)
#define	KS_CINIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR)
#define	KS_U_SET(x, y)	kstatp->x.value.ul = (y)
#define	KS_C_SET(x, ...)	\
	(void) snprintf(kstatp->x.value.c, 16, __VA_ARGS__)
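/*
 * For reference, a KS_C_SET() use such as
 *	KS_C_SET(bus_width, "x%d lanes", p->width);
 * expands to
 *	(void) snprintf(kstatp->bus_width.value.c, 16, "x%d lanes", p->width);
 * i.e. the formatted string is clipped to the 16-byte value.c buffer of
 * a KSTAT_DATA_CHAR kstat, so every string set below must fit in 15
 * characters plus the terminating NUL.
 */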
struct t4_kstats {
	kstat_named_t chip_ver;
	kstat_named_t fw_vers;
	kstat_named_t tp_vers;
	kstat_named_t driver_version;
	kstat_named_t serial_number;
	kstat_named_t ec_level;
	kstat_named_t id;
	kstat_named_t bus_type;
	kstat_named_t bus_width;
	kstat_named_t bus_speed;
	kstat_named_t core_clock;
	kstat_named_t port_cnt;
	kstat_named_t port_type;
	kstat_named_t pci_vendor_id;
	kstat_named_t pci_device_id;
};
static kstat_t *
setup_kstats(struct adapter *sc)
{
	kstat_t *ksp;
	struct t4_kstats *kstatp;
	int ndata;
	struct pci_params *p = &sc->params.pci;
	struct vpd_params *v = &sc->params.vpd;
	uint16_t pci_vendor, pci_device;

	ndata = sizeof (struct t4_kstats) / sizeof (kstat_named_t);

	ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "config",
	    "nexus", KSTAT_TYPE_NAMED, ndata, 0);
	if (ksp == NULL) {
		cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
		return (NULL);
	}

	kstatp = (struct t4_kstats *)ksp->ks_data;

	KS_UINIT(chip_ver);
	KS_CINIT(fw_vers);
	KS_CINIT(tp_vers);
	KS_CINIT(driver_version);
	KS_CINIT(serial_number);
	KS_CINIT(ec_level);
	KS_CINIT(id);
	KS_CINIT(bus_type);
	KS_CINIT(bus_width);
	KS_CINIT(bus_speed);
	KS_UINIT(core_clock);
	KS_UINIT(port_cnt);
	KS_CINIT(port_type);
	KS_CINIT(pci_vendor_id);
	KS_CINIT(pci_device_id);

	KS_U_SET(chip_ver, sc->params.chip);
	KS_C_SET(fw_vers, "%d.%d.%d.%d",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
	KS_C_SET(tp_vers, "%d.%d.%d.%d",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
	KS_C_SET(driver_version, DRV_VERSION);
	KS_C_SET(serial_number, "%s", v->sn);
	KS_C_SET(ec_level, "%s", v->ec);
	KS_C_SET(id, "%s", v->id);
	KS_C_SET(bus_type, "pci-express");
	KS_C_SET(bus_width, "x%d lanes", p->width);
	KS_C_SET(bus_speed, "%d", p->speed);
	KS_U_SET(core_clock, v->cclk);
	KS_U_SET(port_cnt, sc->params.nports);

	t4_os_pci_read_cfg2(sc, PCI_CONF_VENID, &pci_vendor);
	KS_C_SET(pci_vendor_id, "0x%x", pci_vendor);

	t4_os_pci_read_cfg2(sc, PCI_CONF_DEVID, &pci_device);
	KS_C_SET(pci_device_id, "0x%x", pci_device);

	KS_C_SET(port_type, "%s/%s/%s/%s",
	    print_port_speed(sc->port[0]),
	    print_port_speed(sc->port[1]),
	    print_port_speed(sc->port[2]),
	    print_port_speed(sc->port[3]));

	/* Do NOT set ksp->ks_update.  These kstats do not change. */

	/* Install the kstat */
	ksp->ks_private = (void *)sc;
	kstat_install(ksp);

	return (ksp);
}
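/*
 * These "config" kstats are static snapshots (no ks_update), readable
 * from userland with, e.g. (assuming T4_NEXUS_NAME resolves to "t4nex"
 * and instance 0):
 *	kstat -m t4nex -i 0 -n config
 */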
struct t4_wc_kstats {
	kstat_named_t write_coal_success;
	kstat_named_t write_coal_failure;
};

static kstat_t *
setup_wc_kstats(struct adapter *sc)
{
	kstat_t *ksp;
	struct t4_wc_kstats *kstatp;
	int ndata;

	ndata = sizeof (struct t4_wc_kstats) / sizeof (kstat_named_t);
	ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "stats",
	    "nexus", KSTAT_TYPE_NAMED, ndata, 0);
	if (ksp == NULL) {
		cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
		return (NULL);
	}

	kstatp = (struct t4_wc_kstats *)ksp->ks_data;

	KS_UINIT(write_coal_success);
	KS_UINIT(write_coal_failure);

	ksp->ks_update = update_wc_kstats;
	/* Install the kstat */
	ksp->ks_private = (void *)sc;
	kstat_install(ksp);

	return (ksp);
}
static int
update_wc_kstats(kstat_t *ksp, int rw)
{
	struct t4_wc_kstats *kstatp = (struct t4_wc_kstats *)ksp->ks_data;
	struct adapter *sc = ksp->ks_private;
	uint32_t wc_total, wc_success, wc_failure;

	if (rw == KSTAT_WRITE)
		return (0);

	if (is_t5(sc->params.chip)) {
		wc_total = t4_read_reg(sc, A_SGE_STAT_TOTAL);
		wc_failure = t4_read_reg(sc, A_SGE_STAT_MATCH);
		wc_success = wc_total - wc_failure;
	} else {
		wc_total = 0;
		wc_success = 0;
		wc_failure = 0;
	}

	KS_U_SET(write_coal_success, wc_success);
	KS_U_SET(write_coal_failure, wc_failure);

	return (0);
}
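/*
 * Reading note for the update function above: as used here, the value
 * read from A_SGE_STAT_TOTAL is the total count, the one from
 * A_SGE_STAT_MATCH is reported as write_coal_failure, and successes
 * are derived as total - failure.  Only T5 exposes the counters this
 * way; on other chips both kstats read as zero.
 */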
int
adapter_full_init(struct adapter *sc)
{
	int i, rc = 0;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	rc = t4_setup_adapter_queues(sc);
	if (rc != 0)
		goto done;

	if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
		(void) ddi_intr_block_enable(sc->intr_handle, sc->intr_count);
	else {
		for (i = 0; i < sc->intr_count; i++)
			(void) ddi_intr_enable(sc->intr_handle[i]);
	}
	t4_intr_enable(sc);
	sc->flags |= FULL_INIT_DONE;

#ifdef TCP_OFFLOAD_ENABLE
	/* TODO: wrong place to enable TOE capability */
	if (is_offload(sc) != 0) {
		for_each_port(sc, i) {
			struct port_info *pi = sc->port[i];
			rc = toe_capability(pi, 1);
			if (rc != 0) {
				cxgb_printf(pi->dip, CE_WARN,
				    "Failed to activate toe capability: %d",
				    rc);
				rc = 0;	/* not a fatal error */
			}
		}
	}
#endif

done:
	if (rc != 0)
		(void) adapter_full_uninit(sc);

	return (rc);
}
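/*
 * For reference: when intr_cap includes DDI_INTR_FLAG_BLOCK the DDI
 * requires the vectors to be enabled and disabled as a group
 * (ddi_intr_block_enable/ddi_intr_block_disable); otherwise each
 * handle is toggled individually.  adapter_full_uninit() below mirrors
 * this split with the corresponding disable calls.
 */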
int
adapter_full_uninit(struct adapter *sc)
{
	int i, rc = 0;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
		(void) ddi_intr_block_disable(sc->intr_handle, sc->intr_count);
	else {
		for (i = 0; i < sc->intr_count; i++)
			(void) ddi_intr_disable(sc->intr_handle[i]);
	}

	rc = t4_teardown_adapter_queues(sc);
	if (rc != 0)
		return (rc);

	sc->flags &= ~FULL_INIT_DONE;

	return (0);
}
int
port_full_init(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	uint16_t *rss;
	struct sge_rxq *rxq;
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
	ASSERT((pi->flags & PORT_INIT_DONE) == 0);

	/*
	 * Allocate tx/rx/fl queues for this port.
	 */
	rc = t4_setup_port_queues(pi);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * Setup RSS for this port.
	 */
	rss = kmem_zalloc(pi->nrxq * sizeof (*rss), KM_SLEEP);
	for_each_rxq(pi, i, rxq) {
		rss[i] = rxq->iq.abs_id;
	}
	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
	    pi->rss_size, rss, pi->nrxq);
	kmem_free(rss, pi->nrxq * sizeof (*rss));
	if (rc != 0) {
		cxgb_printf(pi->dip, CE_WARN, "rss_config failed: %d", rc);
		goto done;
	}

	pi->flags |= PORT_INIT_DONE;
done:
	if (rc != 0)
		(void) port_full_uninit(pi);

	return (rc);
}
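/*
 * Worked example of the RSS setup above (illustrative values): with
 * pi->nrxq = 4 and pi->rss_size = 128, rss[] holds the four absolute
 * ingress queue ids and t4_config_rss_range() fills the port's 128
 * RSS slots by cycling through them, so slot n maps to rss[n % 4] and
 * any hashed flow lands on one of the port's four rx queues.
 */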
int
port_full_uninit(struct port_info *pi)
{
	ASSERT(pi->flags & PORT_INIT_DONE);

	(void) t4_teardown_port_queues(pi);
	pi->flags &= ~PORT_INIT_DONE;

	return (0);
}
void
enable_port_queues(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	int i;
	struct sge_iq *iq;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD_ENABLE
	struct sge_ofld_rxq *ofld_rxq;
#endif

	ASSERT(pi->flags & PORT_INIT_DONE);

	/*
	 * TODO: whatever was queued up after we set iq->state to IQS_DISABLED
	 * back in disable_port_queues will be processed now, after an
	 * unbounded delay.  This can't be good.
	 */

#ifdef TCP_OFFLOAD_ENABLE
	for_each_ofld_rxq(pi, i, ofld_rxq) {
		iq = &ofld_rxq->iq;
		if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
		    IQS_DISABLED)
			panic("%s: iq %p wasn't disabled", __func__,
			    (void *)iq);
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
		    V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
	}
#endif

	for_each_rxq(pi, i, rxq) {
		iq = &rxq->iq;
		if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
		    IQS_DISABLED)
			panic("%s: iq %p wasn't disabled", __func__,
			    (void *)iq);
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
		    V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
	}
}
void
disable_port_queues(struct port_info *pi)
{
	int i;
	struct adapter *sc = pi->adapter;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD_ENABLE
	struct sge_ofld_rxq *ofld_rxq;
#endif

	ASSERT(pi->flags & PORT_INIT_DONE);

	/*
	 * TODO: need proper implementation for all tx queues (ctrl, eth,
	 * ofld).
	 */

#ifdef TCP_OFFLOAD_ENABLE
	for_each_ofld_rxq(pi, i, ofld_rxq) {
		while (atomic_cas_uint(&ofld_rxq->iq.state, IQS_IDLE,
		    IQS_DISABLED) != IQS_IDLE)
			msleep(1);
	}
#endif

	for_each_rxq(pi, i, rxq) {
		while (atomic_cas_uint(&rxq->iq.state, IQS_IDLE,
		    IQS_DISABLED) != IQS_IDLE)
			msleep(1);
	}

	mutex_enter(&sc->sfl_lock);
#ifdef TCP_OFFLOAD_ENABLE
	for_each_ofld_rxq(pi, i, ofld_rxq)
		ofld_rxq->fl.flags |= FL_DOOMED;
#endif
	for_each_rxq(pi, i, rxq)
		rxq->fl.flags |= FL_DOOMED;
	mutex_exit(&sc->sfl_lock);
	/* TODO: need to wait for all fl's to be removed from sc->sfl */
}
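/*
 * Outline of the quiesce handshake used by the two functions above:
 * the rx handler is expected to move an iq from IQS_IDLE to IQS_BUSY
 * for the duration of each pass, so spinning on
 *	atomic_cas_uint(&iq->state, IQS_IDLE, IQS_DISABLED)
 * parks a queue only when no handler is inside it.  enable_port_queues()
 * undoes this with a single IQS_DISABLED -> IQS_IDLE transition and
 * then rearms the queue via the SGE_PF_GTS doorbell.
 */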
void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	cxgb_printf(sc->dip, CE_WARN,
	    "encountered fatal error, adapter stopped.");
}
int
t4_os_find_pci_capability(struct adapter *sc, int cap)
{
	uint16_t stat;
	uint8_t cap_ptr, cap_id;

	t4_os_pci_read_cfg2(sc, PCI_CONF_STAT, &stat);
	if ((stat & PCI_STAT_CAP) == 0)
		return (0);	/* does not implement capabilities */

	t4_os_pci_read_cfg1(sc, PCI_CONF_CAP_PTR, &cap_ptr);
	while (cap_ptr) {
		t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_ID, &cap_id);
		if (cap_id == cap)
			return (cap_ptr);	/* found */
		t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_NEXT_PTR, &cap_ptr);
	}

	return (0);	/* not found */
}
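/*
 * Usage sketch (hypothetical caller): the loop above walks the
 * standard PCI capability list, so
 *	int ptr = t4_os_find_pci_capability(sc, PCI_CAP_ID_PCI_E);
 * yields the config-space offset of the PCI Express capability, or 0
 * when the status register advertises no capability list or the id is
 * absent.
 */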
void
t4_os_portmod_changed(const struct adapter *sc, int idx)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};
	const struct port_info *pi = sc->port[idx];

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		cxgb_printf(pi->dip, CE_NOTE, "transceiver unplugged.");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		cxgb_printf(pi->dip, CE_NOTE,
		    "unknown transceiver inserted.");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		cxgb_printf(pi->dip, CE_NOTE,
		    "unsupported transceiver inserted.");
	else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str))
		cxgb_printf(pi->dip, CE_NOTE, "%s transceiver inserted.",
		    mod_str[pi->mod_type]);
	else
		cxgb_printf(pi->dip, CE_NOTE,
		    "transceiver (type %d) inserted.", pi->mod_type);
}
/* ARGSUSED */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m)
{
	/* Default CPL handler: the opcode has no registered consumer. */
	return (0);
}
int
t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
{
	cpl_handler_t *loc, new;

	if (opcode >= ARRAY_SIZE(sc->cpl_handler))
		return (EINVAL);

	new = h ? h : cpl_not_handled;
	loc = &sc->cpl_handler[opcode];
	(void) atomic_swap_ptr(loc, (void *)new);

	return (0);
}
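/*
 * Registration sketch (hypothetical handler, not defined in this
 * file): a consumer that wants CPL_RX_DATA messages routed to its own
 * function would call
 *	(void) t4_register_cpl_handler(sc, CPL_RX_DATA, my_rx_data);
 * with my_rx_data matching cpl_handler_t.  Passing h == NULL swaps
 * cpl_not_handled back in atomically.
 */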
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *data)
{
	struct cpl_fw6_msg *cpl;

	cpl = __containerof((void *)data, struct cpl_fw6_msg, data);

	cxgb_printf(sc->dip, CE_WARN, "%s fw_msg type %d", __func__,
	    cpl->type);
	return (0);
}
int
t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
{
	fw_msg_handler_t *loc, new;

	if (type >= ARRAY_SIZE(sc->fw_msg_handler))
		return (EINVAL);

	/*
	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the
	 * CPL handler dispatch table.  Reject any attempt to install a
	 * handler for these message types here.
	 */
	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
		return (EINVAL);

	new = h ? h : fw_msg_not_handled;
	loc = &sc->fw_msg_handler[type];
	(void) atomic_swap_ptr(loc, (void *)new);

	return (0);
}
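/*
 * Same pattern for firmware messages (hypothetical handler): e.g.
 *	(void) t4_register_fw_msg_handler(sc, FW6_TYPE_CMD_RPL, my_fw_cb);
 * installs a callback for that message type, and NULL restores
 * fw_msg_not_handled.  The RSSCPL types are rejected above because
 * they already arrive through the CPL dispatch table.
 */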
#ifdef TCP_OFFLOAD_ENABLE
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	if (!is_offload(sc))
		return (ENODEV);

	if (enable != 0) {
		if (isset(&sc->offload_map, pi->port_id) != 0)
			return (0);

		if (sc->offload_map == 0) {
			rc = activate_uld(sc, ULD_TOM, &sc->tom);
			if (rc != 0)
				return (rc);
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		clrbit(&sc->offload_map, pi->port_id);

		if (sc->offload_map == 0) {
			rc = deactivate_uld(&sc->tom);
			if (rc != 0) {
				setbit(&sc->offload_map, pi->port_id);
				return (rc);
			}
		}
	}

	return (0);
}
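/*
 * offload_map doubles as a reference count here: the TOM ULD is
 * activated when the first port enables TOE (the map goes from 0 to
 * nonzero) and deactivated when the last port disables it, with the
 * port's bit rolled back on failure so the map always reflects the
 * ULD state.
 */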
/*
 * Add an upper layer driver to the global list.
 */
int
t4_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	mutex_enter(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
	ui->refcount = 0;
done:
	mutex_exit(&t4_uld_list_lock);
	return (rc);
}
int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	mutex_enter(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	mutex_exit(&t4_uld_list_lock);
	return (rc);
}
static int
activate_uld(struct adapter *sc, int id, struct uld_softc *usc)
{
	int rc = EAGAIN;
	struct uld_info *ui;

	mutex_enter(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->attach(sc, &usc->softc);
			if (rc == 0) {
				ASSERT(usc->softc != NULL);
				ui->refcount++;
				usc->uld = ui;
			}
			goto done;
		}
	}
done:
	mutex_exit(&t4_uld_list_lock);

	return (rc);
}
static int
deactivate_uld(struct uld_softc *usc)
{
	int rc;

	mutex_enter(&t4_uld_list_lock);

	if (usc->uld == NULL || usc->softc == NULL) {
		rc = EINVAL;
		goto done;
	}

	rc = usc->uld->detach(usc->softc);
	if (rc == 0) {
		ASSERT(usc->uld->refcount > 0);
		usc->uld->refcount--;
		usc->uld = NULL;
		usc->softc = NULL;
	}
done:
	mutex_exit(&t4_uld_list_lock);

	return (rc);
}
#endif	/* TCP_OFFLOAD_ENABLE */
void
t4_iterate(void (*func)(int, void *), void *arg)
{
	struct adapter *sc;

	mutex_enter(&t4_adapter_list_lock);
	SLIST_FOREACH(sc, &t4_adapter_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(ddi_get_instance(sc->dip), arg);
	}
	mutex_exit(&t4_adapter_list_lock);
}
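/*
 * Usage sketch (hypothetical callback): a diagnostic that visits every
 * adapter instance could use
 *
 *	static void
 *	count_adapters(int instance, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	t4_iterate(count_adapters, &n);
 *
 * t4_adapter_list_lock is held across the callbacks, so func must not
 * block for long or re-enter any path that takes the same lock.
 */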