/**************************************************************************

Copyright (c) 2007, Chelsio Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cxgb_offload.c,v 1.5 2007/12/15 00:39:29 perry Exp $");

__FBSDID("$FreeBSD: src/sys/dev/cxgb/cxgb_offload.c,v 1.8 2007/08/17 05:57:04 kmacy Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/pciio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/ioccom.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <cxgb_include.h>
#include <dev/cxgb/cxgb_include.h>
#include "cxgb_include.h"

#include <net/if_vlan_var.h>
#include <net/route.h>
#define VALIDATE_TID 0

TAILQ_HEAD(, cxgb_client) client_list;
TAILQ_HEAD(, toedev) ofld_dev_list;
TAILQ_HEAD(, adapter) adapter_list;

static struct mtx cxgb_db_lock;
static struct rwlock adapter_list_lock;

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x100000;
static int inited = 0;
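
/*
 * client_list and ofld_dev_list above are walked and modified under
 * cxgb_db_lock; adapter_list is protected by adapter_list_lock (taken for
 * reading in is_offloading() and for writing in add_adapter()/remove_adapter()
 * below).
 */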
static inline int
offload_activated(struct toedev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	return (isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT));
}
/**
 *	cxgb_register_client - register an offload client
 *	@client: the client
 *
 *	Add the client to the client list and call back the client for each
 *	activated offload device.
 */
void
cxgb_register_client(struct cxgb_client *client)
{
	struct toedev *tdev;

	mtx_lock(&cxgb_db_lock);
	TAILQ_INSERT_TAIL(&client_list, client, client_entry);

	if (client->add) {
		TAILQ_FOREACH(tdev, &ofld_dev_list, ofld_entry) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mtx_unlock(&cxgb_db_lock);
}
/**
 *	cxgb_unregister_client - unregister an offload client
 *	@client: the client
 *
 *	Remove the client from the client list and call back the client for
 *	each activated offload device.
 */
void
cxgb_unregister_client(struct cxgb_client *client)
{
	struct toedev *tdev;

	mtx_lock(&cxgb_db_lock);
	TAILQ_REMOVE(&client_list, client, client_entry);

	if (client->remove) {
		TAILQ_FOREACH(tdev, &ofld_dev_list, ofld_entry) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mtx_unlock(&cxgb_db_lock);
}
/**
 *	cxgb_add_clients - activate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Calls back all registered clients once an offload device is activated.
 */
void
cxgb_add_clients(struct toedev *tdev)
{
	struct cxgb_client *client;

	mtx_lock(&cxgb_db_lock);
	TAILQ_FOREACH(client, &client_list, client_entry) {
		if (client->add)
			client->add(tdev);
	}
	mtx_unlock(&cxgb_db_lock);
}
/**
 *	cxgb_remove_clients - deactivate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Calls back all registered clients once an offload device is deactivated.
 */
void
cxgb_remove_clients(struct toedev *tdev)
{
	struct cxgb_client *client;

	mtx_lock(&cxgb_db_lock);
	TAILQ_FOREACH(client, &client_list, client_entry) {
		if (client->remove)
			client->remove(tdev);
	}
	mtx_unlock(&cxgb_db_lock);
}
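
/*
 * Returns non-zero if the given ifnet is one of the ports of a registered
 * adapter, i.e. the interface is capable of offload.  Walks adapter_list
 * under the read lock.
 */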
static int
is_offloading(struct ifnet *ifp)
{
	struct adapter *adapter;
	int port;

	rw_rlock(&adapter_list_lock);
	TAILQ_FOREACH(adapter, &adapter_list, adapter_entry) {
		for_each_port(adapter, port) {
			if (ifp == adapter->port[port].ifp) {
				rw_runlock(&adapter_list_lock);
				return 1;
			}
		}
	}
	rw_runlock(&adapter_list_lock);
	return 0;
}
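
/*
 * Map a MAC address (and, where VLAN support is wired up, a VLAN tag) back
 * to the port ifnet that owns it.  Returns NULL if no port matches.
 */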
static struct ifnet *
get_iff_from_mac(adapter_t *adapter, const uint8_t *mac, unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
#ifdef notyet
		const struct vlan_group *grp;
#endif
		const struct port_info *p = &adapter->port[i];
		struct ifnet *ifnet = p->ifp;

		if (!memcmp(p->hw_addr, mac, ETHER_ADDR_LEN)) {
#ifdef notyet
			if (vlan && vlan != EVL_VLID_MASK) {
				dev = grp ? grp->vlan_devices[vlan] : NULL;
			}
#endif
			return (ifnet);
		}
	}
	return (NULL);
}
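
/*
 * Workaround for rev 0 (T3A) parts after a port failover: depending on
 * whether the failover was triggered by the interface being downed or only
 * by link loss, re-program the XGMAC TX or RX control register of the
 * affected port.
 */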
static void
failover_fixup(adapter_t *adapter, int port)
{
	if (adapter->params.rev == 0) {
		struct ifnet *ifp = adapter->port[port].ifp;
		struct cmac *mac = &adapter->port[port].mac;

		if (!(ifp->if_flags & IFF_UP)) {
			/* Failover triggered by the interface ifdown */
			t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset,
				     F_TXEN);
			t3_read_reg(adapter, A_XGM_TX_CTRL + mac->offset);
		} else {
			/* Failover triggered by the interface link down */
			t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
			t3_read_reg(adapter, A_XGM_RX_CTRL + mac->offset);
			t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset,
				     F_RXEN);
		}
	}
}
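
/*
 * Handle iSCSI ULP control requests: export the ULP_RX iSCSI memory window,
 * tag mask and PDU size limits, or program a new tag mask.
 */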
static int
cxgb_ulp_iscsi_ctl(adapter_t *adapter, unsigned int req, void *data)
{
	int ret = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
				     t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		/* on rx, the iscsi pdu has to be < rx page size and the
		   whole pdu + cpl headers has to fit into one sge buffer */
		uiip->max_rxsz =
		    (unsigned int)min(adapter->params.tp.rx_pg_size,
			(adapter->sge.qs[0].fl[1].buf_size -
			    sizeof(struct cpl_rx_data) * 2 -
			    sizeof(struct cpl_rx_data_ddp)));
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		break;
	default:
		ret = EOPNOTSUPP;
	}
	return ret;
}
/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0
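
/*
 * Handle control requests from the RDMA client: export the doorbell BAR and
 * TPT/PBL/RQT memory map parameters, perform CQ operations, read adapter
 * memory, and set up CQ and control-QP contexts.  Context operations are
 * serialized with the SGE register lock.
 */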
static int
cxgb_rdma_ctl(adapter_t *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *req2 = data;

		req2->udbell_physbase = rman_get_start(adapter->regs_res);
		req2->udbell_len = rman_get_size(adapter->regs_res);
		req2->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		req2->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		req2->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		req2->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		req2->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		req2->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		req2->kdb_addr = (void *)(rman_get_start(adapter->regs_res) + A_SG_KDOORBELL);
		break;
	}
	case RDMA_CQ_OP: {
		struct rdma_cq_op *req2 = data;

		/* may be called in any context */
		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_cqcntxt_op(adapter, req2->id, req2->op,
					req2->credits);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_GET_MEM: {
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return (EINVAL);
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return (EINVAL);

		ret = t3_mc7_bd_read(mem, t->addr/8, t->len/8, (u64 *)t->buf);
		if (ret)
			return (ret);
		break;
	}
	case RDMA_CQ_SETUP: {
		struct rdma_cq_setup *req2 = data;

		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, req2->id, req2->base_addr,
					  req2->size, ASYNC_NOTIF_RSPQ,
					  req2->ovfl_mode, req2->credits,
					  req2->credit_thres);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP: {
		struct rdma_ctrlqp_setup *req2 = data;

		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
					 SGE_CNTXT_RDMA, ASYNC_NOTIF_RSPQ,
					 req2->base_addr, req2->size,
					 FW_RI_TID_START, 1, 0);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	}
	default:
		ret = EOPNOTSUPP;
	}
	return (ret);
}
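
/*
 * Generic control request dispatcher installed as tdev->ctl.  Answers queries
 * about work request limits, TID/STID ranges, the MTU table, L2 table
 * capacity, DDP parameters and ports, handles port failover notifications,
 * and forwards iSCSI and RDMA requests to the handlers above (only while
 * offload is running).
 */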
static int
cxgb_offload_ctl(struct toedev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	int port;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;  /* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters -
		    adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters -
		    adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
		    iffmacp->vlan_tag & EVL_VLID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, port)
			ports->lldevs[port] = adapter->port[port].ifp;
		break;
	case FAILOVER:
		port = *(int *)data;
		t3_port_failover(adapter, port);
		failover_fixup(adapter, port);
		break;
	case FAILOVER_DONE:
		port = *(int *)data;
		t3_failover_done(adapter, port);
		break;
	case FAILOVER_CLEAR:
		t3_failover_clear(adapter);
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return (EAGAIN);
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
		if (!offload_running(adapter))
			return (EAGAIN);
		return cxgb_rdma_ctl(adapter, req, data);
	default:
		return (EOPNOTSUPP);
	}
	return 0;
}
/*
 * Dummy handler for Rx offload packets in case we get an offload packet
 * before proper processing is set up.  This complains and drops the packet
 * as it isn't normal to get offload packets at this stage.
 */
static int
rx_offload_blackhole(struct toedev *dev, struct mbuf **m, int n)
{
	CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data 0x%x\n",
	    n, *mtod(m[0], uint32_t *));
	while (n--)
		m_freem(m[n]);
	return 0;
}

static void
dummy_neigh_update(struct toedev *dev, struct rtentry *neigh)
{
}

void
cxgb_set_dummy_ops(struct toedev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}
/*
 * Free an active-open TID.
 */
void *
cxgb_free_atid(struct toedev *tdev, int atid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->toe_tid.ctx;

	mtx_lock(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	mtx_unlock(&t->atid_lock);

	return ctx;
}
/*
 * Free a server TID and return it to the free pool.
 */
void
cxgb_free_stid(struct toedev *tdev, int stid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	mtx_lock(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	mtx_unlock(&t->stid_lock);
}
void
cxgb_insert_tid(struct toedev *tdev, struct cxgb_client *client,
	void *ctx, unsigned int tid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_add_int(&t->tids_in_use, 1);
}
/*
 * Populate a TID_RELEASE WR.  The mbuf must be already properly sized.
 */
static inline void
mk_tid_release(struct mbuf *m, unsigned int tid)
{
	struct cpl_tid_release *req;

	m_set_priority(m, CPL_PRIORITY_SETUP);
	req = mtod(m, struct cpl_tid_release *);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
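
/*
 * Deferred TID release work.  Drains td->tid_release_list (entries are
 * chained through their ctx pointers), allocating an mbuf with M_WAIT for
 * each entry and sending the corresponding CPL_TID_RELEASE.  The list lock
 * is dropped around the allocation and send.
 */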
static void
#ifdef __FreeBSD__
t3_process_tid_release_list(void *data, int pending)
#else
t3_process_tid_release_list(struct work *wk, void *data)
#endif
{
	struct mbuf *m;
	struct toedev *tdev = data;
	struct toe_data *td = TOE_DATA(tdev);

	mtx_lock(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct toe_tid_entry *p = td->tid_release_list;

		td->tid_release_list = (struct toe_tid_entry *)p->ctx;
		mtx_unlock(&td->tid_release_lock);
		m = m_get(M_WAIT, MT_DATA);

		mk_tid_release(m, p - td->tid_maps.tid_tab);
		cxgb_ofld_send(tdev, m);
		p->ctx = NULL;
		mtx_lock(&td->tid_release_lock);
	}
	mtx_unlock(&td->tid_release_lock);
}
/* use ctx as a next pointer in the tid release list */
void
cxgb_queue_tid_release(struct toedev *tdev, unsigned int tid)
{
	struct toe_data *td = TOE_DATA(tdev);
	struct toe_tid_entry *p = &td->tid_maps.tid_tab[tid];

	mtx_lock(&td->tid_release_lock);
	p->ctx = td->tid_release_list;
	td->tid_release_list = p;

	if (!p->ctx)
#ifdef __FreeBSD__
		taskqueue_enqueue(tdev->adapter->tq, &td->tid_release_task);
#else
		workqueue_enqueue(td->tid_release_task.wq, &td->tid_release_task.w, NULL);
#endif

	mtx_unlock(&td->tid_release_lock);
}
/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void
cxgb_remove_tid(struct toedev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		atomic_cmpset_ptr((uintptr_t *)&t->tid_tab[tid].ctx, (long)NULL, (long)ctx);
	else {
		struct mbuf *m;

		m = m_get(M_NOWAIT, MT_DATA);
		if (__predict_true(m != NULL)) {
			mk_tid_release(m, tid);
			cxgb_ofld_send(tdev, m);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb_queue_tid_release(tdev, tid);
	}
	atomic_add_int(&t->tids_in_use, -1);
}
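
/*
 * Allocate an active-open TID from the free list and bind it to the given
 * client and context.  The aTID returned to the caller is translated to its
 * external value by adding atid_base.
 */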
int
cxgb_alloc_atid(struct toedev *tdev, struct cxgb_client *client,
	void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	mtx_lock(&t->atid_lock);
	if (t->afree) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->toe_tid.ctx = ctx;
		p->toe_tid.client = client;
		t->atids_in_use++;
	}
	mtx_unlock(&t->atid_lock);
	return atid;
}
int
cxgb_alloc_stid(struct toedev *tdev, struct cxgb_client *client,
	void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	mtx_lock(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->toe_tid.ctx = ctx;
		p->toe_tid.client = client;
		t->stids_in_use++;
	}
	mtx_unlock(&t->stid_lock);
	return stid;
}
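
/*
 * CPL message handlers.  Each handler either forwards the message to the
 * owning client's per-opcode handler (looked up through the TID/aTID/sTID
 * maps) or logs an error and asks the caller to free the buffer.
 */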
static int
do_smt_write_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(m);

	if (rpl->status != CPL_ERR_NONE)
		log(LOG_ERR,
		    "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		    rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}
static int
do_l2t_write_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(m);

	if (rpl->status != CPL_ERR_NONE)
		log(LOG_ERR,
		    "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		    rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}
static int
do_act_open_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_act_open_rpl *rpl = cplhdr(m);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid);
	if (toe_tid->ctx && toe_tid->client && toe_tid->client->handlers &&
	    toe_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return toe_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, m,
		    toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int
do_stid_rpl(struct toedev *dev, struct mbuf *m)
{
	union opcode_tid *p = cplhdr(m);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[p->opcode]) {
		return toe_tid->client->handlers[p->opcode] (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int
do_hwtid_rpl(struct toedev *dev, struct mbuf *m)
{
	union opcode_tid *p = cplhdr(m);
	unsigned int hwtid;
	struct toe_tid_entry *toe_tid;

	printf("do_hwtid_rpl m=%p\n", m);

	hwtid = G_TID(ntohl(p->opcode_tid));

	toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[p->opcode]) {
		return toe_tid->client->handlers[p->opcode]
		    (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int
do_cr(struct toedev *dev, struct mbuf *m)
{
	struct cpl_pass_accept_req *req = cplhdr(m);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int
do_abort_req_rss(struct toedev *dev, struct mbuf *m)
{
	union opcode_tid *p = cplhdr(m);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[p->opcode]) {
		return toe_tid->client->handlers[p->opcode]
		    (dev, m, toe_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(m);
		struct cpl_abort_rpl *rpl;

		struct mbuf *m2 = m_get(M_NOWAIT, MT_DATA);
		if (!m2) {
			log(LOG_NOTICE, "do_abort_req_rss: couldn't get mbuf!\n");
			goto out;
		}

		m_set_priority(m2, CPL_PRIORITY_DATA);
#if 0
		__skb_put(skb, sizeof(struct cpl_abort_rpl));
#endif
		rpl = cplhdr(m2);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
		OPCODE_TID(rpl) =
		    htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
		rpl->cmd = req->status;
		cxgb_ofld_send(dev, m2);
 out:
		return CPL_RET_BUF_DONE;
	}
}
static int
do_act_establish(struct toedev *dev, struct mbuf *m)
{
	struct cpl_act_establish *req = cplhdr(m);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return toe_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int
do_set_tcb_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(m);

	if (rpl->status != CPL_ERR_NONE)
		log(LOG_ERR,
		    "Unexpected SET_TCB_RPL status %u for tid %u\n",
		    rpl->status, GET_TID(rpl));
	return CPL_RET_BUF_DONE;
}
static int
do_trace(struct toedev *dev, struct mbuf *m)
{
#if 0
	struct cpl_trace_pkt *p = cplhdr(m);

	skb->protocol = 0xffff;
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb->mac.raw = mtod(m, (char *));
	netif_receive_skb(skb);
#endif
	return 0;
}
static int
do_term(struct toedev *dev, struct mbuf *m)
{
	unsigned int hwtid = ntohl(m_get_priority(m)) >> 8 & 0xfffff;
	unsigned int opcode = G_OPCODE(ntohl(m->m_pkthdr.csum_data));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[opcode]) {
		return toe_tid->client->handlers[opcode](dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
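
/*
 * The neighbour/ARP handling below (note the <linux/*.h> includes, jprobes
 * and jiffies) appears to be carried over from the Linux version of this
 * driver; it is tied to the MODULE_SUPPORT paths further down.
 */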
#include <linux/config.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>

static int (*orig_arp_constructor)(struct ifnet *);
static void
neigh_suspect(struct ifnet *neigh)
{
	struct hh_cache *hh;

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}
static void
neigh_connect(struct ifnet *neigh)
{
	struct hh_cache *hh;

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
static inline int
neigh_max_probes(const struct neighbour *n)
{
	const struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}
static void
neigh_timer_handler_offload(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;
	struct mbuf *skb;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
		log(LOG_WARNING, "neigh: timer & !nud_in_timer\n");
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   neigh->parms->reachable_time)) {
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  neigh->parms->delay_probe_time)) {
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			cxgb_neigh_update(neigh);
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   neigh->parms->delay_probe_time)) {
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			cxgb_neigh_update(neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set_int(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}
	/*
	 * Needed for read of probes
	 */
	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    neigh->probes >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		cxgb_neigh_update(neigh);
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);

		/* It is very thin place. report_unreachable is very
		   complicated routine. Particularly, it can hit the same
		   neighbour entry!
		   So that, we try to be accurate and avoid dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct mbuf *m = skb_peek(&neigh->arp_queue);

		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_add_int(&neigh->probes, 1);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);

	neigh_release(neigh);
}
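
/*
 * Replacement ARP constructor: for neighbours that resolve over an
 * offload-capable interface, point the neighbour timer at
 * neigh_timer_handler_offload() before chaining to the original constructor.
 */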
static int
arp_constructor_offload(struct neighbour *neigh)
{
	if (neigh->ifp && is_offloading(neigh->ifp))
		neigh->timer.function = neigh_timer_handler_offload;
	return orig_arp_constructor(neigh);
}
/*
 * This must match exactly the signature of neigh_update for jprobes to work.
 * It runs from a trap handler with interrupts off so don't disable BH.
 */
static int
neigh_update_offload(struct neighbour *neigh, const u8 *lladdr,
		     u8 new, u32 flags)
{
	write_lock(&neigh->lock);
	cxgb_neigh_update(neigh);
	write_unlock(&neigh->lock);
	jprobe_return();
	return 0;
}

static struct jprobe neigh_update_jprobe = {
	.entry = (kprobe_opcode_t *) neigh_update_offload,
	.kp.addr = (kprobe_opcode_t *) neigh_update
};
#ifdef MODULE_SUPPORT
static int
prepare_arp_with_t3core(void)
{
	int err;

	err = register_jprobe(&neigh_update_jprobe);
	if (err) {
		log(LOG_ERR, "Could not install neigh_update jprobe, "
		    "error %d\n", err);
		return err;
	}

	orig_arp_constructor = arp_tbl.constructor;
	arp_tbl.constructor = arp_constructor_offload;

	return 0;
}

static void
restore_arp_sans_t3core(void)
{
	arp_tbl.constructor = orig_arp_constructor;
	unregister_jprobe(&neigh_update_jprobe);
}

#else /* Module support */
static inline int
prepare_arp_with_t3core(void)
{
	return 0;
}

static inline void
restore_arp_sans_t3core(void)
{
}
#endif
/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int
do_bad_cpl(struct toedev *dev, struct mbuf *m)
{
	log(LOG_ERR, "%s: received bad CPL command 0x%x\n", dev->name,
	    *mtod(m, uint32_t *));
	return (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG);
}
/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be
 * supplied to unregister an existing handler.
 */
void
t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		log(LOG_ERR, "T3C: handler registration for "
		    "opcode %x failed\n", opcode);
}
/*
 * TOEDEV's receive method.
 */
static int
process_rx(struct toedev *dev, struct mbuf **m, int n)
{
	while (n--) {
		struct mbuf *m0 = *m++;
		unsigned int opcode = G_OPCODE(ntohl(m0->m_pkthdr.csum_data));
		int ret = cpl_handlers[opcode] (dev, m0);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(m0);

			log(LOG_ERR, "%s: CPL message (opcode %u) had "
			    "unknown TID %u\n", dev->name, opcode,
			    G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			m_freem(m0);
	}
	return 0;
}
/*
 * Sends an mbuf to a T3C driver after dealing with any active network taps.
 */
int
cxgb_ofld_send(struct toedev *dev, struct mbuf *m)
{
	int r;

	r = dev->send(dev, m);
	return r;
}
/**
 *	cxgb_ofld_recv - process n received offload packets
 *	@dev: the offload device
 *	@m: an array of offload packets
 *	@n: the number of offload packets
 *
 *	Process an array of ingress offload packets.  Each packet is forwarded
 *	to any active network taps and then passed to the offload device's
 *	receive method.  We optimize passing packets to the receive method by
 *	passing it the whole array at once except when there are active taps.
 */
int
cxgb_ofld_recv(struct toedev *dev, struct mbuf **m, int n)
{
#if defined(CONFIG_CHELSIO_T3)
	if (likely(!netdev_nit))
		return dev->recv(dev, skb, n);

	for ( ; n; n--, skb++) {
		skb[0]->dev = dev->lldev;
		dev_queue_xmit_nit(skb[0], dev->lldev);
		dev->recv(dev, skb, 1);
	}
	return 0;
#else
	return dev->recv(dev, m, n);
#endif
}
void
cxgb_neigh_update(struct rtentry *rt)
{
	if (is_offloading(rt->rt_ifp)) {
		struct toedev *tdev = TOEDEV(rt->rt_ifp);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, rt);
	}
}
static void
set_l2t_ix(struct toedev *tdev, u32 tid, struct l2t_entry *e)
{
	struct mbuf *m;
	struct cpl_set_tcb_field *req;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (!m) {
		log(LOG_ERR, "%s: cannot allocate mbuf!\n", __func__);
		return;
	}

	m_set_priority(m, CPL_PRIORITY_CONTROL);
	req = mtod(m, struct cpl_set_tcb_field *);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = htobe64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = htobe64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, m);
}
void
cxgb_redirect(struct rtentry *old, struct rtentry *new)
{
	struct ifnet *olddev, *newdev;
	struct tid_info *ti;
	struct toedev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct toe_tid_entry *te;

	olddev = old->rt_ifp;
	newdev = new->rt_ifp;
	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		log(LOG_WARNING, "%s: Redirect to non-offload "
		    "device ignored.\n", __func__);
		return;
	}
	tdev = TOEDEV(olddev);
	if (tdev != TOEDEV(newdev)) {
		log(LOG_WARNING, "%s: Redirect to different "
		    "offload device ignored.\n", __func__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new, ((struct port_info *)new->rt_ifp->if_softc)->port_id);
	if (!e) {
		log(LOG_ERR, "%s: couldn't allocate new l2t entry!\n",
		    __func__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(TOE_DATA(tdev))->tid_maps;
	for (tid=0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		if (te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new,
							  e);
			if (update_tcb) {
				l2t_hold(L2DATA(tdev), e);
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(L2DATA(tdev), e);
}
/*
 * Allocate a chunk of memory.  The allocated memory is cleared.
 */
void *
cxgb_alloc_mem(unsigned long size)
{
	return malloc(size, M_DEVBUF, M_ZERO);
}
/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void
cxgb_free_mem(void *addr)
{
	free(addr, M_DEVBUF);
}
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int
init_tid_tabs(struct tid_info *t, unsigned int ntids,
	unsigned int natids, unsigned int nstids,
	unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return (ENOMEM);

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = t->sfree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set_int(&t->tids_in_use, 0);
	mtx_init(&t->stid_lock, "stid", NULL, MTX_DEF);
	mtx_init(&t->atid_lock, "atid", NULL, MTX_DEF);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}
static void
free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}
static inline void
add_adapter(adapter_t *adap)
{
	rw_wlock(&adapter_list_lock);
	TAILQ_INSERT_TAIL(&adapter_list, adap, adapter_entry);
	rw_wunlock(&adapter_list_lock);
}

static inline void
remove_adapter(adapter_t *adap)
{
	rw_wlock(&adapter_list_lock);
	TAILQ_REMOVE(&adapter_list, adap, adapter_entry);
	rw_wunlock(&adapter_list_lock);
}
#define t3_free_l2t(...)
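
/*
 * Bring up offload support for an adapter: query the device for its work
 * request, TID/STID, MTU and L2 table parameters, allocate the L2 table and
 * TID maps, set up the deferred TID-release task, and install the real
 * receive and neighbour-update methods on the toedev.
 */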
int
cxgb_offload_activate(struct adapter *adapter)
{
	struct toedev *dev = &adapter->tdev;
	int natids, err;
	struct toe_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = malloc(sizeof(*t), M_DEVBUF, M_WAITOK);
	if (!t)
		return (ENOMEM);

	err = (EOPNOTSUPP);
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = (ENOMEM);
	L2DATA(dev) = t3_init_l2t(l2t_capacity);
	if (!L2DATA(dev))
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
	    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

#ifdef __FreeBSD__
	TASK_INIT(&t->tid_release_task, 0 /* XXX? */, t3_process_tid_release_list, dev);
#else
	t->tid_release_task.name = "t3_process_tid_release_list";
	t->tid_release_task.func = t3_process_tid_release_list;
	t->tid_release_task.context = adapter;
	kthread_create(PRI_NONE, 0, NULL, cxgb_make_task, &t->tid_release_task, NULL, "cxgb_make_task");
#endif
	mtx_init(&t->tid_release_lock, "tid release", NULL, MTX_DEF);
	t->dev = dev;

	TOE_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;
	offload_proc_dev_setup(dev);

	/* Register netevent handler once */
	if (TAILQ_EMPTY(&adapter_list)) {
#if defined(CONFIG_CHELSIO_T3_MODULE)
		if (prepare_arp_with_t3core())
			log(LOG_ERR, "Unable to set offload capabilities\n");
#endif
	}
	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	L2DATA(dev) = NULL;
out_free:
	free(t, M_DEVBUF);
	return err;
}
void
cxgb_offload_deactivate(struct adapter *adapter)
{
	struct toedev *tdev = &adapter->tdev;
	struct toe_data *t = TOE_DATA(tdev);

	remove_adapter(adapter);
	if (TAILQ_EMPTY(&adapter_list)) {
#if defined(CONFIG_CHELSIO_T3_MODULE)
		restore_arp_sans_t3core();
#endif
	}
	free_tid_maps(&t->tid_maps);
	TOE_DATA(tdev) = NULL;
	t3_free_l2t(L2DATA(tdev));
	L2DATA(tdev) = NULL;
	free(t, M_DEVBUF);
}
static inline void
register_tdev(struct toedev *tdev)
{
	static int unit;

	mtx_lock(&cxgb_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	TAILQ_INSERT_TAIL(&ofld_dev_list, tdev, ofld_entry);
	mtx_unlock(&cxgb_db_lock);
}

static inline void
unregister_tdev(struct toedev *tdev)
{
	mtx_lock(&cxgb_db_lock);
	TAILQ_REMOVE(&ofld_dev_list, tdev, ofld_entry);
	mtx_unlock(&cxgb_db_lock);
}
void
cxgb_adapter_ofld(struct adapter *adapter)
{
	struct toedev *tdev = &adapter->tdev;

	cxgb_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adapter->params.rev == 0 ?
		     T3A : T3B;

	register_tdev(tdev);
	offload_proc_dev_init(tdev);
}
void
cxgb_adapter_unofld(struct adapter *adapter)
{
	struct toedev *tdev = &adapter->tdev;

	offload_proc_dev_cleanup(tdev);
	offload_proc_dev_exit(tdev);

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}
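
/*
 * One-time module initialization: set up the global locks and lists and
 * point every CPL opcode at do_bad_cpl() before registering the real
 * handlers for the opcodes the driver understands.
 */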
void
cxgb_offload_init(void)
{
	int i;

	if (inited)
		return;
	else
		inited = 1;

	mtx_init(&cxgb_db_lock, "ofld db", NULL, MTX_DEF);
#ifdef __FreeBSD__
	rw_init(&adapter_list_lock, "ofld adap list");
#else
	rw_init(&adapter_list_lock);
#endif
	TAILQ_INIT(&client_list);
	TAILQ_INIT(&ofld_dev_list);
	TAILQ_INIT(&adapter_list);

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);

	if (offload_proc_init())
		log(LOG_WARNING, "Unable to create /proc/net/cxgb3 dir\n");
}
void
cxgb_offload_exit(void)
{
	static int deinited = 0;

	if (deinited)
		return;
	deinited = 1;

	mtx_destroy(&cxgb_db_lock);
	rw_destroy(&adapter_list_lock);
	offload_proc_cleanup();
}
static int
offload_info_read_proc(char *buf, char **start, off_t offset,
	int length, int *eof, void *data)
{
	struct toe_data *d = data;
	struct tid_info *t = &d->tid_maps;
	int len;

	len = sprintf(buf, "TID range: 0..%d, in use: %u\n"
	    "STID range: %d..%d, in use: %u\n"
	    "ATID range: %d..%d, in use: %u\n"
	    "MSS: %u\n",
	    t->ntids - 1, atomic_read(&t->tids_in_use), t->stid_base,
	    t->stid_base + t->nstids - 1, t->stids_in_use,
	    t->atid_base, t->atid_base + t->natids - 1,
	    t->atids_in_use, d->tx_max_chunk);

	if (len > length)
		len = length;
	*eof = 1;
	return len;
}
static int
offload_info_proc_setup(struct proc_dir_entry *dir,
	struct toe_data *d)
{
	struct proc_dir_entry *p;

	if (!dir)
		return (EINVAL);

	p = create_proc_read_entry("info", 0, dir, offload_info_read_proc, d);
	if (!p)
		return (ENOMEM);

	p->owner = THIS_MODULE;
	return 0;
}
static int
offload_devices_read_proc(char *buf, char **start, off_t offset,
	int length, int *eof, void *data)
{
	int len;
	struct toedev *dev;
	struct net_device *ndev;

	len = sprintf(buf, "Device Interfaces\n");

	mtx_lock(&cxgb_db_lock);
	TAILQ_FOREACH(dev, &ofld_dev_list, ofld_entry) {
		len += sprintf(buf + len, "%-16s", dev->name);
		read_lock(&dev_base_lock);
		for (ndev = dev_base; ndev; ndev = ndev->next) {
			if (TOEDEV(ndev) == dev)
				len += sprintf(buf + len, " %s", ndev->name);
		}
		read_unlock(&dev_base_lock);
		len += sprintf(buf + len, "\n");
	}
	mtx_unlock(&cxgb_db_lock);