/*
 * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <asm/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"
static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;
static inline int offload_activated(struct t3cdev *tdev)
{
	const struct adapter *adapter = tdev2adap(tdev);

	return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map));
}
/**
 *	cxgb3_register_client - register an offload client
 *	@client: the client
 *
 *	Add the client to the client list,
 *	and call back the client for each activated offload device.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_add_tail(&client->client_list, &client_list);

	if (client->add) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);
/**
 *	cxgb3_unregister_client - unregister an offload client
 *	@client: the client
 *
 *	Remove the client from the client list,
 *	and call back the client for each activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_del(&client->client_list);

	if (client->remove) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);
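
/*
 * Example (illustrative sketch, not part of the driver): an offload client
 * such as iw_cxgb3 or cxgb3i hooks in through struct cxgb3_client.  The
 * example_* names below are hypothetical.
 *
 *	static void example_add(struct t3cdev *tdev)
 *	{
 *		// called for each already-active offload device, and again
 *		// whenever a new offload device is activated
 *	}
 *
 *	static struct cxgb3_client example_client = {
 *		.add	= example_add,
 *	};
 *
 *	cxgb3_register_client(&example_client);	  // module init
 *	cxgb3_unregister_client(&example_client); // module exit
 */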
/**
 *	cxgb3_add_clients - activate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients once an offload device is activated
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->add)
			client->add(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}
/**
 *	cxgb3_remove_clients - deactivate registered clients
 *			       for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients once an offload device is deactivated
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->remove)
			client->remove(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}
void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->event_handler)
			client->event_handler(tdev, event, port);
	}
	mutex_unlock(&cxgb3_db_lock);
}
static struct net_device *get_iff_from_mac(struct adapter *adapter,
					   const unsigned char *mac,
					   unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
		struct vlan_group *grp;
		struct net_device *dev = adapter->port[i];
		const struct port_info *p = netdev_priv(dev);

		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
			if (vlan && vlan != VLAN_VID_MASK) {
				grp = p->vlan_grp;
				dev = NULL;
				if (grp)
					dev = vlan_group_get_device(grp, vlan);
			}
			return dev;
		}
	}
	return NULL;
}
static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
			      void *data)
{
	int i;
	int ret = 0;
	unsigned int val = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->pdev = adapter->pdev;
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);

		val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
		for (i = 0; i < 4; i++, val >>= 8)
			uiip->pgsz_factor[i] = val & 0xFF;

		val = t3_read_reg(adapter, A_TP_PARA_REG7);
		uiip->max_txsz =
		uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
				     (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		val = min(adapter->params.tp.tx_pg_size,
			  t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		uiip->max_txsz = min(val, uiip->max_txsz);

		/* set MaxRxData to 16224 */
		val = t3_read_reg(adapter, A_TP_PARA_REG2);
		if ((val >> S_MAXRXDATA) != 0x3f60) {
			val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
			val |= V_MAXRXDATA(0x3f60);
			printk(KERN_INFO
				"%s, iscsi set MaxRxData to 16224 (0x%x).\n",
				adapter->name, val);
			t3_write_reg(adapter, A_TP_PARA_REG2, val);
		}

		/*
		 * on rx, the iscsi pdu has to be < rx page size and the
		 * max rx data length programmed in TP
		 */
		val = min(adapter->params.tp.rx_pg_size,
			  ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
				S_MAXRXDATA) & M_MAXRXDATA);
		uiip->max_rxsz = min(val, uiip->max_rxsz);
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		/* program the ddp page sizes */
		for (i = 0; i < 4; i++)
			val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
		if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
			printk(KERN_INFO
				"%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n",
				adapter->name, val, uiip->pgsz_factor[0],
				uiip->pgsz_factor[1], uiip->pgsz_factor[2],
				uiip->pgsz_factor[3]);
			t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}
/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *rdma = data;
		struct pci_dev *pdev = adapter->pdev;

		rdma->udbell_physbase = pci_resource_start(pdev, 2);
		rdma->udbell_len = pci_resource_len(pdev, 2);
		rdma->tpt_base =
			t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		rdma->pbl_base =
			t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
		rdma->pdev = pdev;
		break;
	}
	case RDMA_CQ_OP:{
		unsigned long flags;
		struct rdma_cq_op *rdma = data;

		/* may be called in any context */
		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
		ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
					rdma->credits);
		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
		break;
	}
	case RDMA_GET_MEM:{
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return -EINVAL;
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		ret =
		    t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
				   (u64 *) t->buf);
		if (ret)
			return ret;
		break;
	}
	case RDMA_CQ_SETUP:{
		struct rdma_cq_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret =
		    t3_sge_init_cqcntxt(adapter, rdma->id,
					rdma->base_addr, rdma->size,
					ASYNC_NOTIF_RSPQ,
					rdma->ovfl_mode, rdma->credits,
					rdma->credit_thres);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP:{
		struct rdma_ctrlqp_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
					 SGE_CNTXT_CTRL, FW_RI_TID_START,
					 rdma->base_addr, rdma->size,
					 FW_RI_TID_START, 1, 0);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_GET_MIB: {
		spin_lock(&adapter->stats_lock);
		t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}
static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	struct ofld_page_info *rx_page_info;
	struct tp_params *tp = &adapter->params.tp;
	int i;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;	/* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
						iffmacp->vlan_tag &
						VLAN_VID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, i)
			ports->lldevs[i] = adapter->port[i];
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
	case RDMA_GET_MIB:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_rdma_ctl(adapter, req, data);
	case GET_RX_PAGE_INFO:
		rx_page_info = data;
		rx_page_info->page_size = tp->rx_pg_size;
		rx_page_info->num = tp->rx_num_pgs;
		break;
	case GET_ISCSI_IPV4ADDR: {
		struct iscsi_ipv4addr *p = data;
		struct port_info *pi = netdev_priv(p->dev);
		p->ipv4addr = pi->iscsi_ipv4addr;
		break;
	}
	case GET_EMBEDDED_INFO: {
		struct ch_embedded_info *e = data;

		spin_lock(&adapter->stats_lock);
		t3_get_fw_version(adapter, &e->fw_vers);
		t3_get_tp_version(adapter, &e->tp_vers);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is setup.  This complains and drops the packet as it isn't
 * normal to get offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}
/*
 * Free an active-open TID.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->t3c_tid.ctx;

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);

	return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);
/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	spin_lock_bh(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);
void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
		      void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);
/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
	struct cpl_tid_release *req;

	skb->priority = CPL_PRIORITY_SETUP;
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
static void t3_process_tid_release_list(struct work_struct *work)
{
	struct t3c_data *td = container_of(work, struct t3c_data,
					   tid_release_task);
	struct sk_buff *skb;
	struct t3cdev *tdev = td->dev;

	spin_lock_bh(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct t3c_tid_entry *p = td->tid_release_list;

		td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
		spin_unlock_bh(&td->tid_release_lock);

		skb = alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL);
		if (!skb)
			skb = td->nofail_skb;
		if (!skb) {
			spin_lock_bh(&td->tid_release_lock);
			p->ctx = (void *)td->tid_release_list;
			td->tid_release_list = (struct t3c_tid_entry *)p;
			break;
		}
		mk_tid_release(skb, p - td->tid_maps.tid_tab);
		cxgb3_ofld_send(tdev, skb);
		p->ctx = NULL;
		if (skb == td->nofail_skb)
			td->nofail_skb =
				alloc_skb(sizeof(struct cpl_tid_release),
					  GFP_KERNEL);
		spin_lock_bh(&td->tid_release_lock);
	}
	td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
	spin_unlock_bh(&td->tid_release_lock);

	if (!td->nofail_skb)
		td->nofail_skb =
			alloc_skb(sizeof(struct cpl_tid_release),
				  GFP_KERNEL);
}
/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
	struct t3c_data *td = T3C_DATA(tdev);
	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	p->client = NULL;
	td->tid_release_list = p;
	if (!p->ctx || td->release_list_incomplete)
		schedule_work(&td->tid_release_task);
	spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);
/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);
int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree &&
	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
	    t->ntids) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);
int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);
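
/*
 * Usage sketch (hypothetical client code, not part of the driver): for an
 * active open, a client allocates an atid, and once the connection is
 * established it moves its context to the hardware TID:
 *
 *	atid = cxgb3_alloc_atid(tdev, &example_client, my_ctx);
 *	// ... send CPL_ACT_OPEN_REQ referencing atid ...
 *	// in the CPL_ACT_ESTABLISH handler:
 *	cxgb3_insert_tid(tdev, &example_client, my_ctx, GET_TID(req));
 *	cxgb3_free_atid(tdev, atid);
 *	// at connection teardown:
 *	cxgb3_remove_tid(tdev, my_ctx, tid);
 */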
/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return (struct t3cdev *)pi->adapter;
}

EXPORT_SYMBOL(dev2t3cdev);
static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_rte_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}
static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
	    t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
								    t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode] (dev, skb,
							     t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk("%s: passive open TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_stid(t, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
/*
 * Returns an sk_buff for a reply CPL message of size len.  If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated.  The input skb must be of size at least len.  Note that this
 * operation does not destroy the original skb data even if it decides to reuse
 * the buffer.
 */
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
					       gfp_t gfp)
{
	if (likely(!skb_cloned(skb))) {
		BUG_ON(skb->len < len);
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}
*dev
, struct sk_buff
*skb
)
843 union opcode_tid
*p
= cplhdr(skb
);
844 unsigned int hwtid
= G_TID(ntohl(p
->opcode_tid
));
845 struct t3c_tid_entry
*t3c_tid
;
847 t3c_tid
= lookup_tid(&(T3C_DATA(dev
))->tid_maps
, hwtid
);
848 if (t3c_tid
&& t3c_tid
->ctx
&& t3c_tid
->client
->handlers
&&
849 t3c_tid
->client
->handlers
[p
->opcode
]) {
850 return t3c_tid
->client
->handlers
[p
->opcode
]
851 (dev
, skb
, t3c_tid
->ctx
);
853 struct cpl_abort_req_rss
*req
= cplhdr(skb
);
854 struct cpl_abort_rpl
*rpl
;
855 struct sk_buff
*reply_skb
;
856 unsigned int tid
= GET_TID(req
);
857 u8 cmd
= req
->status
;
859 if (req
->status
== CPL_ERR_RTX_NEG_ADVICE
||
860 req
->status
== CPL_ERR_PERSIST_NEG_ADVICE
)
863 reply_skb
= cxgb3_get_cpl_reply_skb(skb
,
869 printk("do_abort_req_rss: couldn't get skb!\n");
872 reply_skb
->priority
= CPL_PRIORITY_DATA
;
873 __skb_put(reply_skb
, sizeof(struct cpl_abort_rpl
));
874 rpl
= cplhdr(reply_skb
);
876 htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL
));
877 rpl
->wr
.wr_lo
= htonl(V_WR_TID(tid
));
878 OPCODE_TID(rpl
) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL
, tid
));
880 cxgb3_ofld_send(dev
, reply_skb
);
882 return CPL_RET_BUF_DONE
;
static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk("%s: active establish TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_atid(t, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = htons(0xffff);
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	netif_receive_skb(skb);
	return 0;
}
/*
 * That skb would better have come from process_responses() where we abuse
 * ->priority and ->csum to carry our data.  NB: if we get to per-arch
 * ->csum, the things might get really interesting here.
 */

static inline u32 get_hwtid(struct sk_buff *skb)
{
	return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
}

static inline u32 get_opcode(struct sk_buff *skb)
{
	return G_OPCODE(ntohl((__force __be32)skb->csum));
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
	unsigned int hwtid = get_hwtid(skb);
	unsigned int opcode = get_opcode(skb);
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode]) {
		return t3c_tid->client->handlers[opcode] (dev, skb,
							  t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case (NETEVENT_NEIGH_UPDATE):{
		cxgb_neigh_update((struct neighbour *)ctx);
		break;
	}
	case (NETEVENT_PMTU_UPDATE):
		break;
	case (NETEVENT_REDIRECT):{
		struct netevent_redirect *nr = ctx;
		cxgb_redirect(nr->old, nr->new);
		cxgb_neigh_update(nr->new->neighbour);
		break;
	}
	default:
		break;
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = nb_callback
};
/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
	       *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
 * to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		printk(KERN_ERR "T3C: handler registration for "
		       "opcode %x failed\n", opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);
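
/*
 * Sketch of client-side handler registration (the example_* name is
 * hypothetical): a handler has the cpl_handler_func signature and returns
 * CPL_RET_* flags telling process_rx() what to do with the skb.
 *
 *	static int example_do_rx_data(struct t3cdev *dev, struct sk_buff *skb)
 *	{
 *		// ... consume the CPL message ...
 *		return CPL_RET_BUF_DONE;	// process_rx() frees the skb
 *	}
 *
 *	t3_register_cpl_handler(CPL_RX_DATA, example_do_rx_data);
 */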
/*
 * T3CDEV's receive method.
 */
int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
	while (n--) {
		struct sk_buff *skb = *skbs++;
		unsigned int opcode = get_opcode(skb);
		int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(skb);

			printk(KERN_ERR "%s: CPL message (opcode %u) had "
			       "unknown TID %u\n", dev->name, opcode,
			       G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
	return 0;
}
/*
 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);
static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}
void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	if (dev && (is_offloading(dev))) {
		struct t3cdev *tdev = dev2t3cdev(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}
static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}
void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
	struct net_device *olddev, *newdev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	olddev = old->neighbour->dev;
	newdev = new->neighbour->dev;
	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		printk(KERN_WARNING "%s: Redirect to non-offload "
		       "device ignored.\n", __func__);
		return;
	}
	tdev = dev2t3cdev(olddev);
	BUG_ON(!tdev);
	if (tdev != dev2t3cdev(newdev)) {
		printk(KERN_WARNING "%s: Redirect to different "
		       "offload device ignored.\n", __func__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new->neighbour, newdev);
	if (!e) {
		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
		       __func__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te && te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				l2t_hold(L2DATA(tdev), e);
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(L2DATA(tdev), e);
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *cxgb_alloc_mem(unsigned long size)
{
	void *p = kmalloc(size, GFP_KERNEL);

	if (!p)
		p = vmalloc(size);
	if (p)
		memset(p, 0, size);
	return p;
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void cxgb_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
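
/*
 * Usage sketch: because cxgb_alloc_mem() may fall back to vmalloc(), its
 * result must be released with cxgb_free_mem(), never plain kfree().
 * init_tid_tabs()/free_tid_maps() below follow this pairing:
 *
 *	tab = cxgb_alloc_mem(ntids * sizeof(*tab));	// zeroed on success
 *	if (!tab)
 *		return -ENOMEM;
 *	// ...
 *	cxgb_free_mem(tab);
 */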
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}
static void free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}
static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}
int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	L2DATA(dev) = t3_init_l2t(l2t_capacity);
	if (!L2DATA(dev))
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
	t->release_list_incomplete = 0;

	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	L2DATA(dev) = NULL;
out_free:
	kfree(t);
	return err;
}
*adapter
)
1312 struct t3cdev
*tdev
= &adapter
->tdev
;
1313 struct t3c_data
*t
= T3C_DATA(tdev
);
1315 remove_adapter(adapter
);
1316 if (list_empty(&adapter_list
))
1317 unregister_netevent_notifier(&nb
);
1319 free_tid_maps(&t
->tid_maps
);
1320 T3C_DATA(tdev
) = NULL
;
1321 t3_free_l2t(L2DATA(tdev
));
1322 L2DATA(tdev
) = NULL
;
1324 kfree_skb(t
->nofail_skb
);
static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}
static inline int adap2type(struct adapter *adapter)
{
	int type = 0;

	switch (adapter->params.rev) {
	case T3_REV_A:
		type = T3A;
		break;
	case T3_REV_B:
	case T3_REV_B2:
		type = T3B;
		break;
	case T3_REV_C:
		type = T3C;
		break;
	}
	return type;
}
void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adap2type(adapter);

	register_tdev(tdev);
}
void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}
void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}