/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_req_wait",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
static int ep_timeout_secs = 10;
module_param(ep_timeout_secs, int, 0444);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=10)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0444);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
			  "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0444);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0444);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0444);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
static int snd_win = 32 * 1024;
module_param(snd_win, int, 0444);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0444);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0444);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
static DECLARE_WORK(skb_work, process_work);

static struct sk_buff_head rxq;
static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];
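
/*
 * CPL messages arrive in interrupt context via sched(), which takes a
 * reference on the endpoint, stashes the context in skb->cb, and queues
 * the skb on rxq.  process_work() later dispatches each message to the
 * matching work_handlers[] entry in process context.
 */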
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);
static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}
static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}
static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	tdev->send(tdev, skb);
	return;
}
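
/*
 * Pause the hardware receive path for this connection by setting the
 * RX_QUIESCE bit in its TCB via a CPL_SET_TCB_FIELD request;
 * iwch_resume_tid() below clears the same bit.
 */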
int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}
int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}
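
/*
 * Derive the effective MSS from the negotiated TCP options: take the MTU
 * table entry selected by the peer's MSS index and subtract 40 bytes of
 * IP/TCP headers, plus 12 more if timestamps are in use.
 */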
static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}
static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}
static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kmalloc(size, gfp);
	if (epc) {
		memset(epc, 0, size);
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __FUNCTION__, epc);
	return epc;
}
void __free_ep(struct kref *kref)
{
	struct iwch_ep_common *epc;
	epc = container_of(kref, struct iwch_ep_common, kref);
	PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]);
	kfree(epc);
}
static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	if (ep->com.tdev->type == T3B)
		release_tid(ep->com.tdev, ep->hwtid, NULL);
	put_ep(&ep->com);
}
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}
static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			  }
	};

	if (ip_route_output_flow(&rt, &fl, NULL, 0))
		return NULL;
	return rt;
}
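
/*
 * Walk the per-adapter MTU table and return the index of the largest
 * entry that does not exceed the given path MTU.
 */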
static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}
static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	kfree_skb(skb);
}
/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}
/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(dev, skb);
}
static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}
static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}
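
/*
 * Build and send the CPL_ACT_OPEN_REQ that initiates the offloaded TCP
 * connection, encoding the window scale, MSS index, L2T entry and
 * congestion-control flavor for the hardware.
 */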
static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}
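
/*
 * Transmit the MPA start request as a TX_DATA work request.  The
 * connection's private data (staged in ep->mpa_pkt) is appended to the
 * MPA header, and the endpoint moves to MPA_REQ_SENT once the message is
 * handed to the L2T.
 */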
static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb->end) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb->h.raw = skb->data;
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}
static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb->h.raw = skb->data;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}
static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb->h.raw = skb->data;
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_MORE | F_TX_IMM_ACK | F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}
static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}
static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}
static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __FUNCTION__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __FUNCTION__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD)
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}
static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	ep->com.tdev->send(ep->com.tdev, skb);
	return credits;
}
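
/*
 * Parse the peer's MPA start reply.  Data is accumulated in ep->mpa_pkt
 * until the full header and private data have arrived; the header is then
 * validated and, on success, the QP is bound to the TID and moved to RTS.
 */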
static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.  And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	    IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	    IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (!err)
		goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}
static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}
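
/*
 * Streaming-mode data from the peer is only expected while an MPA
 * exchange is in flight; dispatch it by connection state and return the
 * consumed length to the hardware as RX credits.
 */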
static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __FUNCTION__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __FUNCTION__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	enum iwch_qp_attr_mask mask;

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);

	if (credits == 0)
		return CPL_RET_BUF_DONE;
	BUG_ON(credits != 1);
	BUG_ON(ep->mpa_skb == NULL);
	kfree_skb(ep->mpa_skb);
	ep->mpa_skb = NULL;
	dst_confirm(ep->dst);
	if (state_read(&ep->com) == MPA_REP_SENT) {
		struct iwch_qp_attributes attrs;

		/* bind QP to EP and move to RTS */
		attrs.mpa_attr = ep->mpa_attr;
		attrs.max_ird = ep->ord;
		attrs.max_ord = ep->ord;
		attrs.llp_stream_handle = ep;
		attrs.next_state = IWCH_QP_STATE_RTS;

		/* bind QP and TID with INIT_WR */
		mask = IWCH_QP_ATTR_NEXT_STATE |
		       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
		       IWCH_QP_ATTR_MPA_ATTR |
		       IWCH_QP_ATTR_MAX_IRD |
		       IWCH_QP_ATTR_MAX_ORD;

		ep->com.rpl_err = iwch_modify_qp(ep->com.qp->rhp,
						 ep->com.qp, mask, &attrs, 1);

		if (!ep->com.rpl_err) {
			state_set(&ep->com, FPDU_MODE);
			established_upcall(ep);
		}

		ep->com.rpl_done = 1;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
	}
	return CPL_RET_BUF_DONE;
}
static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	close_complete_upcall(ep);
	state_set(&ep->com, DEAD);
	release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type == T3B)
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}
static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = CPL_PRIORITY_SETUP;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}
*tdev
, struct sk_buff
*skb
, void *ctx
)
1165 struct iwch_listen_ep
*ep
= ctx
;
1166 struct cpl_pass_open_rpl
*rpl
= cplhdr(skb
);
1168 PDBG("%s ep %p status %d error %d\n", __FUNCTION__
, ep
,
1169 rpl
->status
, status2errno(rpl
->status
));
1170 ep
->com
.rpl_err
= status2errno(rpl
->status
);
1171 ep
->com
.rpl_done
= 1;
1172 wake_up(&ep
->com
.waitq
);
1174 return CPL_RET_BUF_DONE
;
static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = CPL_PRIORITY_SETUP;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}
static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}
static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}
static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type == T3B)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		tdev->send(tdev, skb);
	}
}
static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __FUNCTION__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR
		       "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
		       __FUNCTION__,
		       req->dst_mac[0],
		       req->dst_mac[1],
		       req->dst_mac[2],
		       req->dst_mac[3],
		       req->dst_mac[4],
		       req->dst_mac[5]);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __FUNCTION__);
		goto reject;
	}
	dst = &rt->u.dst;
	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __FUNCTION__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __FUNCTION__);
		l2t_release(L2DATA(tdev), l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}
static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->snd_seq = ntohl(req->snd_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}
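
/*
 * Handle CPL_PEER_CLOSE: the peer has sent a FIN.  Advance the endpoint
 * state machine, deliver the appropriate upcall, and kick off our own
 * half-close or final release as needed.
 */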
static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		__state_set(&ep->com, CLOSING);
		get_ep(&ep->com);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}
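
/*
 * Handle CPL_ABORT_REQ_RSS: the peer reset the connection (or the HW
 * issued a negative advice).  Move any bound QP to ERROR, inform the
 * ULP, and answer with a no-RST CPL_ABORT_RPL.
 */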
static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int state;

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	state = state_read(&ep->com);
	PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state);
	switch (state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		get_ep(&ep->com);
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __FUNCTION__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __FUNCTION__);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
		put_ep(&ep->com);
		return CPL_RET_BUF_DONE;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	ep->com.tdev->send(ep->com.tdev, rpl_skb);
	if (state != ABORTING) {
		state_set(&ep->com, DEAD);
		release_ep_resources(ep);
	}
	return CPL_RET_BUF_DONE;
}
static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
		break;
	case DEAD:
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for later consumer consumption.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
	memcpy(ep->com.qp->attr.terminate_buffer, skb->data, skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}
static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __FUNCTION__, ep->hwtid);
		stop_ep_timer(ep);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}
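
/*
 * Endpoint timer expired: the MPA exchange or orderly close did not
 * complete within ep_timeout_secs, so abort the connection.
 */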
static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		break;
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		break;
	default:
		BUG();
	}
	__state_set(&ep->com, CLOSING);
	spin_unlock_irqrestore(&ep->com.lock, flags);
	abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct iwch_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	return 0;
}
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		return -EINVAL;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);

	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err) {
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		cm_id->rem_ref(cm_id);
		abort_connection(ep, NULL, GFP_KERNEL);
		return err;
	}

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ord;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	       IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD |
	       IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);

	if (err) {
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		cm_id->rem_ref(cm_id);
		abort_connection(ep, NULL, GFP_KERNEL);
	} else {
		state_set(&ep->com, FPDU_MODE);
		established_upcall(ep);
	}
	return err;
}
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->u.dst;

	/* get a l2t entry */
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
			     ep->dst->neighbour->dev);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
out:
	return err;
}
int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
fail1:
out:
	return err;
}
*cm_id
)
1916 struct iwch_listen_ep
*ep
= to_listen_ep(cm_id
);
1918 PDBG("%s ep %p\n", __FUNCTION__
, ep
);
1921 state_set(&ep
->com
, DEAD
);
1922 ep
->com
.rpl_done
= 0;
1923 ep
->com
.rpl_err
= 0;
1924 err
= listen_stop(ep
);
1925 wait_event(ep
->com
.waitq
, ep
->com
.rpl_done
);
1926 cxgb3_free_stid(ep
->com
.tdev
, ep
->stid
);
1927 err
= ep
->com
.rpl_err
;
1928 cm_id
->rem_ref(cm_id
);
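
/*
 * Begin tearing down a connection.  With abrupt set, the connection is
 * reset immediately via CPL_ABORT_REQ; otherwise a graceful half-close
 * (CPL_CLOSE_CON_REQ) is issued and the state machine advances toward
 * MORIBUND/DEAD as replies arrive.
 */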
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep,
	     states[ep->com.state], abrupt);

	if (ep->com.state == DEAD) {
		PDBG("%s already dead ep %p\n", __FUNCTION__, ep);
		goto out;
	}

	if (abrupt) {
		if (ep->com.state != ABORTING) {
			ep->com.state = ABORTING;
			close = 1;
		}
		goto out;
	}

	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		ep->com.state = CLOSING;
		close = 1;
		break;
	case CLOSING:
		ep->com.state = MORIBUND;
		close = 1;
		break;
	case MORIBUND:
		break;
	default:
		BUG();
		break;
	}
out:
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
	}
	return ret;
}
int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}
/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb3");
	if (!workq)
		return -ENOMEM;

	/*
	 * All upcalls from the T3 Core go to sched() to
	 * schedule the processing on a work queue.
	 */
	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
	t3c_handlers[CPL_RX_DATA] = sched;
	t3c_handlers[CPL_TX_DMA_ACK] = sched;
	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
	t3c_handlers[CPL_ABORT_RPL] = sched;
	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
	t3c_handlers[CPL_PEER_CLOSE] = sched;
	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;

	/*
	 * These are the real handlers that are called from a
	 * work queue.
	 */
	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
	work_handlers[CPL_RX_DATA] = rx_data;
	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
	work_handlers[CPL_ABORT_RPL] = abort_rpl;
	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
	work_handlers[CPL_PEER_CLOSE] = peer_close;
	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
	work_handlers[CPL_RDMA_TERMINATE] = terminate;
	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
	return 0;
}
void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}