/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "iw_cxgb4.h"
static char *states[] = {
static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
		c4iw_put_ep(&ep->com);
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	return rt;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}

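/*
 * Build and send a FW_FLOWC_WR work request that tells the firmware the
 * tx channel, queues, sequence numbers, send buffer and MSS to use for
 * this offloaded connection.
 */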
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

#define VLAN_NONE 0xfff
#define FILTER_SEL_VLAN_NONE 0xffff
#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
#define FILTER_SEL_WIDTH_VIN_P_FC \
	(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
#define FILTER_SEL_WIDTH_TAG_P_FC \
	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
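
/*
 * Build the hardware filter tuple (VLAN/VIID, port and protocol fields)
 * for this connection, based on the adapter's configured filter mode.
 */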
static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
				  struct l2t_entry *l2t)
{
	unsigned int ntuple = 0;
	u32 viid;

	switch (dev->rdev.lldi.filt_mode) {

	/* default filter mode */
	case HW_TPL_FR_MT_PR_IV_P_FC:
		if (l2t->vlan == VLAN_NONE)
			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
		else {
			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
			ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
		}
		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
		break;
	case HW_TPL_FR_MT_PR_OV_P_FC: {
		viid = cxgb4_port_viid(l2t->neigh->dev);

		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
		break;
	}
	default:
		break;
	}
	return ntuple;
}

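/*
 * Send the CPL_ACT_OPEN_REQ (T4) or CPL_T5_ACT_OPEN_REQ (T5) that asks the
 * hardware to actively open a TCP connection for this endpoint.
 */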
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct cpl_t5_act_open_req *t5_req;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int size = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
		   sizeof(struct cpl_act_open_req) :
		   sizeof(struct cpl_t5_act_open_req);
	int wrlen = roundup(size, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = (nocong ? NO_CONG(1) : 0) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       CCTRL_ECN(enable_ecn) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
		req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(
				MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
				((ep->rss_qid << 14) | ep->atid)));
		req->local_port = ep->com.local_addr.sin_port;
		req->peer_port = ep->com.remote_addr.sin_port;
		req->local_ip = ep->com.local_addr.sin_addr.s_addr;
		req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(select_ntuple(ep->com.dev,
					ep->dst, ep->l2t));
		req->opt2 = cpu_to_be32(opt2);
	} else {
		t5_req = (struct cpl_t5_act_open_req *) skb_put(skb, wrlen);
		INIT_TP_WR(t5_req, 0);
		OPCODE_TID(t5_req) = cpu_to_be32(
				MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
				((ep->rss_qid << 14) | ep->atid)));
		t5_req->local_port = ep->com.local_addr.sin_port;
		t5_req->peer_port = ep->com.remote_addr.sin_port;
		t5_req->local_ip = ep->com.local_addr.sin_addr.s_addr;
		t5_req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
		t5_req->opt0 = cpu_to_be64(opt0);
		t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
				select_ntuple(ep->com.dev, ep->dst, ep->l2t)));
		t5_req->opt2 = cpu_to_be32(opt2);
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

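/*
 * Send the MPA start request as immediate data in a FW_OFLD_TX_DATA_WR,
 * including the MPA v2 IRD/ORD/RTR parameters when enhanced RDMA
 * connection setup is being negotiated.
 */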
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			 u8 mpa_rev_to_use)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	return;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
				  FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);

	return 0;
}

static void close_complete_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	close_complete_upcall(ep);
	state_set(&ep->com, ABORTING);
	set_bit(ABORT_CONN, &ep->com.history);
	return send_abort(ep, skb, gfp);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
	}
}

static void connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (state_read(&ep->parent_ep->com) != DEAD) {
		c4iw_get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}

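/*
 * Return receive credits to the hardware so it can reopen the TCP receive
 * window once the MPA stream data has been consumed.
 */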
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}

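/*
 * Parse the peer's MPA start reply: accumulate the stream data, validate
 * the MPA header and negotiated v2 parameters, then move the QP to RTS
 * (or send a TERM on an RTR/IRD mismatch).
 */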
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird))
				insuff_ird = 1;

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		       __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}
	goto out;
err:
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	state_set(&ep->com, MPA_REQ_RCVD);

	connect_request_upcall(ep);
	return;
}

static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;
	__u8 status = hdr->status;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		ep->rcv_seq += dlen;
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		ep->rcv_seq += dlen;
		process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;
		BUG_ON(!ep->com.qp);
		pr_err("%s Unexpected streaming data." \
		       " qpid %u ep %p state %d tid %u status %d\n",
		       __func__, ep->com.qp->wq.sq.qid, ep,
		       state_read(&ep->com), ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_ERROR;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
		break;
	}
	default:
		break;
	}
	return 0;
}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	return 0;
}

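/*
 * Retry an active open through the firmware's FW_OFLD_CONNECTION_WR path,
 * used when the normal CPL_ACT_OPEN_REQ fails because the TCAM is full.
 */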
static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	int wscale;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
				     ep->l2t));
	req->le.lport = ep->com.local_addr.sin_port;
	req->le.pport = ep->com.remote_addr.sin_port;
	req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr;
	req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr;
	req->tcb.t_state_to_astid =
		htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
		      V_FW_OFLD_CONNECTION_WR_ASTID(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
		htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
		(nocong ? NO_CONG(1) : 0) |
		L2T_IDX(ep->l2t->idx) |
		TX_CHAN(ep->tx_chan) |
		SMAC_SEL(ep->smac_idx) |
		ULP_MODE(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ(rcv_win >> 10));
	req->tcb.opt2 = (__force __be32) (PACE(1) |
		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		CCTRL_ECN(enable_ecn) |
		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32) SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
	req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

#define ACT_OPEN_RETRY_COUNT 2
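
/*
 * Tear down the failed connection state and retry the active open with a
 * fresh atid, route and L2T entry, reusing the original cm_id addresses.
 */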
static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	struct rtable *rt;
	struct port_info *pi;
	struct net_device *pdev;
	int step;
	struct neighbour *neigh;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);

	/* find a route */
	rt = find_route(ep->com.dev,
			ep->com.cm_id->local_addr.sin_addr.s_addr,
			ep->com.cm_id->remote_addr.sin_addr.s_addr,
			ep->com.cm_id->local_addr.sin_port,
			ep->com.cm_id->remote_addr.sin_port, 0);
	if (!rt) {
		pr_err("%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;

	neigh = dst_neigh_lookup(ep->dst,
				 &ep->com.cm_id->remote_addr.sin_addr.s_addr);
	if (!neigh) {
		pr_err("%s - cannot alloc neigh.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	/* get a l2t entry */
	if (neigh->dev->flags & IFF_LOOPBACK) {
		PDBG("%s LOOPBACK\n", __func__);
		pdev = ip_dev_find(&init_net,
				   ep->com.cm_id->remote_addr.sin_addr.s_addr);
		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
					neigh, pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		dev_put(pdev);
	} else {
		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
					neigh, neigh->dev, 0);
		pi = (struct port_info *)netdev_priv(neigh->dev);
		ep->mtu = dst_mtu(ep->dst);
		ep->tx_chan = cxgb4_port_chan(neigh->dev);
		ep->smac_idx = (cxgb4_port_viid(neigh->dev) &
				0x7F) << 1;
	}

	step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan;
	ep->txq_idx = pi->port_id * step;
	ep->ctrlq_idx = pi->port_id;
	step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan;
	ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step];

	if (!ep->l2t) {
		pr_err("%s - cannot alloc l2e.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * remember to send notification to upper layer.
	 * We are in here so the upper layer is not aware that this is
	 * re-connect attempt and so, upper layer is still waiting for
	 * response of 1st connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
	c4iw_put_ep(&ep->com);
out:
	return err;
}

static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
					ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (status == CPL_ERR_RTX_NEG_ADVICE) {
		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
		       atid);
		return 0;
	}

	set_bit(ACT_OPEN_RPL, &ep->com.history);

	/*
	 * Log interesting failures.
	 */
	switch (status) {
	case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
	case CPL_ERR_TCAM_FULL:
		dev->rdev.stats.tcam_full++;
		if (dev->rdev.lldi.enable_fw_ofld_conn) {
			mutex_lock(&dev->rdev.stats.lock);
			mutex_unlock(&dev->rdev.stats.lock);
			send_fw_act_open_req(ep,
					     GET_TID_TID(GET_AOPEN_ATID(
						ntohl(rpl->atid_status))));
			return 0;
		}
		break;
	case CPL_ERR_CONN_EXIST:
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			set_bit(ACT_RETRY_INUSE, &ep->com.history);
			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
				      atid);
			cxgb4_free_atid(t, atid);
			dst_release(ep->dst);
			cxgb4_l2t_release(ep->l2t);
			c4iw_reconnect(ep);
			return 0;
		}
		break;
	default:
		printk(KERN_INFO MOD "Active open failure - "
		       "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
		       atid, status, status2errno(status),
		       &ep->com.local_addr.sin_addr.s_addr,
		       ntohs(ep->com.local_addr.sin_port),
		       &ep->com.remote_addr.sin_addr.s_addr,
		       ntohs(ep->com.remote_addr.sin_port));
		break;
	}

	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}

static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		PDBG("%s stid %d lookup failure!\n", __func__, stid);
		return 0;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

	return 0;
}

static int listen_stop(struct c4iw_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listsvr_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
						    ep->stid));
	req->reply_ctrl = cpu_to_be16(
			  QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	return 0;
}

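/*
 * Build and send the CPL_PASS_ACCEPT_RPL that accepts an incoming
 * connection request, programming opt0/opt2 for the new tid.
 */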
static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = (nocong ? NO_CONG(1) : 0) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos >> 2) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
			G_IP_HDR_LEN(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN(1);
	}

	rpl = cplhdr(skb);
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));
	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	release_tid(&dev->rdev, hwtid, skb);
	return;
}

static void get_4tuple(struct cpl_pass_accept_req *req,
		       __be32 *local_ip, __be32 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
	     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
	     ntohs(tcp->dest));

	*peer_ip = ip->saddr;
	*local_ip = ip->daddr;
	*peer_port = tcp->source;
	*local_port = tcp->dest;

	return;
}

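/*
 * Derive the L2T entry, MTU, tx channel, SMAC index and queue indices for
 * an endpoint from its destination route (with a special case for
 * loopback interfaces).
 */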
static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
		     struct c4iw_dev *cdev, bool clear_mpa_v1)
{
	struct neighbour *n;
	int err, step;

	n = dst_neigh_lookup(dst, &peer_ip);
	if (!n)
		return -ENODEV;

	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		struct net_device *pdev;

		pdev = ip_dev_find(&init_net, peer_ip);
		if (!pdev)
			goto out;
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
		       cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
		       cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, n->dev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(n->dev);
		ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
		       cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(n->dev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(n->dev);
		step = cdev->rdev.lldi.nrxq /
		       cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(n->dev) * step];
	}
	if (clear_mpa_v1) {
		ep->retry_with_mpa_v1 = 0;
		ep->tried_with_mpa_v1 = 0;
	}
	err = 0;
out:
	neigh_release(n);
	return err;
}

static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *child_ep = NULL, *parent_ep;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int hwtid = GET_TID(req);
        struct dst_entry *dst;
        struct rtable *rt;
        __be32 local_ip, peer_ip = 0;
        __be16 local_port, peer_port;
        int err;
        u16 peer_mss = ntohs(req->tcpopt.mss);

        parent_ep = lookup_stid(t, stid);
        if (!parent_ep) {
                PDBG("%s connect request on invalid stid %d\n", __func__, stid);
                goto reject;
        }
        get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);

        PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d "
             "rport %d peer_mss %d\n", __func__, parent_ep, hwtid,
             ntohl(local_ip), ntohl(peer_ip), ntohs(local_port),
             ntohs(peer_port), peer_mss);

        if (state_read(&parent_ep->com) != LISTEN) {
                printk(KERN_ERR "%s - listening ep not in LISTEN\n",
                       __func__);
                goto reject;
        }

        /* Find output route */
        rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
                        GET_POPEN_TOS(ntohl(req->tos_stid)));
        if (!rt) {
                printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }
        dst = &rt->dst;

        child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
        if (!child_ep) {
                printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
                       __func__);
                dst_release(dst);
                goto reject;
        }

        err = import_ep(child_ep, peer_ip, dst, dev, false);
        if (err) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
                dst_release(dst);
                kfree(child_ep);
                goto reject;
        }

        if (peer_mss && child_ep->mtu > (peer_mss + 40))
                child_ep->mtu = peer_mss + 40;

        state_set(&child_ep->com, CONNECTING);
        child_ep->com.dev = dev;
        child_ep->com.cm_id = NULL;
        child_ep->com.local_addr.sin_family = PF_INET;
        child_ep->com.local_addr.sin_port = local_port;
        child_ep->com.local_addr.sin_addr.s_addr = local_ip;
        child_ep->com.remote_addr.sin_family = PF_INET;
        child_ep->com.remote_addr.sin_port = peer_port;
        child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
        c4iw_get_ep(&parent_ep->com);
        child_ep->parent_ep = parent_ep;
        child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
        child_ep->dst = dst;
        child_ep->hwtid = hwtid;

        PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
             child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

        init_timer(&child_ep->timer);
        cxgb4_insert_tid(t, child_ep, hwtid);
        insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
        accept_cr(child_ep, peer_ip, skb, req);
        set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
        goto out;
reject:
        reject_cr(dev, hwtid, peer_ip, skb);
out:
        return 0;
}

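/*
 * The MTU clamp in pass_accept_req() above caps the path MTU at the peer's
 * advertised MSS plus 40 bytes, i.e. the 20-byte IPv4 header plus the
 * 20-byte TCP header that the MSS value does not account for.
 */
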
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_pass_establish *req = cplhdr(skb);
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(req);

        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        ep->snd_seq = be32_to_cpu(req->snd_isn);
        ep->rcv_seq = be32_to_cpu(req->rcv_isn);

        PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
             ntohs(req->tcp_opt));

        set_emss(ep, ntohs(req->tcp_opt));

        dst_confirm(ep->dst);
        state_set(&ep->com, MPA_REQ_WAIT);
        start_ep_timer(ep);
        send_flowc(ep, skb);
        set_bit(PASS_ESTAB, &ep->com.history);

        return 0;
}

static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_peer_close *hdr = cplhdr(skb);
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attrs;
        int disconnect = 1;
        int release = 0;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(hdr);
        int ret;

        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        dst_confirm(ep->dst);

        set_bit(PEER_CLOSE, &ep->com.history);
        mutex_lock(&ep->com.mutex);
        switch (ep->com.state) {
        case MPA_REQ_WAIT:
                __state_set(&ep->com, CLOSING);
                break;
        case MPA_REQ_SENT:
                __state_set(&ep->com, CLOSING);
                connect_reply_upcall(ep, -ECONNRESET);
                break;
        case MPA_REQ_RCVD:
                /*
                 * We're gonna mark this puppy DEAD, but keep
                 * the reference on it until the ULP accepts or
                 * rejects the CR. Also wake up anyone waiting
                 * in rdma connection migration (see c4iw_accept_cr()).
                 */
                __state_set(&ep->com, CLOSING);
                PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
                c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
                break;
        case MPA_REP_SENT:
                __state_set(&ep->com, CLOSING);
                PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
                c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
                break;
        case FPDU_MODE:
                start_ep_timer(ep);
                __state_set(&ep->com, CLOSING);
                attrs.next_state = C4IW_QP_STATE_CLOSING;
                ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
                                     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                if (ret != -ECONNRESET) {
                        peer_close_upcall(ep);
                        disconnect = 1;
                }
                break;
        case ABORTING:
                disconnect = 0;
                break;
        case CLOSING:
                __state_set(&ep->com, MORIBUND);
                disconnect = 0;
                break;
        case MORIBUND:
                stop_ep_timer(ep);
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = C4IW_QP_STATE_IDLE;
                        c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
                                       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                }
                close_complete_upcall(ep);
                __state_set(&ep->com, DEAD);
                release = 1;
                disconnect = 0;
                break;
        case DEAD:
                disconnect = 0;
                break;
        default:
                BUG_ON(1);
        }
        mutex_unlock(&ep->com.mutex);
        if (disconnect)
                c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
        if (release)
                release_ep_resources(ep);
        return 0;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
        return status == CPL_ERR_RTX_NEG_ADVICE ||
               status == CPL_ERR_PERSIST_NEG_ADVICE;
}

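/*
 * CPL_ERR_RTX_NEG_ADVICE and CPL_ERR_PERSIST_NEG_ADVICE are advisory
 * notifications from the hardware (retransmit/persist timers firing); they
 * are not hard connection failures, so the abort handlers below just log
 * them and leave the endpoint alone.
 */
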
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_abort_req_rss *req = cplhdr(skb);
        struct c4iw_ep *ep;
        struct cpl_abort_rpl *rpl;
        struct sk_buff *rpl_skb;
        struct c4iw_qp_attributes attrs;
        int ret;
        int release = 0;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(req);

        ep = lookup_tid(t, tid);
        if (is_neg_adv_abort(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
                     ep->hwtid);
                return 0;
        }
        PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
             ep->com.state);
        set_bit(PEER_ABORT, &ep->com.history);

        /*
         * Wake up any threads in rdma_init() or rdma_fini().
         * However, this is not needed if com state is just
         * MPA_REQ_SENT
         */
        if (ep->com.state != MPA_REQ_SENT)
                c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

        mutex_lock(&ep->com.mutex);
        switch (ep->com.state) {
        case CONNECTING:
                break;
        case MPA_REQ_WAIT:
                stop_ep_timer(ep);
                break;
        case MPA_REQ_SENT:
                stop_ep_timer(ep);
                if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
                        connect_reply_upcall(ep, -ECONNRESET);
                else {
                        /*
                         * we just don't send notification upwards because we
                         * want to retry with mpa_v1 without upper layers even
                         * knowing it.
                         *
                         * do some housekeeping so as to re-initiate the
                         * connection
                         */
                        PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
                             mpa_rev);
                        ep->retry_with_mpa_v1 = 1;
                }
                break;
        case MPA_REP_SENT:
                break;
        case MPA_REQ_RCVD:
                break;
        case MORIBUND:
        case CLOSING:
                stop_ep_timer(ep);
                /*FALLTHROUGH*/
        case FPDU_MODE:
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = C4IW_QP_STATE_ERROR;
                        ret = c4iw_modify_qp(ep->com.qp->rhp,
                                     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
                                     &attrs, 1);
                        if (ret)
                                printk(KERN_ERR MOD
                                       "%s - qp <- error failed!\n",
                                       __func__);
                }
                peer_abort_upcall(ep);
                break;
        case ABORTING:
                break;
        case DEAD:
                PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
                mutex_unlock(&ep->com.mutex);
                return 0;
        default:
                BUG_ON(1);
                break;
        }
        dst_confirm(ep->dst);
        if (ep->com.state != ABORTING) {
                __state_set(&ep->com, DEAD);
                /* we don't release if we want to retry with mpa_v1 */
                if (!ep->retry_with_mpa_v1)
                        release = 1;
        }
        mutex_unlock(&ep->com.mutex);

        rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
        if (!rpl_skb) {
                printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
                       __func__);
                release = 1;
                goto out;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
        rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
        INIT_TP_WR(rpl, ep->hwtid);
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
        rpl->cmd = CPL_ABORT_NO_RST;
        c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
        if (release)
                release_ep_resources(ep);
        else if (ep->retry_with_mpa_v1) {
                remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
                cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
                dst_release(ep->dst);
                cxgb4_l2t_release(ep->l2t);
                c4iw_reconnect(ep);
        }

        return 0;
}

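/*
 * In the MPA v2 -> v1 fallback path above, peer_abort() releases the
 * hardware TID, the route and the L2T entry but keeps the endpoint itself
 * alive, so the connect can be retried with MPA v1 without the upper layer
 * ever seeing the failed attempt.
 */
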
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attrs;
        struct cpl_close_con_rpl *rpl = cplhdr(skb);
        int release = 0;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(rpl);

        ep = lookup_tid(t, tid);

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        BUG_ON(!ep);

        /* The cm_id may be null if we failed to connect */
        mutex_lock(&ep->com.mutex);
        switch (ep->com.state) {
        case CLOSING:
                __state_set(&ep->com, MORIBUND);
                break;
        case MORIBUND:
                stop_ep_timer(ep);
                if ((ep->com.cm_id) && (ep->com.qp)) {
                        attrs.next_state = C4IW_QP_STATE_IDLE;
                        c4iw_modify_qp(ep->com.qp->rhp,
                                       ep->com.qp,
                                       C4IW_QP_ATTR_NEXT_STATE,
                                       &attrs, 1);
                }
                close_complete_upcall(ep);
                __state_set(&ep->com, DEAD);
                release = 1;
                break;
        case ABORTING:
        case DEAD:
                break;
        default:
                BUG_ON(1);
                break;
        }
        mutex_unlock(&ep->com.mutex);
        if (release)
                release_ep_resources(ep);
        return 0;
}

static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_rdma_terminate *rpl = cplhdr(skb);
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(rpl);
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attrs;

        ep = lookup_tid(t, tid);
        BUG_ON(!ep);

        if (ep && ep->com.qp) {
                printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
                       ep->com.qp->wq.sq.qid);
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
                c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
        } else
                printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);

        return 0;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us its just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_fw4_ack *hdr = cplhdr(skb);
        u8 credits = hdr->credits;
        unsigned int tid = GET_TID(hdr);
        struct tid_info *t = dev->rdev.lldi.tids;

        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
        if (credits == 0) {
                PDBG("%s 0 credit ack ep %p tid %u state %u\n",
                     __func__, ep, ep->hwtid, state_read(&ep->com));
                return 0;
        }

        dst_confirm(ep->dst);
        if (ep->mpa_skb) {
                PDBG("%s last streaming msg ack ep %p tid %u state %u "
                     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
                     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
                kfree_skb(ep->mpa_skb);
                ep->mpa_skb = NULL;
        }
        return 0;
}

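/*
 * fw4_ack() above only matters for the streaming-mode MPA exchange: a
 * zero-credit ack is ignored, and any other ack means the last MPA
 * request/reply has left the wire, so the cached ep->mpa_skb can be freed.
 */
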
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
        int err;
        struct c4iw_ep *ep = to_ep(cm_id);
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

        if (state_read(&ep->com) == DEAD) {
                c4iw_put_ep(&ep->com);
                return -ECONNRESET;
        }
        set_bit(ULP_REJECT, &ep->com.history);
        BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
        if (mpa_rev == 0)
                abort_connection(ep, NULL, GFP_KERNEL);
        else {
                err = send_mpa_reject(ep, pdata, pdata_len);
                err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
        }
        c4iw_put_ep(&ep->com);
        return 0;
}

int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
        int err;
        struct c4iw_qp_attributes attrs;
        enum c4iw_qp_attr_mask mask;
        struct c4iw_ep *ep = to_ep(cm_id);
        struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
        struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        if (state_read(&ep->com) == DEAD) {
                err = -ECONNRESET;
                goto err;
        }

        BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
        BUG_ON(!qp);

        set_bit(ULP_ACCEPT, &ep->com.history);
        if ((conn_param->ord > c4iw_max_read_depth) ||
            (conn_param->ird > c4iw_max_read_depth)) {
                abort_connection(ep, NULL, GFP_KERNEL);
                err = -EINVAL;
                goto err;
        }

        if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
                if (conn_param->ord > ep->ird) {
                        ep->ird = conn_param->ird;
                        ep->ord = conn_param->ord;
                        send_mpa_reject(ep, conn_param->private_data,
                                        conn_param->private_data_len);
                        abort_connection(ep, NULL, GFP_KERNEL);
                        err = -ENOMEM;
                        goto err;
                }
                if (conn_param->ird > ep->ord) {
                        if (!ep->ord)
                                conn_param->ird = 1;
                        else {
                                abort_connection(ep, NULL, GFP_KERNEL);
                                err = -ENOMEM;
                                goto err;
                        }
                }
        }
        ep->ird = conn_param->ird;
        ep->ord = conn_param->ord;

        if (ep->mpa_attr.version != 2)
                if (peer2peer && ep->ird == 0)
                        ep->ird = 1;

        PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

        cm_id->add_ref(cm_id);
        ep->com.cm_id = cm_id;
        ep->com.qp = qp;

        /* bind QP to EP and move to RTS */
        attrs.mpa_attr = ep->mpa_attr;
        attrs.max_ird = ep->ird;
        attrs.max_ord = ep->ord;
        attrs.llp_stream_handle = ep;
        attrs.next_state = C4IW_QP_STATE_RTS;

        /* bind QP and TID with INIT_WR */
        mask = C4IW_QP_ATTR_NEXT_STATE |
               C4IW_QP_ATTR_LLP_STREAM_HANDLE |
               C4IW_QP_ATTR_MPA_ATTR |
               C4IW_QP_ATTR_MAX_IRD |
               C4IW_QP_ATTR_MAX_ORD;

        err = c4iw_modify_qp(ep->com.qp->rhp,
                             ep->com.qp, mask, &attrs, 1);
        if (err)
                goto err1;
        err = send_mpa_reply(ep, conn_param->private_data,
                             conn_param->private_data_len);
        if (err)
                goto err1;

        state_set(&ep->com, FPDU_MODE);
        established_upcall(ep);
        c4iw_put_ep(&ep->com);
        return 0;
err1:
        ep->com.cm_id = NULL;
        cm_id->rem_ref(cm_id);
err:
        c4iw_put_ep(&ep->com);
        return err;
}

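/*
 * MPA v2 note for c4iw_accept_cr() above: for enhanced-RDMA connections the
 * accept is aborted rather than silently clamped when the ULP asks for an
 * ORD larger than the IRD the peer advertised (or an IRD larger than the
 * peer's non-zero ORD).  Only after those checks are ep->ird/ep->ord
 * committed and the QP moved to RTS with the MPA attributes bound in a
 * single c4iw_modify_qp() call.
 */
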
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
        struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
        struct c4iw_ep *ep;
        struct rtable *rt;
        int err = 0;

        if ((conn_param->ord > c4iw_max_read_depth) ||
            (conn_param->ird > c4iw_max_read_depth)) {
                err = -EINVAL;
                goto out;
        }
        ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
        if (!ep) {
                printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
                err = -ENOMEM;
                goto out;
        }
        init_timer(&ep->timer);
        ep->plen = conn_param->private_data_len;
        if (ep->plen)
                memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
                       conn_param->private_data, ep->plen);
        ep->ird = conn_param->ird;
        ep->ord = conn_param->ord;

        if (peer2peer && ep->ord == 0)
                ep->ord = 1;

        cm_id->add_ref(cm_id);
        ep->com.dev = dev;
        ep->com.cm_id = cm_id;
        ep->com.qp = get_qhp(dev, conn_param->qpn);
        BUG_ON(!ep->com.qp);
        PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
             ep->com.qp, cm_id);

        /*
         * Allocate an active TID to initiate a TCP connection.
         */
        ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
        if (ep->atid == -1) {
                printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
                err = -ENOMEM;
                goto fail2;
        }
        insert_handle(dev, &dev->atid_idr, ep, ep->atid);

        PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
             ntohl(cm_id->local_addr.sin_addr.s_addr),
             ntohs(cm_id->local_addr.sin_port),
             ntohl(cm_id->remote_addr.sin_addr.s_addr),
             ntohs(cm_id->remote_addr.sin_port));

        /* find a route */
        rt = find_route(dev,
                        cm_id->local_addr.sin_addr.s_addr,
                        cm_id->remote_addr.sin_addr.s_addr,
                        cm_id->local_addr.sin_port,
                        cm_id->remote_addr.sin_port, 0);
        if (!rt) {
                printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
                err = -EHOSTUNREACH;
                goto fail3;
        }
        ep->dst = &rt->dst;

        err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
                        ep->dst, ep->com.dev, true);
        if (err) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                goto fail4;
        }

        PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
             __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
             ep->l2t->idx);

        state_set(&ep->com, CONNECTING);
        ep->com.local_addr = cm_id->local_addr;
        ep->com.remote_addr = cm_id->remote_addr;

        /* send connect request to rnic */
        err = send_connect(ep);
        if (!err)
                goto out;

        cxgb4_l2t_release(ep->l2t);
fail4:
        dst_release(ep->dst);
fail3:
        remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
        cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
        cm_id->rem_ref(cm_id);
        c4iw_put_ep(&ep->com);
out:
        return err;
}

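/*
 * Active-open sequence used by c4iw_connect() above: allocate an endpoint
 * and an atid, resolve a route to the peer, import the L2T/queue state via
 * import_ep(), then issue send_connect().  The fail2/fail3/fail4 labels
 * unwind those steps in reverse order when any stage fails.
 */
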
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
        int err = 0;
        struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
        struct c4iw_listen_ep *ep;

        might_sleep();

        ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
        if (!ep) {
                printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
                err = -ENOMEM;
                goto fail1;
        }
        PDBG("%s ep %p\n", __func__, ep);
        cm_id->add_ref(cm_id);
        ep->com.cm_id = cm_id;
        ep->com.dev = dev;
        ep->backlog = backlog;
        ep->com.local_addr = cm_id->local_addr;

        /*
         * Allocate a server TID.
         */
        if (dev->rdev.lldi.enable_fw_ofld_conn)
                ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep);
        else
                ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);

        if (ep->stid == -1) {
                printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
                err = -ENOMEM;
                goto fail2;
        }
        insert_handle(dev, &dev->stid_idr, ep, ep->stid);
        state_set(&ep->com, LISTEN);
        if (dev->rdev.lldi.enable_fw_ofld_conn) {
                do {
                        err = cxgb4_create_server_filter(
                                ep->com.dev->rdev.lldi.ports[0], ep->stid,
                                ep->com.local_addr.sin_addr.s_addr,
                                ep->com.local_addr.sin_port,
                                0,
                                ep->com.dev->rdev.lldi.rxq_ids[0],
                                0, 0);
                        if (err == -EBUSY) {
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                schedule_timeout(usecs_to_jiffies(100));
                        }
                } while (err == -EBUSY);
        } else {
                c4iw_init_wr_wait(&ep->com.wr_wait);
                err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
                                ep->stid, ep->com.local_addr.sin_addr.s_addr,
                                ep->com.local_addr.sin_port,
                                0,
                                ep->com.dev->rdev.lldi.rxq_ids[0]);
                if (!err)
                        err = c4iw_wait_for_reply(&ep->com.dev->rdev,
                                                  &ep->com.wr_wait,
                                                  0, 0, __func__);
        }
        if (!err) {
                cm_id->provider_data = ep;
                goto out;
        }
        pr_err("%s cxgb4_create_server/filter failed err %d "
               "stid %d laddr %08x lport %d\n",
               __func__, err, ep->stid,
               ntohl(ep->com.local_addr.sin_addr.s_addr),
               ntohs(ep->com.local_addr.sin_port));
        cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
        cm_id->rem_ref(cm_id);
        c4iw_put_ep(&ep->com);
fail1:
out:
        return err;
}

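/*
 * c4iw_create_listen() above takes one of two paths depending on the LLD:
 * with enable_fw_ofld_conn a server filter (sftid) redirects incoming SYNs
 * to the offload queue and the filter creation is retried for as long as
 * the firmware answers -EBUSY; otherwise a regular server TID is created
 * and the completion is waited for through the wr_wait mechanism.
 */
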
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
        int err;
        struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

        PDBG("%s ep %p\n", __func__, ep);

        might_sleep();
        state_set(&ep->com, DEAD);
        if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) {
                err = cxgb4_remove_server_filter(
                        ep->com.dev->rdev.lldi.ports[0], ep->stid,
                        ep->com.dev->rdev.lldi.rxq_ids[0], 0);
        } else {
                c4iw_init_wr_wait(&ep->com.wr_wait);
                err = listen_stop(ep);
                if (err)
                        goto done;
                err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
                                          0, 0, __func__);
        }
        remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
        cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
        cm_id->rem_ref(cm_id);
        c4iw_put_ep(&ep->com);
        return err;
}

int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
        int ret = 0;
        int close = 0;
        int fatal = 0;
        struct c4iw_rdev *rdev;

        mutex_lock(&ep->com.mutex);

        PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
             states[ep->com.state], abrupt);

        rdev = &ep->com.dev->rdev;
        if (c4iw_fatal_error(rdev)) {
                fatal = 1;
                close_complete_upcall(ep);
                ep->com.state = DEAD;
        }
        switch (ep->com.state) {
        case MPA_REQ_WAIT:
        case MPA_REQ_SENT:
        case MPA_REQ_RCVD:
        case MPA_REP_SENT:
        case FPDU_MODE:
                close = 1;
                if (abrupt)
                        ep->com.state = ABORTING;
                else {
                        ep->com.state = CLOSING;
                        start_ep_timer(ep);
                }
                set_bit(CLOSE_SENT, &ep->com.flags);
                break;
        case CLOSING:
                if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
                        close = 1;
                        if (abrupt) {
                                stop_ep_timer(ep);
                                ep->com.state = ABORTING;
                        } else
                                ep->com.state = MORIBUND;
                }
                break;
        case MORIBUND:
        case ABORTING:
        case DEAD:
                PDBG("%s ignoring disconnect ep %p state %u\n",
                     __func__, ep, ep->com.state);
                break;
        default:
                BUG();
                break;
        }

        if (close) {
                if (abrupt) {
                        set_bit(EP_DISC_ABORT, &ep->com.history);
                        close_complete_upcall(ep);
                        ret = send_abort(ep, NULL, gfp);
                } else {
                        set_bit(EP_DISC_CLOSE, &ep->com.history);
                        ret = send_halfclose(ep, gfp);
                }
                if (ret)
                        fatal = 1;
        }
        mutex_unlock(&ep->com.mutex);
        if (fatal)
                release_ep_resources(ep);
        return ret;
}

static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
                        struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
        struct c4iw_ep *ep;
        int atid = be32_to_cpu(req->tid);

        ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
                                           (__force u32) req->tid);

        switch (req->retval) {
        case FW_ENOMEM:
                set_bit(ACT_RETRY_NOMEM, &ep->com.history);
                if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
                        send_fw_act_open_req(ep, atid);
                        return;
                }
        case FW_EADDRINUSE:
                set_bit(ACT_RETRY_INUSE, &ep->com.history);
                if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
                        send_fw_act_open_req(ep, atid);
                        return;
                }
                break;
        default:
                pr_info("%s unexpected ofld conn wr retval %d\n",
                        __func__, req->retval);
                break;
        }
        pr_err("active ofld_connect_wr failure %d atid %d\n",
               req->retval, atid);
        mutex_lock(&dev->rdev.stats.lock);
        dev->rdev.stats.act_ofld_conn_fails++;
        mutex_unlock(&dev->rdev.stats.lock);
        connect_reply_upcall(ep, status2errno(req->retval));
        state_set(&ep->com, DEAD);
        remove_handle(dev, &dev->atid_idr, atid);
        cxgb4_free_atid(dev->rdev.lldi.tids, atid);
        dst_release(ep->dst);
        cxgb4_l2t_release(ep->l2t);
        c4iw_put_ep(&ep->com);
}

static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
                        struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
        struct sk_buff *rpl_skb;
        struct cpl_pass_accept_req *cpl;
        int ret;

        rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
        BUG_ON(!rpl_skb);
        if (req->retval) {
                PDBG("%s passive open failure %d\n", __func__, req->retval);
                mutex_lock(&dev->rdev.stats.lock);
                dev->rdev.stats.pas_ofld_conn_fails++;
                mutex_unlock(&dev->rdev.stats.lock);
                kfree_skb(rpl_skb);
        } else {
                cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
                OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
                                        (__force u32) htonl(
                                        (__force u32) req->tid)));
                ret = pass_accept_req(dev, rpl_skb);
                if (!ret)
                        kfree_skb(rpl_skb);
        }
}

static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_fw6_msg *rpl = cplhdr(skb);
        struct cpl_fw6_msg_ofld_connection_wr_rpl *req;

        switch (rpl->type) {
        case FW6_TYPE_CQE:
                c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
                break;
        case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
                req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
                switch (req->t_state) {
                case TCP_SYN_SENT:
                        active_ofld_conn_reply(dev, skb, req);
                        break;
                case TCP_SYN_RECV:
                        passive_ofld_conn_reply(dev, skb, req);
                        break;
                default:
                        pr_err("%s unexpected ofld conn wr state %d\n",
                               __func__, req->t_state);
                        break;
                }
                break;
        }
        return 0;
}

static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
{
        u32 l2info;
        u16 vlantag, len, hdr_len, eth_hdr_len;
        u8 intf;
        struct cpl_rx_pkt *cpl = cplhdr(skb);
        struct cpl_pass_accept_req *req;
        struct tcp_options_received tmp_opt;
        struct c4iw_dev *dev;

        dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
        /* Store values from cpl_rx_pkt in temporary location. */
        vlantag = (__force u16) cpl->vlan;
        len = (__force u16) cpl->len;
        l2info = (__force u32) cpl->l2info;
        hdr_len = (__force u16) cpl->hdr_len;
        intf = cpl->iff;

        __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));

        /*
         * We need to parse the TCP options from SYN packet.
         * to generate cpl_pass_accept_req.
         */
        memset(&tmp_opt, 0, sizeof(tmp_opt));
        tcp_clear_options(&tmp_opt);
        tcp_parse_options(skb, &tmp_opt, 0, NULL);

        req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
        memset(req, 0, sizeof(*req));
        req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
                         V_SYN_MAC_IDX(G_RX_MACIDX(
                         (__force int) htonl(l2info))) |
                         F_SYN_XACT_MATCH);
        eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
                            G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
                            G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
        req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
                                        (__force int) htonl(l2info))) |
                                   V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
                                        (__force int) htons(hdr_len))) |
                                   V_IP_HDR_LEN(G_RX_IPHDR_LEN(
                                        (__force int) htons(hdr_len))) |
                                   V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
        req->vlan = (__force __be16) vlantag;
        req->len = (__force __be16) len;
        req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
                                    PASS_OPEN_TOS(tos));
        req->tcpopt.mss = htons(tmp_opt.mss_clamp);
        if (tmp_opt.wscale_ok)
                req->tcpopt.wsf = tmp_opt.snd_wscale;
        req->tcpopt.tstamp = tmp_opt.saw_tstamp;
        if (tmp_opt.sack_ok)
                req->tcpopt.sack = 1;
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
}

static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
                                  __be32 laddr, __be16 lport,
                                  __be32 raddr, __be16 rport,
                                  u32 rcv_isn, u32 filter, u16 window,
                                  u32 rss_qid, u8 port_id)
{
        struct sk_buff *req_skb;
        struct fw_ofld_connection_wr *req;
        struct cpl_pass_accept_req *cpl = cplhdr(skb);

        req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
        req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
        memset(req, 0, sizeof(*req));
        req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
        req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
        req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
        req->le.filter = (__force __be32) filter;
        req->le.lport = lport;
        req->le.pport = rport;
        req->le.u.ipv4.lip = laddr;
        req->le.u.ipv4.pip = raddr;
        req->tcb.rcv_nxt = htonl(rcv_isn + 1);
        req->tcb.rcv_adv = htons(window);
        req->tcb.t_state_to_astid =
                 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
                        V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
                        V_FW_OFLD_CONNECTION_WR_ASTID(
                        GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));

        /*
         * We store the qid in opt2 which will be used by the firmware
         * to send us the wr response.
         */
        req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));

        /*
         * We initialize the MSS index in TCB to 0xF.
         * So that when driver sends cpl_pass_accept_rpl
         * TCB picks up the correct value. If this was 0
         * TP will ignore any value > 0 for MSS index.
         */
        req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
        req->cookie = (unsigned long)skb;

        set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
        cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
}

/*
 * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt
 * messages when a filter is being used instead of server to
 * redirect a syn packet. When packets hit filter they are redirected
 * to the offload queue and driver tries to establish the connection
 * using firmware work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{
        int stid;
        unsigned int filter;
        struct ethhdr *eh = NULL;
        struct vlan_ethhdr *vlan_eh = NULL;
        struct iphdr *iph;
        struct tcphdr *tcph;
        struct rss_header *rss = (void *)skb->data;
        struct cpl_rx_pkt *cpl = (void *)skb->data;
        struct cpl_pass_accept_req *req = (void *)(rss + 1);
        struct l2t_entry *e;
        struct dst_entry *dst;
        struct rtable *rt;
        struct c4iw_ep *lep;
        u16 window;
        struct port_info *pi;
        struct net_device *pdev;
        u16 rss_qid, eth_hdr_len;
        int step;
        u32 tx_chan;
        struct neighbour *neigh;

        /* Drop all non-SYN packets */
        if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
                goto reject;

        /*
         * Drop all packets which did not hit the filter.
         * Unlikely to happen.
         */
        if (!(rss->filter_hit && rss->filter_tid))
                goto reject;

        /*
         * Calculate the server tid from filter hit index from cpl_rx_pkt.
         */
        stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
                                          - dev->rdev.lldi.tids->sftid_base
                                          + dev->rdev.lldi.tids->nstids;

        lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
        if (!lep) {
                PDBG("%s connect request on invalid stid %d\n", __func__, stid);
                goto reject;
        }

        eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
                            G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
                            G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
        if (eth_hdr_len == ETH_HLEN) {
                eh = (struct ethhdr *)(req + 1);
                iph = (struct iphdr *)(eh + 1);
        } else {
                vlan_eh = (struct vlan_ethhdr *)(req + 1);
                iph = (struct iphdr *)(vlan_eh + 1);
                skb->vlan_tci = ntohs(cpl->vlan);
        }

        if (iph->version != 0x4)
                goto reject;

        tcph = (struct tcphdr *)(iph + 1);
        skb_set_network_header(skb, (void *)iph - (void *)rss);
        skb_set_transport_header(skb, (void *)tcph - (void *)rss);
        skb_get(skb);

        PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
             ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
             ntohs(tcph->source), iph->tos);

        rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
                        iph->tos);
        if (!rt) {
                pr_err("%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }
        dst = &rt->dst;
        neigh = dst_neigh_lookup_skb(dst, skb);

        if (!neigh) {
                pr_err("%s - failed to allocate neigh!\n",
                       __func__);
                goto free_dst;
        }

        if (neigh->dev->flags & IFF_LOOPBACK) {
                pdev = ip_dev_find(&init_net, iph->daddr);
                e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
                                  pdev, 0);
                pi = (struct port_info *)netdev_priv(pdev);
                tx_chan = cxgb4_port_chan(pdev);
                dev_put(pdev);
        } else {
                e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
                                  neigh->dev, 0);
                pi = (struct port_info *)netdev_priv(neigh->dev);
                tx_chan = cxgb4_port_chan(neigh->dev);
        }
        if (!e) {
                pr_err("%s - failed to allocate l2t entry!\n",
                       __func__);
                goto free_dst;
        }

        step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
        rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
        window = (__force u16) htons((__force u16)tcph->window);

        /* Calcuate filter portion for LE region. */
        filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));

        /*
         * Synthesize the cpl_pass_accept_req. We have everything except the
         * TID. Once firmware sends a reply with TID we update the TID field
         * in cpl and pass it through the regular cpl_pass_accept_req path.
         */
        build_cpl_pass_accept_req(skb, stid, iph->tos);
        send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
                              tcph->source, ntohl(tcph->seq), filter, window,
                              rss_qid, pi->port_id);
        cxgb4_l2t_release(e);
free_dst:
        dst_release(dst);
reject:
        return 0;
}

/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
        [CPL_ACT_ESTABLISH] = act_establish,
        [CPL_ACT_OPEN_RPL] = act_open_rpl,
        [CPL_RX_DATA] = rx_data,
        [CPL_ABORT_RPL_RSS] = abort_rpl,
        [CPL_ABORT_RPL] = abort_rpl,
        [CPL_PASS_OPEN_RPL] = pass_open_rpl,
        [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
        [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
        [CPL_PASS_ESTABLISH] = pass_establish,
        [CPL_PEER_CLOSE] = peer_close,
        [CPL_ABORT_REQ_RSS] = peer_abort,
        [CPL_CLOSE_CON_RPL] = close_con_rpl,
        [CPL_RDMA_TERMINATE] = terminate,
        [CPL_FW4_ACK] = fw4_ack,
        [CPL_FW6_MSG] = deferred_fw6_msg,
        [CPL_RX_PKT] = rx_pkt
};

static void process_timeout(struct c4iw_ep *ep)
{
        struct c4iw_qp_attributes attrs;
        int abort = 1;

        mutex_lock(&ep->com.mutex);
        PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
             ep->com.state);
        set_bit(TIMEDOUT, &ep->com.history);
        switch (ep->com.state) {
        case MPA_REQ_SENT:
                __state_set(&ep->com, ABORTING);
                connect_reply_upcall(ep, -ETIMEDOUT);
                break;
        case MPA_REQ_WAIT:
                __state_set(&ep->com, ABORTING);
                break;
        case CLOSING:
        case MORIBUND:
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = C4IW_QP_STATE_ERROR;
                        c4iw_modify_qp(ep->com.qp->rhp,
                                     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
                                     &attrs, 1);
                }
                __state_set(&ep->com, ABORTING);
                break;
        default:
                WARN(1, "%s unexpected state ep %p tid %u state %u\n",
                        __func__, ep, ep->hwtid, ep->com.state);
                abort = 0;
        }
        mutex_unlock(&ep->com.mutex);
        if (abort)
                abort_connection(ep, NULL, GFP_KERNEL);
        c4iw_put_ep(&ep->com);
}

static void process_timedout_eps(void)
{
        struct c4iw_ep *ep;

        spin_lock_irq(&timeout_lock);
        while (!list_empty(&timeout_list)) {
                struct list_head *tmp;

                tmp = timeout_list.next;
                list_del(tmp);
                spin_unlock_irq(&timeout_lock);
                ep = list_entry(tmp, struct c4iw_ep, entry);
                process_timeout(ep);
                spin_lock_irq(&timeout_lock);
        }
        spin_unlock_irq(&timeout_lock);
}

static void process_work(struct work_struct *work)
{
        struct sk_buff *skb = NULL;
        struct c4iw_dev *dev;
        struct cpl_act_establish *rpl;
        unsigned int opcode;
        int ret;

        while ((skb = skb_dequeue(&rxq))) {
                rpl = cplhdr(skb);
                dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
                opcode = rpl->ot.opcode;

                BUG_ON(!work_handlers[opcode]);
                ret = work_handlers[opcode](dev, skb);
                if (!ret)
                        kfree_skb(skb);
        }
        process_timedout_eps();
}

static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(unsigned long arg)
{
        struct c4iw_ep *ep = (struct c4iw_ep *)arg;
        int kickit = 0;

        spin_lock(&timeout_lock);
        if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
                list_add_tail(&ep->entry, &timeout_list);
                kickit = 1;
        }
        spin_unlock(&timeout_lock);
        if (kickit)
                queue_work(workq, &skb_work);
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
        /*
         * Save dev in the skb->cb area.
         */
        *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

        /*
         * Queue the skb and schedule the worker thread.
         */
        skb_queue_tail(&rxq, skb);
        queue_work(workq, &skb_work);
        return 0;
}

static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE) {
                printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
                       "for tid %u\n", rpl->status, GET_TID(rpl));
        }
        kfree_skb(skb);
        return 0;
}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_fw6_msg *rpl = cplhdr(skb);
        struct c4iw_wr_wait *wr_waitp;
        int ret;

        PDBG("%s type %u\n", __func__, rpl->type);

        switch (rpl->type) {
        case FW6_TYPE_WR_RPL:
                ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
                wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
                PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
                if (wr_waitp)
                        c4iw_wake_up(wr_waitp, ret ? -ret : 0);
                kfree_skb(skb);
                break;
        case FW6_TYPE_CQE:
        case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
                sched(dev, skb);
                break;
        default:
                printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
                       rpl->type);
                kfree_skb(skb);
                break;
        }
        return 0;
}

static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_abort_req_rss *req = cplhdr(skb);
        struct c4iw_ep *ep;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(req);

        ep = lookup_tid(t, tid);
        if (!ep) {
                printk(KERN_WARNING MOD
                       "Abort on non-existent endpoint, tid %d\n", tid);
                kfree_skb(skb);
                return 0;
        }
        if (is_neg_adv_abort(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
                     ep->hwtid);
                kfree_skb(skb);
                return 0;
        }
        PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
             ep->com.state);

        /*
         * Wake up any threads in rdma_init() or rdma_fini().
         * However, if we are on MPAv2 and want to retry with MPAv1
         * then, don't wake up yet.
         */
        if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
                if (ep->com.state != MPA_REQ_SENT)
                        c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
        } else
                c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

        sched(dev, skb);
        return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
        [CPL_ACT_ESTABLISH] = sched,
        [CPL_ACT_OPEN_RPL] = sched,
        [CPL_RX_DATA] = sched,
        [CPL_ABORT_RPL_RSS] = sched,
        [CPL_ABORT_RPL] = sched,
        [CPL_PASS_OPEN_RPL] = sched,
        [CPL_CLOSE_LISTSRV_RPL] = sched,
        [CPL_PASS_ACCEPT_REQ] = sched,
        [CPL_PASS_ESTABLISH] = sched,
        [CPL_PEER_CLOSE] = sched,
        [CPL_CLOSE_CON_RPL] = sched,
        [CPL_ABORT_REQ_RSS] = peer_abort_intr,
        [CPL_RDMA_TERMINATE] = sched,
        [CPL_FW4_ACK] = sched,
        [CPL_SET_TCB_RPL] = set_tcb_rpl,
        [CPL_FW6_MSG] = fw6_msg,
        [CPL_RX_PKT] = sched
};

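/*
 * A minimal sketch of how an upper-layer dispatcher is expected to use this
 * table (the real dispatch lives in the device/ULD glue, not in this file;
 * the opcode extraction below mirrors process_work() and is illustrative
 * only):
 *
 *        struct cpl_act_establish *rpl = cplhdr(skb);
 *        u8 opcode = rpl->ot.opcode;
 *
 *        if (c4iw_handlers[opcode])
 *                c4iw_handlers[opcode](dev, skb);
 */
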
int __init c4iw_cm_init(void)
{
        spin_lock_init(&timeout_lock);
        skb_queue_head_init(&rxq);

        workq = create_singlethread_workqueue("iw_cxgb4");
        if (!workq)
                return -ENOMEM;

        return 0;
}

void __exit c4iw_cm_term(void)
{
        WARN_ON(!list_empty(&timeout_list));
        flush_workqueue(workq);
        destroy_workqueue(workq);
}