/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>
static char *states[] = {
	"idle", "listen", "connecting", "mpa_wait_req", "mpa_req_sent",
	"mpa_req_rcvd", "mpa_rep_sent", "fpdu_mode", "aborting", "closing",
	"moribund", "dead", NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		 " compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n", __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
}

static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
		     TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
	     ep->mss, ep->emss);
}
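
/*
 * Note: the effective MSS computed above is derived from the hardware MTU
 * table entry chosen for the connection: the MTU minus the IPv4 or IPv6
 * header and the TCP header, further reduced by the (4-byte aligned) TCP
 * timestamp option when timestamps are negotiated.  For example, a
 * 1500-byte MTU IPv4 connection without timestamps yields an emss of
 * 1500 - 20 - 20 = 1460 bytes.
 */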

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.mapped_local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) {
		print_addr(&ep->com, __func__, "remove_mapinfo/mapping");
		iwpm_remove_mapinfo(&ep->com.local_addr,
				    &ep->com.mapped_local_addr);
		iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}

static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
				     __u8 *peer_ip, __be16 local_port,
				     __be16 peer_port, u8 tos,
				     __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		if (!dst)
			goto out;
		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
			dst_release(dst);
			dst = NULL;
		}
	}
out:
	return dst;
}

static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
				    __be32 peer_ip, __be16 local_port,
				    __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n)
		return NULL;
	if (!our_interface(dev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
	connect_reply_upcall(ep, -EHOSTUNREACH);
	state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.mapped_local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}

static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
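
/*
 * The FLOWC work request above carries eight real parameters (PF/VF,
 * channel, port, ingress queue, send/receive sequence numbers, send
 * buffer size and MSS).  A ninth, zeroed mnemonic is appended purely to
 * pad the WR out to a 16-byte multiple, which is why the WR length is a
 * fixed 80 bytes while NPARAMS remains 8.
 */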

static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * c4iw_form_pm_msg - Form a port mapper message with mapping info
 */
static void c4iw_form_pm_msg(struct c4iw_ep *ep,
			     struct iwpm_sa_data *pm_msg)
{
	memcpy(&pm_msg->loc_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&pm_msg->rem_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
}

/*
 * c4iw_form_reg_msg - Form a port mapper message with dev info
 */
static void c4iw_form_reg_msg(struct c4iw_dev *dev,
			      struct iwpm_dev_data *pm_msg)
{
	memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE);
	memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name,
	       IWPM_IFNAME_SIZE);
}

static void c4iw_record_pm_msg(struct c4iw_ep *ep,
			       struct iwpm_sa_data *pm_msg)
{
	memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr,
	       sizeof(ep->com.mapped_local_addr));
	memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr,
	       sizeof(ep->com.mapped_remote_addr));
}

static int get_remote_addr(struct c4iw_ep *parent_ep, struct c4iw_ep *child_ep)
{
	int ret;

	print_addr(&parent_ep->com, __func__, "get_remote_addr parent_ep ");
	print_addr(&child_ep->com, __func__, "get_remote_addr child_ep ");

	ret = iwpm_get_remote_info(&parent_ep->com.mapped_local_addr,
				   &child_ep->com.mapped_remote_addr,
				   &child_ep->com.remote_addr, RDMA_NL_C4IW);
	if (ret)
		PDBG("Unable to find remote peer addr info - err %d\n", ret);

	return ret;
}

static void best_mtu(const unsigned short *mtus, unsigned short mtu,
		     unsigned int *idx, int use_ts, int ipv6)
{
	unsigned short hdr_size = (ipv6 ?
				   sizeof(struct ipv6hdr) :
				   sizeof(struct iphdr)) +
				  sizeof(struct tcphdr) +
				  (use_ts ?
				   round_up(TCPOLEN_TIMESTAMP, 4) : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}
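
/*
 * best_mtu() asks the LLD to select the hardware MTU table index whose
 * TCP payload, once the IP, TCP and (optional) timestamp headers are
 * subtracted, is 8-byte aligned and as large as possible.  For instance,
 * with IPv4, no timestamps and a 1500-byte path MTU, hdr_size is 40 and
 * the target aligned payload is at most 1460 bytes.
 */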

static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req = NULL;
	struct cpl_t5_act_open_req *t5req = NULL;
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req6 *req6 = NULL;
	struct cpl_t5_act_open_req6 *t5req6 = NULL;
	struct cpl_t6_act_open_req6 *t6req6 = NULL;
	struct sk_buff *skb = NULL;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int win, sizev4, sizev6, wrlen;
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.mapped_local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.mapped_remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_remote_addr;
	int ret;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
	u32 isn = (prandom_u32() & ~7UL) - 1;

	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
	case CHELSIO_T4:
		sizev4 = sizeof(struct cpl_act_open_req);
		sizev6 = sizeof(struct cpl_act_open_req6);
		break;
	case CHELSIO_T5:
		sizev4 = sizeof(struct cpl_t5_act_open_req);
		sizev6 = sizeof(struct cpl_t5_act_open_req6);
		break;
	case CHELSIO_T6:
		sizev4 = sizeof(struct cpl_t6_act_open_req);
		sizev6 = sizeof(struct cpl_t6_act_open_req6);
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(adapter_type));
		return -EINVAL;
	}

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		if (peer2peer)
			isn += 4;

		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
	}

	if (ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&la6->sin6_addr.s6_addr, 1);

	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (ep->com.remote_addr.ss_family == AF_INET) {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req = (struct cpl_act_open_req *)skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			break;
		case CHELSIO_T5:
			t5req = (struct cpl_t5_act_open_req *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t5req, 0);
			req = (struct cpl_act_open_req *)t5req;
			break;
		case CHELSIO_T6:
			t6req = (struct cpl_t6_act_open_req *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t6req, 0);
			req = (struct cpl_act_open_req *)t6req;
			t5req = (struct cpl_t5_act_open_req *)t6req;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
		req->local_port = la->sin_port;
		req->peer_port = ra->sin_port;
		req->local_ip = la->sin_addr.s_addr;
		req->peer_ip = ra->sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req->params = cpu_to_be32(cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t));
			req->opt2 = cpu_to_be32(opt2);
		} else {
			t5req->params = cpu_to_be64(FILTER_TUPLE_V(
					cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t)));
			t5req->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__, t5req->rsvd);
			t5req->opt2 = cpu_to_be32(opt2);
		}
	} else {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
			INIT_TP_WR(req6, 0);
			break;
		case CHELSIO_T5:
			t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t5req6, 0);
			req6 = (struct cpl_act_open_req6 *)t5req6;
			break;
		case CHELSIO_T6:
			t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t6req6, 0);
			req6 = (struct cpl_act_open_req6 *)t6req6;
			t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid << 14) | ep->atid)));
		req6->local_port = la6->sin6_port;
		req6->peer_port = ra6->sin6_port;
		req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
		req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
		req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
		req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
		req6->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req6->params = cpu_to_be32(cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		} else {
			t5req6->params = cpu_to_be64(FILTER_TUPLE_V(
					cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t)));
			t5req6->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd);
			t5req6->opt2 = cpu_to_be32(opt2);
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
clip_release:
	if (ret && ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&la6->sin6_addr.s6_addr, 1);
	return ret;
}
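
/*
 * Note on the two active-open request formats built above: T4 adapters
 * carry the connection tuple from cxgb4_select_ntuple() in the 32-bit
 * "params" field of the plain act_open_req, while T5 and later use the
 * wider FILTER_TUPLE field of the t5/t6 request and additionally seed
 * the initial send sequence number from "isn".
 */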

static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			 u8 mpa_rev_to_use)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
		     ep->ord);
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
				  FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
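
/*
 * In all three MPA frame builders above (request, reject and reply), the
 * MPA v2 negotiation parameters (struct mpa_v2_conn_params carrying
 * IRD/ORD and the peer-to-peer RTR bits) are placed in front of the ULP
 * private data inside the MPA frame, and private_data_size is grown by
 * the same amount so the peer can locate the real private data behind
 * them.
 */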

static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);
	mutex_unlock(&ep->com.mutex);
	return 0;
}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	__state_set(&ep->com, ABORTING);
	set_bit(ABORT_CONN, &ep->com.history);
	return send_abort(ep, skb, gfp);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.ord = cur_max_read_depth(ep->com.dev);
			event.ird = cur_max_read_depth(ep->com.dev);
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
	}
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = cur_max_read_depth(ep->com.dev);
		event.ird = cur_max_read_depth(ep->com.dev);
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ord;
	event.ord = ep->ird;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}

static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
				       RX_DACK_CHANGE_F |
				       RX_DACK_MODE_V(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
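
/*
 * The opt0 RCV_BUFSIZ field programmed in send_connect() is only
 * RCV_BUFSIZ_M units of 1KB wide, so a configured receive window larger
 * than that (for example the default 256KB rcv_win) cannot be advertised
 * in full at connection setup.  update_rx_credits() above hands the
 * overage back to the peer as extra credits in the CPL_RX_DATA_ACK.
 */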

#define RELAXED_IRD_NEGOTIATION 1

static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer. If it expired, then
	 * we ignore the MPA reply. process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
			     __func__, resp_ird, resp_ord, ep->ird, ep->ord);

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.lldi.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}

			if (insuff_ird) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		       __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err:
	__state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}
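
/*
 * IRD/ORD negotiation in process_mpa_reply(): with RELAXED_IRD_NEGOTIATION
 * set, a responder ORD larger than our IRD is accommodated by growing the
 * local IRD (up to the adapter's max_ordird_qp limit) instead of failing,
 * and a responder IRD smaller than our ORD simply shrinks the local ORD.
 * Only when relaxation is not possible is the connection terminated with
 * an MPA_INSUFF_IRD TERM message.
 */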

static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
			     ep->ord);
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
					    MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
						 MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	/*
	 * If the endpoint timer already expired, then we ignore
	 * the start request. process_timeout() will abort
	 * the connection.
	 */
	if (!stop_ep_timer(ep)) {
		__state_set(&ep->com, MPA_REQ_RCVD);

		/* drive upcall */
		mutex_lock_nested(&ep->parent_ep->com.mutex,
				  SINGLE_DEPTH_NESTING);
		if (ep->parent_ep->com.state != DEAD) {
			if (connect_request_upcall(ep))
				abort_connection(ep, skb, GFP_KERNEL);
		} else {
			abort_connection(ep, skb, GFP_KERNEL);
		}
		mutex_unlock(&ep->parent_ep->com.mutex);
	}
}

static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;
	__u8 status = hdr->status;
	int disconnect = 0;

	ep = lookup_tid(t, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	mutex_lock(&ep->com.mutex);

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		ep->rcv_seq += dlen;
		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		ep->rcv_seq += dlen;
		process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;
		BUG_ON(!ep->com.qp);
		if (status)
			pr_err("%s Unexpected streaming data." \
			       " qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	return 0;
}

static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	int wscale;
	struct sockaddr_in *sin;
	int win;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
				     ep->l2t));
	sin = (struct sockaddr_in *)&ep->com.mapped_local_addr;
	req->le.lport = sin->sin_port;
	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
	sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
	req->le.pport = sin->sin_port;
	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
	req->tcb.t_state_to_astid =
			htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
			      FW_OFLD_CONNECTION_WR_ASTID_V(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
			htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
		(nocong ? NO_CONG_F : 0) |
		KEEP_ALIVE_F |
		DELACK_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(ep->l2t->idx) |
		TX_CHAN_V(ep->tx_chan) |
		SMAC_SEL_V(ep->smac_idx) |
		DSCP_V(ep->tos) |
		ULP_MODE_V(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ_V(win));
	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
		TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL_V(0) |
		CCTRL_ECN_V(enable_ecn) |
		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

/* Returns whether a CPL status conveys negative advice.
 */
static int is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
}

static char *neg_adv_str(unsigned int status)
{
	switch (status) {
	case CPL_ERR_RTX_NEG_ADVICE:
		return "Retransmit timeout";
	case CPL_ERR_PERSIST_NEG_ADVICE:
		return "Persist timeout";
	case CPL_ERR_KEEPALV_NEG_ADVICE:
		return "Keepalive timeout";
	default:
		return "Unknown";
	}
}

static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
	ep->snd_win = snd_win;
	ep->rcv_win = rcv_win;
	PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
}
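
/*
 * "Negative advice" CPL statuses (retransmit, persist and keepalive
 * timeouts) indicate transient peer trouble rather than a dead
 * connection, so handlers such as act_open_rpl() below only account for
 * them in the statistics instead of tearing the endpoint down.
 */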

#define ACT_OPEN_RETRY_COUNT 2

static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1, enum chip_type adapter_type)
{
	struct neighbour *n;
	int err, step;
	struct net_device *pdev;

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  pdev, 1))
					break;
			}
		else
			pdev = NULL;

		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
						cxgb4_port_viid(pdev));
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
				cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
		dev_put(pdev);
	} else {
		pdev = get_real_dev(n->dev);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
						cxgb4_port_viid(pdev));
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
				cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
	}

	if (clear_mpa_v1) {
		ep->retry_with_mpa_v1 = 0;
		ep->tried_with_mpa_v1 = 0;
	}

	err = 0;
out:
	rcu_read_unlock();

	neigh_release(n);

	return err;
}

static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)
				    &ep->com.cm_id->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)
				    &ep->com.cm_id->remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->remote_addr;
	int iptype;
	__u8 *ra;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);

	/* find a route */
	if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
		ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, 0);
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;
	} else {
		ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;
	}
	if (!ep->dst) {
		pr_err("%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
			ep->com.dev->rdev.lldi.adapter_type);
	if (err) {
		pr_err("%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * remember to send notification to upper layer.
	 * We are in here so the upper layer is not aware that this is
	 * re-connect attempt and so, upper layer is still waiting for
	 * response of 1st connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = TID_TID_G(AOPEN_ATID_G(ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
	struct sockaddr_in *la;
	struct sockaddr_in *ra;
	struct sockaddr_in6 *la6;
	struct sockaddr_in6 *ra6;

	ep = lookup_atid(t, atid);
	la = (struct sockaddr_in *)&ep->com.mapped_local_addr;
	ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
	la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
	ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr;

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (is_neg_adv(status)) {
		PDBG("%s Connection problems for atid %u status %u (%s)\n",
		     __func__, atid, status, neg_adv_str(status));
		ep->stats.connect_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		return 0;
	}

	set_bit(ACT_OPEN_RPL, &ep->com.history);

	/*
	 * Log interesting failures.
	 */
	switch (status) {
	case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
	case CPL_ERR_TCAM_FULL:
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.tcam_full++;
		mutex_unlock(&dev->rdev.stats.lock);
		if (ep->com.local_addr.ss_family == AF_INET &&
		    dev->rdev.lldi.enable_fw_ofld_conn) {
			send_fw_act_open_req(ep,
					     TID_TID_G(AOPEN_ATID_G(
					     ntohl(rpl->atid_status))));
			return 0;
		}
		break;
	case CPL_ERR_CONN_EXIST:
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			set_bit(ACT_RETRY_INUSE, &ep->com.history);
			if (ep->com.remote_addr.ss_family == AF_INET6) {
				struct sockaddr_in6 *sin6 =
						(struct sockaddr_in6 *)
						&ep->com.mapped_local_addr;
				cxgb4_clip_release(
						ep->com.dev->rdev.lldi.ports[0],
						(const u32 *)
						&sin6->sin6_addr.s6_addr, 1);
			}
			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
				      atid);
			cxgb4_free_atid(t, atid);
			dst_release(ep->dst);
			cxgb4_l2t_release(ep->l2t);
			c4iw_reconnect(ep);
			return 0;
		}
		break;
	default:
		if (ep->com.local_addr.ss_family == AF_INET) {
			pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
				atid, status, status2errno(status),
				&la->sin_addr.s_addr, ntohs(la->sin_port),
				&ra->sin_addr.s_addr, ntohs(ra->sin_port));
		} else {
			pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
				atid, status, status2errno(status),
				la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
				ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
		}
		break;
	}

	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.mapped_local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}
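/*
 * Summary of the retry behaviour above: CPL_ERR_TCAM_FULL falls back to a
 * firmware offloaded-connection work request (send_fw_act_open_req()) when
 * enable_fw_ofld_conn is set and the address family is IPv4, while
 * CPL_ERR_CONN_EXIST tears down the per-attempt resources and re-issues the
 * active open (up to ACT_OPEN_RETRY_COUNT, i.e. 2, attempts) before the
 * failure is finally reported to the ULP via connect_reply_upcall().
 */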
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		PDBG("%s stid %d lookup failure!\n", __func__, stid);
		goto out;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

out:
	return 0;
}
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	return 0;
}
static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;
	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
	int win;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));

	skb_get(skb);
	rpl = cplhdr(skb);
	if (!is_t4(adapter_type)) {
		skb_trim(skb, roundup(sizeof(*rpl5), 16));
		rpl5 = (void *)rpl;
		INIT_TP_WR(rpl5, ep->hwtid);
	} else {
		skb_trim(skb, sizeof(*rpl));
		INIT_TP_WR(rpl, ep->hwtid);
	}
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps && req->tcpopt.tstamp,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
			tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
			       IP_HDR_LEN_G(hlen);
		else
			tcph = (const void *)(req + 1) +
			       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN_V(1);
	}
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		u32 isn = (prandom_u32() & ~7UL) - 1;
		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
		rpl5->iss = cpu_to_be32(isn);
		PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
	}

	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
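/*
 * Example of the opt0 window encoding above (illustrative numbers): the
 * advertised receive window is expressed in 1KB units, so an ep->rcv_win of
 * 256KB yields win = 256.  If that exceeds what the RCV_BUFSIZ field can
 * hold it is clamped to RCV_BUFSIZ_M, and the remainder of the window is
 * opened later via rx_data_ack, as the in-line comment notes.
 */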
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	release_tid(&dev->rdev, hwtid, skb);
}
static void get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
		       int *iptype, __u8 *local_ip, __u8 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
		      ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
		      T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
		     IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
		     T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	if (ip->version == 4) {
		PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
		     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 4;
		memcpy(peer_ip, &ip->saddr, 4);
		memcpy(local_ip, &ip->daddr, 4);
	} else {
		PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
		     ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 6;
		memcpy(peer_ip, ip6->saddr.s6_addr, 16);
		memcpy(local_ip, ip6->daddr.s6_addr, 16);
	}
	*peer_port = tcp->source;
	*local_port = tcp->dest;
}
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep = NULL, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	struct sockaddr_in6 *sin6;
	int err;
	u16 peer_mss = ntohs(req->tcpopt.mss);
	int iptype;
	unsigned short hdrs;

	parent_ep = lookup_stid(t, stid);
	if (!parent_ep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n", __func__);
		goto reject;
	}

	get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype,
		   local_ip, peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4) {
		PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
				 local_port, peer_port,
				 PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
	} else {
		PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
				  PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
				  ((struct sockaddr_in6 *)
				   &parent_ep->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
			parent_ep->com.dev->rdev.lldi.adapter_type);
	if (err) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

	hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
		child_ep->mtu = peer_mss + hdrs;

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;

	/*
	 * The mapped_local and mapped_remote addresses get setup with
	 * the actual 4-tuple. The local address will be based on the
	 * actual local address of the connection, but on the port number
	 * of the parent listening endpoint. The remote address is
	 * setup based on a query to the IWPM since we don't know what it
	 * originally was before mapping. If no mapping was done, then
	 * mapped_remote == remote, and mapped_local == local.
	 */
	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
					  &child_ep->com.mapped_local_addr;

		sin->sin_family = PF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.local_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = ((struct sockaddr_in *)
				 &parent_ep->com.local_addr)->sin_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = ((struct sockaddr_in6 *)
				   &parent_ep->com.local_addr)->sin6_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}
	memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
	       sizeof(child_ep->com.remote_addr));
	get_remote_addr(parent_ep, child_ep);

	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
	accept_cr(child_ep, skb, req);
	set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
	if (iptype == 6) {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr;
		cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	goto out;
reject:
	reject_cr(dev, hwtid, skb);
out:
	return 0;
}
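/*
 * Illustration of the child endpoint addressing above (hypothetical
 * addresses): a listener on port 5000 that receives a SYN addressed to
 * 10.0.0.5:5000 from 10.0.0.7:40000 ends up with mapped_local_addr =
 * 10.0.0.5:5000, local_addr carrying the same address but the parent
 * listener's port, and mapped_remote_addr == remote_addr = 10.0.0.7:40000
 * when no port-mapper translation is in effect.
 */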
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
	     ntohs(req->tcp_opt));

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);
	send_flowc(ep, skb);
	set_bit(PASS_ESTAB, &ep->com.history);

	return 0;
}
static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(hdr);
	int ret;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	set_bit(PEER_CLOSE, &ep->com.history);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -ECONNRESET) {
			peer_close_upcall(ep);
			disconnect = 1;
		}
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return 0;
}
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (is_neg_adv(req->status)) {
		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
		     neg_adv_str(req->status));
		ep->stats.abort_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(PEER_ABORT, &ep->com.history);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if com state is just
	 * MPA_REQ_SENT.
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		(void)stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		(void)stop_ep_timer(ep);
		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * We just don't send notification upwards because we
			 * want to retry with mpa_v1 without upper layers even
			 * knowing it.
			 *
			 * Do some housekeeping so as to re-initiate the
			 * connection.
			 */
			PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
			     mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case MPA_REP_SENT:
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
					     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
					     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		return 0;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	else if (ep->retry_with_mpa_v1) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.mapped_local_addr;
			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_reconnect(ep);
	}

	return 0;
}
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);

	ep = lookup_tid(t, tid);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	return 0;
}
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;

	ep = lookup_tid(t, tid);
	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);

	return 0;
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		return 0;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return 0;
}
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err = 0;
	int disconnect = 0;
	struct c4iw_ep *ep = to_ep(cm_id);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state == DEAD) {
		mutex_unlock(&ep->com.mutex);
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	set_bit(ULP_REJECT, &ep->com.history);
	BUG_ON(ep->com.state != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		disconnect = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(ep->com.state != MPA_REQ_RCVD);
	BUG_ON(!qp);

	set_bit(ULP_ACCEPT, &ep->com.history);
	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		if (conn_param->ord > ep->ird) {
			if (RELAXED_IRD_NEGOTIATION) {
				ep->ord = ep->ird;
			} else {
				ep->ird = conn_param->ird;
				ep->ord = conn_param->ord;
				send_mpa_reject(ep, conn_param->private_data,
						conn_param->private_data_len);
				abort_connection(ep, NULL, GFP_KERNEL);
				err = -ENOMEM;
				goto err;
			}
		}
		if (conn_param->ird < ep->ord) {
			if (RELAXED_IRD_NEGOTIATION &&
			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
				conn_param->ird = ep->ord;
			} else {
				abort_connection(ep, NULL, GFP_KERNEL);
				err = -ENOMEM;
				goto err;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version == 1) {
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;
	} else {
		if (peer2peer &&
		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
			ep->ird = 1;
	}

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	__state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	abort_connection(ep, NULL, GFP_KERNEL);
	cm_id->rem_ref(cm_id);
err:
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return err;
}
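/*
 * IRD/ORD negotiation example for the enhanced (MPA v2) path above,
 * with illustrative numbers: if the peer offered ird = 8 and the ULP
 * accepts with conn_param->ord = 16, relaxed negotiation simply clamps
 * the local ord down to 8; without RELAXED_IRD_NEGOTIATION the accept is
 * rejected and the connection aborted.  Similarly, a conn_param->ird
 * smaller than ep->ord is raised to ep->ord only when the adapter's
 * max_ordird_qp limit allows it.
 */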
static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in_device *ind;
	int found = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;

	ind = in_dev_get(dev->rdev.lldi.ports[0]);
	if (!ind)
		return -EADDRNOTAVAIL;
	for_primary_ifa(ind) {
		laddr->sin_addr.s_addr = ifa->ifa_address;
		raddr->sin_addr.s_addr = ifa->ifa_address;
		found = 1;
		break;
	}
	endfor_ifa(ind);
	in_dev_put(ind);
	return found ? 0 : -EADDRNOTAVAIL;
}
static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
		      unsigned char banned_flags)
{
	struct inet6_dev *idev;
	int err = -EADDRNOTAVAIL;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev != NULL) {
		struct inet6_ifaddr *ifp;

		read_lock_bh(&idev->lock);
		list_for_each_entry(ifp, &idev->addr_list, if_list) {
			if (ifp->scope == IFA_LINK &&
			    !(ifp->flags & banned_flags)) {
				memcpy(addr, &ifp->addr, 16);
				err = 0;
				break;
			}
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return err;
}
static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in6_addr uninitialized_var(addr);
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;

	if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
		memcpy(la6->sin6_addr.s6_addr, &addr, 16);
		memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
		return 0;
	}
	return -EADDRNOTAVAIL;
}
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	int err = 0;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	struct iwpm_dev_data pm_reg_msg;
	struct iwpm_sa_data pm_msg;
	__u8 *ra;
	int iptype;
	int iwpm_err = 0;

	if ((conn_param->ord > cur_max_read_depth(dev)) ||
	    (conn_param->ird > cur_max_read_depth(dev))) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	if (!ep->com.qp) {
		PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
		err = -EINVAL;
		goto fail1;
	}
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	insert_handle(dev, &dev->atid_idr, ep, ep->atid);

	memcpy(&ep->com.local_addr, &cm_id->local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
	       sizeof(ep->com.remote_addr));

	/* No port mapper available, go with the specified peer information */
	memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
	       sizeof(ep->com.mapped_local_addr));
	memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr,
	       sizeof(ep->com.mapped_remote_addr));

	c4iw_form_reg_msg(dev, &pm_reg_msg);
	iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
	if (iwpm_err)
		PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
		     __func__, iwpm_err);
	if (iwpm_valid_pid() && !iwpm_err) {
		c4iw_form_pm_msg(ep, &pm_msg);
		iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW);
		if (iwpm_err)
			PDBG("%s: Port Mapper query fail (err = %d).\n",
			     __func__, iwpm_err);
		else
			c4iw_record_pm_msg(ep, &pm_msg);
	}
	if (iwpm_create_mapinfo(&ep->com.local_addr,
				&ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
		iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
		err = -ENOMEM;
		goto fail1;
	}
	print_addr(&ep->com, __func__, "add_query/create_mapinfo");
	set_bit(RELEASE_MAPINFO, &ep->com.flags);

	laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr;
	raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
	laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
	raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr;

	if (cm_id->remote_addr.ss_family == AF_INET) {
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
			err = pick_local_ipaddrs(dev, cm_id);
			if (err)
				goto fail1;
		}

		/* find a route */
		PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
		     __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
		     ra, ntohs(raddr->sin_port));
		ep->dst = find_route(dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, 0);
	} else {
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
			err = pick_local_ip6addrs(dev, cm_id);
			if (err)
				goto fail1;
		}

		/* find a route */
		PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
		     __func__, laddr6->sin6_addr.s6_addr,
		     ntohs(laddr6->sin6_port),
		     raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
		ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
	}
	if (!ep->dst) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail2;
	}

	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
			ep->com.dev->rdev.lldi.adapter_type);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail3;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail3:
	dst_release(ep->dst);
fail2:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail1:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				    &ep->com.mapped_local_addr;

	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
				   ep->stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (!err)
		err = c4iw_wait_for_reply(&ep->com.dev->rdev,
					  &ep->com.wr_wait,
					  0, 0, __func__);
	else if (err > 0)
		err = net_xmit_errno(err);
	if (err)
		pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
		       err, ep->stid,
		       sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
	else
		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	return err;
}
static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in *sin = (struct sockaddr_in *)
				  &ep->com.mapped_local_addr;

	if (dev->rdev.lldi.enable_fw_ofld_conn) {
		do {
			err = cxgb4_create_server_filter(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				sin->sin_addr.s_addr, sin->sin_port, 0,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
			if (err == -EBUSY) {
				if (c4iw_fatal_error(&ep->com.dev->rdev)) {
					err = -EIO;
					break;
				}
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(usecs_to_jiffies(100));
			}
		} while (err == -EBUSY);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
				ep->stid, sin->sin_addr.s_addr, sin->sin_port,
				0, ep->com.dev->rdev.lldi.rxq_ids[0]);
		if (!err)
			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
						  &ep->com.wr_wait,
						  0, 0, __func__);
		else if (err > 0)
			err = net_xmit_errno(err);
	}
	if (err)
		pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
		       , err, ep->stid,
		       &sin->sin_addr, ntohs(sin->sin_port));
	return err;
}
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;
	struct iwpm_dev_data pm_reg_msg;
	struct iwpm_sa_data pm_msg;
	int iwpm_err = 0;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	memcpy(&ep->com.local_addr, &cm_id->local_addr,
	       sizeof(ep->com.local_addr));

	/*
	 * Allocate a server TID.
	 */
	if (dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET)
		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
					     cm_id->local_addr.ss_family, ep);
	else
		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
					    cm_id->local_addr.ss_family, ep);

	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->stid_idr, ep, ep->stid);

	/* No port mapper available, go with the specified info */
	memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
	       sizeof(ep->com.mapped_local_addr));

	c4iw_form_reg_msg(dev, &pm_reg_msg);
	iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
	if (iwpm_err)
		PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
		     __func__, iwpm_err);
	if (iwpm_valid_pid() && !iwpm_err) {
		memcpy(&pm_msg.loc_addr, &ep->com.local_addr,
		       sizeof(ep->com.local_addr));
		iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW);
		if (iwpm_err)
			PDBG("%s: Port Mapper query fail (err = %d).\n",
			     __func__, iwpm_err);
		else
			memcpy(&ep->com.mapped_local_addr,
			       &pm_msg.mapped_loc_addr,
			       sizeof(ep->com.mapped_local_addr));
	}
	if (iwpm_create_mapinfo(&ep->com.local_addr,
				&ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
		err = -ENOMEM;
		goto fail3;
	}
	print_addr(&ep->com, __func__, "add_mapping/create_mapinfo");

	set_bit(RELEASE_MAPINFO, &ep->com.flags);
	state_set(&ep->com, LISTEN);
	if (ep->com.local_addr.ss_family == AF_INET)
		err = create_server4(dev, ep);
	else
		err = create_server6(dev, ep);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}

fail3:
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET) {
		err = cxgb4_remove_server_filter(
			ep->com.dev->rdev.lldi.ports[0], ep->stid,
			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
	} else {
		struct sockaddr_in6 *sin6;

		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_remove_server(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0);
		if (err)
			goto done;
		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
					  0, 0, __func__);
		sin6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
done:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep, -EIO);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				(void)stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	if (close) {
		if (abrupt) {
			set_bit(EP_DISC_ABORT, &ep->com.history);
			close_complete_upcall(ep, -ECONNRESET);
			ret = send_abort(ep, NULL, gfp);
		} else {
			set_bit(EP_DISC_CLOSE, &ep->com.history);
			ret = send_halfclose(ep, gfp);
		}
		if (ret)
			fatal = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct c4iw_ep *ep;
	int atid = be32_to_cpu(req->tid);

	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
					   (__force u32) req->tid);
	if (!ep)
		return;

	switch (req->retval) {
	case FW_ENOMEM:
		set_bit(ACT_RETRY_NOMEM, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
	case FW_EADDRINUSE:
		set_bit(ACT_RETRY_INUSE, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		break;
	default:
		pr_info("%s unexpected ofld conn wr retval %d\n",
			__func__, req->retval);
		break;
	}
	pr_err("active ofld_connect_wr failure %d atid %d\n",
	       req->retval, atid);
	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.act_ofld_conn_fails++;
	mutex_unlock(&dev->rdev.stats.lock);
	connect_reply_upcall(ep, status2errno(req->retval));
	state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.mapped_local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(dev, &dev->atid_idr, atid);
	cxgb4_free_atid(dev->rdev.lldi.tids, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}
static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct sk_buff *rpl_skb;
	struct cpl_pass_accept_req *cpl;
	int ret;

	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
	BUG_ON(!rpl_skb);
	if (req->retval) {
		PDBG("%s passive open failure %d\n", __func__, req->retval);
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.pas_ofld_conn_fails++;
		mutex_unlock(&dev->rdev.stats.lock);
		kfree_skb(rpl_skb);
	} else {
		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
					(__force u32) htonl(
					(__force u32) req->tid)));
		ret = pass_accept_req(dev, rpl_skb);
		if (!ret)
			kfree_skb(rpl_skb);
	}
}
static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;

	switch (rpl->type) {
	case FW6_TYPE_CQE:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
		switch (req->t_state) {
		case TCP_SYN_SENT:
			active_ofld_conn_reply(dev, skb, req);
			break;
		case TCP_SYN_RECV:
			passive_ofld_conn_reply(dev, skb, req);
			break;
		default:
			pr_err("%s unexpected ofld conn wr state %d\n",
			       __func__, req->t_state);
			break;
		}
		break;
	}
	return 0;
}
static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
{
	u32 l2info;
	__be16 hdr_len, vlantag, len;
	u16 eth_hdr_len;
	int tcp_hdr_len, ip_hdr_len;
	u8 intf;
	struct cpl_rx_pkt *cpl = cplhdr(skb);
	struct cpl_pass_accept_req *req;
	struct tcp_options_received tmp_opt;
	struct c4iw_dev *dev;
	enum chip_type type;

	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
	/* Store values from cpl_rx_pkt in temporary location. */
	vlantag = cpl->vlan;
	len = cpl->len;
	l2info = cpl->l2info;
	hdr_len = cpl->hdr_len;
	intf = cpl->iff;

	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));

	/*
	 * We need to parse the TCP options from the SYN packet
	 * to generate cpl_pass_accept_req.
	 */
	memset(&tmp_opt, 0, sizeof(tmp_opt));
	tcp_clear_options(&tmp_opt);
	tcp_parse_options(skb, &tmp_opt, 0, NULL);

	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
			 SYN_MAC_IDX_V(RX_MACIDX_G(
			 be32_to_cpu(l2info))) |
			 SYN_XACT_MATCH_F);
	type = dev->rdev.lldi.adapter_type;
	tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
	ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
	req->hdr_len =
		cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
	if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
		eth_hdr_len = is_t4(type) ?
			      RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
			      RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
					    IP_HDR_LEN_V(ip_hdr_len) |
					    ETH_HDR_LEN_V(eth_hdr_len));
	} else { /* T6 and later */
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
					    T6_IP_HDR_LEN_V(ip_hdr_len) |
					    T6_ETH_HDR_LEN_V(eth_hdr_len));
	}
	req->vlan = vlantag;
	req->len = len;
	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
				    PASS_OPEN_TOS_V(tos));
	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
	if (tmp_opt.wscale_ok)
		req->tcpopt.wsf = tmp_opt.snd_wscale;
	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
	if (tmp_opt.sack_ok)
		req->tcpopt.sack = 1;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
}
static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
				  __be32 laddr, __be16 lport,
				  __be32 raddr, __be16 rport,
				  u32 rcv_isn, u32 filter, u16 window,
				  u32 rss_qid, u8 port_id)
{
	struct sk_buff *req_skb;
	struct fw_ofld_connection_wr *req;
	struct cpl_pass_accept_req *cpl = cplhdr(skb);
	int ret;

	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
	req->le.filter = (__force __be32) filter;
	req->le.lport = lport;
	req->le.pport = rport;
	req->le.u.ipv4.lip = laddr;
	req->le.u.ipv4.pip = raddr;
	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
	req->tcb.rcv_adv = htons(window);
	req->tcb.t_state_to_astid =
		htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
		      FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
		      FW_OFLD_CONNECTION_WR_ASTID_V(
		      PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));

	/*
	 * We store the qid in opt2 which will be used by the firmware
	 * to send us the wr response.
	 */
	req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));

	/*
	 * We initialize the MSS index in TCB to 0xF.
	 * So that when driver sends cpl_pass_accept_rpl
	 * TCB picks up the correct value. If this was 0
	 * TP will ignore any value > 0 for MSS index.
	 */
	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
	req->cookie = (uintptr_t)skb;

	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
	if (ret < 0) {
		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
		       ret);
		kfree_skb(skb);
		kfree_skb(req_skb);
	}
}
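/*
 * Note on the cookie above: the original SYN skb pointer is stashed in
 * req->cookie, and the FW6 reply handler (passive_ofld_conn_reply()) casts
 * it back to a struct sk_buff * to recover the packet and replay it through
 * pass_accept_req() once the firmware returns a TID.
 */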
/*
 * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt
 * messages when a filter is being used instead of server to
 * redirect a syn packet. When packets hit filter they are redirected
 * to the offload queue and driver tries to establish the connection
 * using firmware work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int stid;
	unsigned int filter;
	struct ethhdr *eh = NULL;
	struct vlan_ethhdr *vlan_eh = NULL;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct rss_header *rss = (void *)skb->data;
	struct cpl_rx_pkt *cpl = (void *)skb->data;
	struct cpl_pass_accept_req *req = (void *)(rss + 1);
	struct l2t_entry *e;
	struct dst_entry *dst;
	struct c4iw_ep *lep;
	u16 window;
	struct port_info *pi;
	struct net_device *pdev;
	u16 rss_qid, eth_hdr_len;
	int step;
	u32 tx_chan;
	struct neighbour *neigh;

	/* Drop all non-SYN packets */
	if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
		goto reject;

	/*
	 * Drop all packets which did not hit the filter.
	 * Unlikely to happen.
	 */
	if (!(rss->filter_hit && rss->filter_tid))
		goto reject;

	/*
	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
	 */
	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);

	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
	if (!lep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
	case CHELSIO_T4:
		eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T5:
		eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T6:
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
		goto reject;
	}

	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		skb->vlan_tci = ntohs(cpl->vlan);
	}

	if (iph->version != 0x4)
		goto reject;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)rss);
	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
	skb_get(skb);

	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
	     ntohs(tcph->source), iph->tos);

	dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
			 iph->tos);
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n", __func__);
		goto reject;
	}
	neigh = dst_neigh_lookup_skb(dst, skb);
	if (!neigh) {
		pr_err("%s - failed to allocate neigh!\n", __func__);
		goto free_dst;
	}

	if (neigh->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, iph->daddr);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
		dev_put(pdev);
	} else {
		pdev = get_real_dev(neigh->dev);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
	}
	neigh_release(neigh);
	if (!e) {
		pr_err("%s - failed to allocate l2t entry!\n", __func__);
		goto free_dst;
	}

	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
	window = (__force u16) htons((__force u16)tcph->window);

	/* Calculate filter portion for LE region. */
	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
						    dev->rdev.lldi.ports[0],
						    e));

	/*
	 * Synthesize the cpl_pass_accept_req. We have everything except the
	 * TID. Once firmware sends a reply with TID we update the TID field
	 * in cpl and pass it through the regular cpl_pass_accept_req path.
	 */
	build_cpl_pass_accept_req(skb, stid, iph->tos);
	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
			      tcph->source, ntohl(tcph->seq), filter, window,
			      rss_qid, pi->port_id);
	cxgb4_l2t_release(e);
free_dst:
	dst_release(dst);
reject:
	return 0;
}
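/*
 * Passive open via filter, end to end: rx_pkt() above catches the redirected
 * SYN, build_cpl_pass_accept_req() rewrites it as a CPL_PASS_ACCEPT_REQ,
 * send_fw_pass_open_req() asks the firmware for a TID, and the
 * FW6_TYPE_OFLD_CONNECTION_WR_RPL reply is routed through
 * deferred_fw6_msg() -> passive_ofld_conn_reply() -> pass_accept_req().
 */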
/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt
};
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:
		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
}
static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del_init(tmp);
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	process_timedout_eps();
	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
		process_timedout_eps();
	}
}
static DECLARE_WORK(skb_work, process_work);
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	spin_lock(&timeout_lock);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		/*
		 * Only insert if it is not already on the list.
		 */
		if (!ep->entry.next) {
			list_add_tail(&ep->entry, &timeout_list);
			kickit = 1;
		}
	}
	spin_unlock(&timeout_lock);
	if (kickit)
		queue_work(workq, &skb_work);
}
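/*
 * ep->entry.next doubles as an "already queued" marker above: the timer only
 * adds the endpoint to timeout_list when entry.next is NULL, and
 * process_timedout_eps() is expected to reinitialise the entry while
 * draining the list under timeout_lock, so a later timer firing can queue
 * the endpoint again.
 */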
/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
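/*
 * The c4iw_dev pointer is stashed at skb->cb + sizeof(void *) so the worker
 * can recover it later, e.g.:
 *
 *	dev = *((struct c4iw_dev **)(skb->cb + sizeof(void *)));
 *
 * as done in process_work() and build_cpl_pass_accept_req().
 */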
static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}
static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long)
			   rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_CQE:
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n",
		       __func__, rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}
static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD
		       "Abort on non-existent endpoint, tid %d\n", tid);
		kfree_skb(skb);
		return 0;
	}
	if (is_neg_adv(req->status)) {
		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
		     neg_adv_str(req->status));
		ep->stats.abort_neg_adv++;
		dev->rdev.stats.neg_adv++;
		kfree_skb(skb);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, if we are on MPAv2 and want to retry with MPAv1
	 * then, don't wake up yet.
	 */
	if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
		if (ep->com.state != MPA_REQ_SENT)
			c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	} else
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	sched(dev, skb);
	return 0;
}
/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg,
	[CPL_RX_PKT] = sched
};
int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}
void c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}