/*
 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
 *
 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 *
 * Written by:	Dimitris Michailidis (dm@chelsio.com)
 *		Karen Xie (kxie@chelsio.com)
 */
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/version.h>

#include "cxgb3_defs.h"
#include "cxgb3_ctl_defs.h"
#include "firmware_exports.h"
#include "cxgb3i_offload.h"
#include "cxgb3i_pdu.h"
#include "cxgb3i_ddp.h"
#ifdef __DEBUG_C3CN_CONN__
#define c3cn_conn_debug		cxgb3i_log_debug
#else
#define c3cn_conn_debug(fmt...)
#endif

#ifdef __DEBUG_C3CN_TX__
#define c3cn_tx_debug		cxgb3i_log_debug
#else
#define c3cn_tx_debug(fmt...)
#endif

#ifdef __DEBUG_C3CN_RX__
#define c3cn_rx_debug		cxgb3i_log_debug
#else
#define c3cn_rx_debug(fmt...)
#endif
/*
 * module parameters related to offloaded iscsi connections
 */
static int cxgb3_rcv_win = 256 * 1024;
module_param(cxgb3_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)");

static int cxgb3_snd_win = 128 * 1024;
module_param(cxgb3_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=128KB)");

static int cxgb3_rx_credit_thres = 10 * 1024;
module_param(cxgb3_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb3_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb3_max_connect = 8 * 1024;
module_param(cxgb3_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb3_max_connect, "Max. # of connections (default=8192)");

static unsigned int cxgb3_sport_base = 20000;
module_param(cxgb3_sport_base, uint, 0644);
MODULE_PARM_DESC(cxgb3_sport_base, "starting port number (default=20000)");
/*
 * cxgb3i tcp connection data (per adapter) list
 */
static LIST_HEAD(cdata_list);
static DEFINE_RWLOCK(cdata_rwlock);

static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion);
static void c3cn_release_offload_resources(struct s3_conn *c3cn);
/*
 * iscsi source port management
 *
 * Find a free source port in the port allocation map. We use a very simple
 * rotor scheme to look for the next free port.
 *
 * If a source port has been specified make sure that it doesn't collide with
 * our normal source port allocation map. If it's outside the range of our
 * allocation/deallocation scheme just let them use it.
 *
 * If the source port is outside our allocation range, the caller is
 * responsible for keeping track of their port usage.
 */
static int c3cn_get_port(struct s3_conn *c3cn, struct cxgb3i_sdev_data *cdata)
{
	unsigned int start;
	int idx;

	if (!cdata)
		goto error_out;

	if (c3cn->saddr.sin_port) {
		cxgb3i_log_error("connect, sin_port NON-ZERO %u.\n",
				 c3cn->saddr.sin_port);
		return -EADDRINUSE;
	}

	spin_lock_bh(&cdata->lock);
	start = idx = cdata->sport_next;
	do {
		if (++idx >= cxgb3_max_connect)
			idx = 0;
		if (!cdata->sport_conn[idx]) {
			c3cn->saddr.sin_port = htons(cxgb3_sport_base + idx);
			cdata->sport_next = idx;
			cdata->sport_conn[idx] = c3cn;
			spin_unlock_bh(&cdata->lock);

			c3cn_conn_debug("%s reserve port %u.\n",
					cdata->cdev->name,
					cxgb3_sport_base + idx);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&cdata->lock);

error_out:
	return -EADDRNOTAVAIL;
}
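/*
 * Worked example of the rotor scheme above (editorial, using the default
 * cxgb3_sport_base = 20000 and cxgb3_max_connect = 8192): slot idx maps to
 * source port 20000 + idx, so ports 20000..28191 are handed out, and each
 * search resumes at sport_next, wrapping around at cxgb3_max_connect.
 */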
static void c3cn_put_port(struct s3_conn *c3cn)
{
	if (c3cn->saddr.sin_port) {
		struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(c3cn->cdev);
		int idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;

		c3cn->saddr.sin_port = 0;
		if (idx < 0 || idx >= cxgb3_max_connect)
			return;

		spin_lock_bh(&cdata->lock);
		cdata->sport_conn[idx] = NULL;
		spin_unlock_bh(&cdata->lock);
		c3cn_conn_debug("%s, release port %u.\n",
				cdata->cdev->name, cxgb3_sport_base + idx);
	}
}
static inline void c3cn_set_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
{
	__set_bit(flag, &c3cn->flags);
	c3cn_conn_debug("c3cn 0x%p, set %d, s %u, f 0x%lx.\n",
			c3cn, flag, c3cn->state, c3cn->flags);
}

static inline void c3cn_clear_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
{
	__clear_bit(flag, &c3cn->flags);
	c3cn_conn_debug("c3cn 0x%p, clear %d, s %u, f 0x%lx.\n",
			c3cn, flag, c3cn->state, c3cn->flags);
}

static inline int c3cn_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
{
	if (c3cn == NULL)
		return 0;
	return test_bit(flag, &c3cn->flags);
}

static void c3cn_set_state(struct s3_conn *c3cn, int state)
{
	c3cn_conn_debug("c3cn 0x%p state -> %u.\n", c3cn, state);
	c3cn->state = state;
}
static inline void c3cn_hold(struct s3_conn *c3cn)
{
	atomic_inc(&c3cn->refcnt);
}

static inline void c3cn_put(struct s3_conn *c3cn)
{
	if (atomic_dec_and_test(&c3cn->refcnt)) {
		c3cn_conn_debug("free c3cn 0x%p, s %u, f 0x%lx.\n",
				c3cn, c3cn->state, c3cn->flags);
		kfree(c3cn);
	}
}
static void c3cn_closed(struct s3_conn *c3cn)
{
	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	c3cn_put_port(c3cn);
	c3cn_release_offload_resources(c3cn);
	c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
	cxgb3i_conn_closing(c3cn);
}
/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the T3 asic.
 * The section below implements the CPLs related to iscsi tcp connection
 * open/close/abort and data send/receive.
 */

/*
 * CPL connection active open request: host ->
 */
static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}
static unsigned int select_mss(struct s3_conn *c3cn, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = c3cn->dst_cache;
	struct t3cdev *cdev = c3cn->cdev;
	const struct t3c_data *td = T3C_DATA(cdev);
	u16 advmss = dst_metric(dst, RTAX_ADVMSS);

	if (advmss > pmtu - 40)
		advmss = pmtu - 40;
	if (advmss < td->mtus[0] - 40)
		advmss = td->mtus[0] - 40;
	idx = find_best_mtu(td, advmss + 40);
	return idx;
}
static inline int compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}
static inline unsigned int calc_opt0h(struct s3_conn *c3cn)
{
	int wscale = compute_wscale(cxgb3_rcv_win);
	return  V_KEEP_ALIVE(1) |
		F_TCAM_BYPASS |
		V_WND_SCALE(wscale) |
		V_MSS_IDX(c3cn->mss_idx);
}

static inline unsigned int calc_opt0l(struct s3_conn *c3cn)
{
	return  V_ULP_MODE(ULP_MODE_ISCSI) |
		V_RCV_BUFSIZ(cxgb3_rcv_win >> 10);
}
static void make_act_open_req(struct s3_conn *c3cn, struct sk_buff *skb,
			      unsigned int atid, const struct l2t_entry *e)
{
	struct cpl_act_open_req *req;

	c3cn_conn_debug("c3cn 0x%p, atid 0x%x.\n", c3cn, atid);

	skb->priority = CPL_PRIORITY_SETUP;
	req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
	req->local_port = c3cn->saddr.sin_port;
	req->peer_port = c3cn->daddr.sin_port;
	req->local_ip = c3cn->saddr.sin_addr.s_addr;
	req->peer_ip = c3cn->daddr.sin_addr.s_addr;
	req->opt0h = htonl(calc_opt0h(c3cn) | V_L2T_IDX(e->idx) |
			   V_TX_CHANNEL(e->smt_idx));
	req->opt0l = htonl(calc_opt0l(c3cn));
	req->params = 0;
}
static void fail_act_open(struct s3_conn *c3cn, int errno)
{
	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);
	c3cn->err = errno;
	c3cn_closed(c3cn);
}

static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct s3_conn *c3cn = (struct s3_conn *)skb->sk;

	c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);

	c3cn_hold(c3cn);
	spin_lock_bh(&c3cn->lock);
	if (c3cn->state == C3CN_STATE_CONNECTING)
		fail_act_open(c3cn, -EHOSTUNREACH);
	spin_unlock_bh(&c3cn->lock);
	__kfree_skb(skb);
	c3cn_put(c3cn);
}
/*
 * CPL connection close request: host ->
 *
 * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to
 * the write queue (i.e., after any unsent TX data).
 */
static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
		       int flags)
{
	skb_tcp_seq(skb) = c3cn->write_seq;
	skb_flags(skb) = flags;
	__skb_queue_tail(&c3cn->write_queue, skb);
}
static void send_close_req(struct s3_conn *c3cn)
{
	struct sk_buff *skb = c3cn->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = c3cn->tid;

	c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	c3cn->cpl_close = NULL;

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = htonl(c3cn->write_seq);

	skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND);
	if (c3cn->state != C3CN_STATE_CONNECTING)
		c3cn_push_tx_frames(c3cn, 1);
}
/*
 * CPL connection abort request: host ->
 *
 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
 * for the same connection and also that we do not try to send a message
 * after the connection has closed.
 */
static void abort_arp_failure(struct t3cdev *cdev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	c3cn_conn_debug("tdev 0x%p.\n", cdev);

	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(cdev, skb);
}

static inline void c3cn_purge_write_queue(struct s3_conn *c3cn)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&c3cn->write_queue)))
		__kfree_skb(skb);
}
static void send_abort_req(struct s3_conn *c3cn)
{
	struct sk_buff *skb = c3cn->cpl_abort_req;
	struct cpl_abort_req *req;
	unsigned int tid = c3cn->tid;

	if (unlikely(c3cn->state == C3CN_STATE_ABORTING) || !skb ||
	    !c3cn->cdev)
		return;

	c3cn_set_state(c3cn, C3CN_STATE_ABORTING);

	c3cn_conn_debug("c3cn 0x%p, flag ABORT_RPL + ABORT_SHUT.\n", c3cn);

	c3cn_set_flag(c3cn, C3CN_ABORT_RPL_PENDING);

	/* Purge the send queue so we don't send anything after an abort. */
	c3cn_purge_write_queue(c3cn);

	c3cn->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;

	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	req->rsvd0 = htonl(c3cn->snd_nxt);
	req->rsvd1 = !c3cn_flag(c3cn, C3CN_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	l2t_send(c3cn->cdev, skb, c3cn->l2t);
}
/*
 * CPL connection abort reply: host ->
 *
 * Send an ABORT_RPL message in response to the ABORT_REQ received.
 */
static void send_abort_rpl(struct s3_conn *c3cn, int rst_status)
{
	struct sk_buff *skb = c3cn->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	c3cn->cpl_abort_rpl = NULL;

	skb->priority = CPL_PRIORITY_DATA;
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(c3cn->tid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, c3cn->tid));
	rpl->cmd = rst_status;

	cxgb3_ofld_send(c3cn->cdev, skb);
}
/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct s3_conn *c3cn, u32 credits, u32 dack)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return 0;

	req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, c3cn->tid));
	req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
	skb->priority = CPL_PRIORITY_ACK;
	cxgb3_ofld_send(c3cn->cdev, skb);
	return credits;
}
/*
 * CPL connection tx data: host ->
 *
 * Send iscsi PDU via TX_DATA CPL message. Returns the number of
 * credits sent.
 * Each TX_DATA consumes work request credits (wrs), so we need to keep track
 * of how many we've used so far and how many are pending (i.e., not yet acked
 * by T3).
 */

/*
 * For ULP connections HW may insert digest bytes into the pdu. Those digest
 * bytes are not sent by the host but are part of the TCP payload and therefore
 * consume TCP sequence space.
 */
static const unsigned int cxgb3_ulp_extra_len[] = { 0, 4, 4, 8 };
static inline unsigned int ulp_extra_len(const struct sk_buff *skb)
{
	return cxgb3_ulp_extra_len[skb_ulp_mode(skb) & 3];
}
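/*
 * Worked example for the table above (editorial): the index is the ULP
 * submode, bit 0 = header digest, bit 1 = data digest. No digest costs 0
 * extra bytes, either digest alone costs 4 (one CRC32C word), and both
 * together cost 8 bytes of TCP sequence space per PDU.
 */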
static unsigned int wrlen __read_mostly;

/*
 * The number of WRs needed for an skb depends on the number of fragments
 * in the skb and whether it has any payload in its main body. This maps the
 * length of the gather list represented by an skb into the # of necessary WRs.
 * The extra two fragments are for iscsi bhs and payload padding.
 */
#define SKB_WR_LIST_SIZE	(MAX_SKB_FRAGS + 2)
static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;

static void s3_init_wr_tab(unsigned int wr_len)
{
	int i;

	if (skb_wrs[1])		/* already initialized */
		return;

	for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
		int sgl_len = (3 * i) / 2 + (i & 1);

		sgl_len += 3;
		skb_wrs[i] = (sgl_len <= wr_len
			      ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
	}

	wrlen = wr_len * 8;
}
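/*
 * Worked example for the table fill above (editorial; wr_len = 8 is an
 * assumed value, not a real GET_WR_LEN result): for i fragments,
 * sgl_len = (3 * i) / 2 + (i & 1) + 3. A 1-frag skb gives sgl_len = 5 <= 8,
 * i.e. 1 WR; a 5-frag skb gives sgl_len = 11 > 8, i.e.
 * 1 + (11 - 2) / (8 - 1) = 2 WRs.
 */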
static inline void reset_wr_list(struct s3_conn *c3cn)
{
	c3cn->wr_pending_head = c3cn->wr_pending_tail = NULL;
}

/*
 * Add a WR to a connection's list of pending WRs. This is a singly-linked
 * list of sk_buffs operating as a FIFO. The head is kept in wr_pending_head
 * and the tail in wr_pending_tail.
 */
static inline void enqueue_wr(struct s3_conn *c3cn,
			      struct sk_buff *skb)
{
	skb_tx_wr_next(skb) = NULL;

	/*
	 * We want to take an extra reference since both us and the driver
	 * need to free the packet before it's really freed. We know there's
	 * just one user currently so we use atomic_set rather than skb_get
	 * to avoid the atomic op.
	 */
	atomic_set(&skb->users, 2);

	if (!c3cn->wr_pending_head)
		c3cn->wr_pending_head = skb;
	else
		skb_tx_wr_next(c3cn->wr_pending_tail) = skb;
	c3cn->wr_pending_tail = skb;
}
static int count_pending_wrs(struct s3_conn *c3cn)
{
	int n = 0;
	const struct sk_buff *skb = c3cn->wr_pending_head;

	while (skb) {
		n += skb->csum;
		skb = skb_tx_wr_next(skb);
	}
	return n;
}

static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
{
	return c3cn->wr_pending_head;
}

static inline void free_wr_skb(struct sk_buff *skb)
{
	kfree_skb(skb);
}

static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
{
	struct sk_buff *skb = c3cn->wr_pending_head;

	if (likely(skb)) {
		/* Don't bother clearing the tail */
		c3cn->wr_pending_head = skb_tx_wr_next(skb);
		skb_tx_wr_next(skb) = NULL;
	}
	return skb;
}

static void purge_wr_queue(struct s3_conn *c3cn)
{
	struct sk_buff *skb;
	while ((skb = dequeue_wr(c3cn)) != NULL)
		free_wr_skb(skb);
}
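/*
 * Editorial note: the helpers above form the pending-WR FIFO. Each skb's
 * csum field is reused to remember how many WR credits the skb consumed,
 * so every enqueue_wr() in c3cn_push_tx_frames() is balanced by a
 * dequeue_wr()/free_wr_skb() in process_wr_ack() once TX_DATA_ACK returns
 * those credits.
 */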
static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb,
				   int len, int req_completion)
{
	struct tx_data_wr *req;

	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
			(req_completion ? F_WR_COMPL : 0));
	req->wr_lo = htonl(V_WR_TID(c3cn->tid));
	req->sndseq = htonl(c3cn->snd_nxt);
	/* len includes the length of any HW ULP additions */
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(c3cn->l2t->smt_idx));
	/* V_TX_ULP_SUBMODE sets both the mode and submode */
	req->flags = htonl(V_TX_ULP_SUBMODE(skb_ulp_mode(skb)) |
			   V_TX_SHOVE((skb_peek(&c3cn->write_queue) ? 0 : 1)));

	if (!c3cn_flag(c3cn, C3CN_TX_DATA_SENT)) {
		req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
				    V_TX_CPU_IDX(c3cn->qset));
		/* Sendbuffer is in units of 32KB. */
		req->param |= htonl(V_TX_SNDBUF(cxgb3_snd_win >> 15));
		c3cn_set_flag(c3cn, C3CN_TX_DATA_SENT);
	}
}
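/*
 * Worked example (editorial): V_TX_SNDBUF() takes the send buffer in 32KB
 * units, so the default cxgb3_snd_win of 128KB is programmed as
 * (128 * 1024) >> 15 = 4 in the first TX_DATA WR of a connection.
 */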
/**
 * c3cn_push_tx_frames -- start transmit
 * @c3cn: the offloaded connection
 * @req_completion: request wr_ack or not
 *
 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
 * connection's send queue and sends them on to T3. Must be called with the
 * connection's lock held. Returns the amount of send buffer space that was
 * freed as a result of sending queued data to T3.
 */
static void arp_failure_discard(struct t3cdev *cdev, struct sk_buff *skb)
{
	kfree_skb(skb);
}
static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;
	struct t3cdev *cdev;
	struct cxgb3i_sdev_data *cdata;

	if (unlikely(c3cn->state == C3CN_STATE_CONNECTING ||
		     c3cn->state == C3CN_STATE_CLOSE_WAIT_1 ||
		     c3cn->state >= C3CN_STATE_ABORTING)) {
		c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n",
			      c3cn, c3cn->state);
		return 0;
	}

	cdev = c3cn->cdev;
	cdata = CXGB3_SDEV_DATA(cdev);

	while (c3cn->wr_avail
	       && (skb = skb_peek(&c3cn->write_queue)) != NULL) {
		int len = skb->len;	/* length before skb_push */
		int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
		int wrs_needed = skb_wrs[frags];

		if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
			wrs_needed = 1;

		WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);

		if (c3cn->wr_avail < wrs_needed) {
			c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, "
				      "wr %d < %u.\n",
				      c3cn, skb->len, skb->data_len, frags,
				      wrs_needed, c3cn->wr_avail);
			break;
		}

		__skb_unlink(skb, &c3cn->write_queue);
		skb->priority = CPL_PRIORITY_DATA;
		skb->csum = wrs_needed;	/* remember this until the WR_ACK */
		c3cn->wr_avail -= wrs_needed;
		c3cn->wr_unacked += wrs_needed;
		enqueue_wr(c3cn, skb);

		c3cn_tx_debug("c3cn 0x%p, enqueue, skb len %u/%u, frag %u, "
				"wr %d, left %u, unack %u.\n",
				c3cn, skb->len, skb->data_len, frags,
				wrs_needed, c3cn->wr_avail, c3cn->wr_unacked);

		if (likely(skb_flags(skb) & C3CB_FLAG_NEED_HDR)) {
			if ((req_completion &&
			     c3cn->wr_unacked == wrs_needed) ||
			    (skb_flags(skb) & C3CB_FLAG_COMPL) ||
			    c3cn->wr_unacked >= c3cn->wr_max / 2) {
				req_completion = 1;
				c3cn->wr_unacked = 0;
			}
			len += ulp_extra_len(skb);
			make_tx_data_wr(c3cn, skb, len, req_completion);
			c3cn->snd_nxt += len;
			skb_flags(skb) &= ~C3CB_FLAG_NEED_HDR;
		}

		total_size += skb->truesize;
		set_arp_failure_handler(skb, arp_failure_discard);
		l2t_send(cdev, skb, c3cn->l2t);
	}
	return total_size;
}
/*
 * process_cpl_msg: -> host
 * Top-level CPL message processing used by most CPL messages that
 * pertain to connections.
 */
static inline void process_cpl_msg(void (*fn)(struct s3_conn *,
					      struct sk_buff *),
				   struct s3_conn *c3cn,
				   struct sk_buff *skb)
{
	spin_lock_bh(&c3cn->lock);
	fn(c3cn, skb);
	spin_unlock_bh(&c3cn->lock);
}

/*
 * process_cpl_msg_ref: -> host
 * Similar to process_cpl_msg() but takes an extra connection reference around
 * the call to the handler. Should be used if the handler may drop a
 * connection reference.
 */
static inline void process_cpl_msg_ref(void (*fn)(struct s3_conn *,
						  struct sk_buff *),
				       struct s3_conn *c3cn,
				       struct sk_buff *skb)
{
	c3cn_hold(c3cn);
	process_cpl_msg(fn, c3cn, skb);
	c3cn_put(c3cn);
}
/*
 * Process a CPL_ACT_ESTABLISH message: -> host
 * Updates connection state from an active establish CPL message. Runs with
 * the connection lock held.
 */

static inline void s3_free_atid(struct t3cdev *cdev, unsigned int tid)
{
	struct s3_conn *c3cn = cxgb3_free_atid(cdev, tid);
	if (c3cn)
		c3cn_put(c3cn);
}

static void c3cn_established(struct s3_conn *c3cn, u32 snd_isn,
			     unsigned int opt)
{
	c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);

	c3cn->write_seq = c3cn->snd_nxt = c3cn->snd_una = snd_isn;

	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (cxgb3_rcv_win > (M_RCV_BUFSIZ << 10))
		c3cn->rcv_wup -= cxgb3_rcv_win - (M_RCV_BUFSIZ << 10);

	dst_confirm(c3cn->dst_cache);

	smp_mb();

	c3cn_set_state(c3cn, C3CN_STATE_ESTABLISHED);
}
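/*
 * Editorial example for the rcv_wup adjustment above: V_RCV_BUFSIZ in opt0
 * carries the receive window in 1KB units and is only M_RCV_BUFSIZ wide,
 * so a cxgb3_rcv_win larger than M_RCV_BUFSIZ << 10 bytes cannot be fully
 * advertised at open time; the excess is returned to HW as RX credits in
 * the first RX_DATA_ACK instead.
 */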
static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	u32 rcv_isn = ntohl(req->rcv_isn);	/* real RCV_ISN + 1 */

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (unlikely(c3cn->state != C3CN_STATE_CONNECTING))
		cxgb3i_log_error("TID %u expected SYN_SENT, got EST., s %u\n",
				 c3cn->tid, c3cn->state);

	c3cn->copied_seq = c3cn->rcv_wup = c3cn->rcv_nxt = rcv_isn;
	c3cn_established(c3cn, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	__kfree_skb(skb);

	if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED)))
		/* upper layer has requested closing */
		send_abort_req(c3cn);
	else {
		if (skb_queue_len(&c3cn->write_queue))
			c3cn_push_tx_frames(c3cn, 1);
		cxgb3i_conn_tx_open(c3cn);
	}
}
static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,
			    void *ctx)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct s3_conn *c3cn = ctx;
	struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);

	c3cn_conn_debug("rcv, tid 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
			tid, c3cn, c3cn->state, c3cn->flags);

	c3cn->tid = tid;
	c3cn_hold(c3cn);
	cxgb3_insert_tid(cdata->cdev, cdata->client, c3cn, tid);
	s3_free_atid(cdev, atid);

	c3cn->qset = G_QNUM(ntohl(skb->csum));

	process_cpl_msg(process_act_establish, c3cn, skb);
	return 0;
}
/*
 * Process a CPL_ACT_OPEN_RPL message: -> host
 * Handle active open failures.
 */
static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
static void act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb;
	struct s3_conn *c3cn = (struct s3_conn *)data;

	c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);

	spin_lock_bh(&c3cn->lock);
	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
	if (!skb)
		fail_act_open(c3cn, -ENOMEM);
	else {
		skb->sk = (struct sock *)c3cn;
		set_arp_failure_handler(skb, act_open_req_arp_failure);
		make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
		l2t_send(c3cn->cdev, skb, c3cn->l2t);
	}
	spin_unlock_bh(&c3cn->lock);
	c3cn_put(c3cn);
}
static void process_act_open_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (rpl->status == CPL_ERR_CONN_EXIST &&
	    c3cn->retry_timer.function != act_open_retry_timer) {
		c3cn->retry_timer.function = act_open_retry_timer;
		if (!mod_timer(&c3cn->retry_timer, jiffies + HZ / 2))
			c3cn_hold(c3cn);
	} else
		fail_act_open(c3cn, act_open_rpl_status_to_errno(rpl->status));
	__kfree_skb(skb);
}
static int do_act_open_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct s3_conn *c3cn = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
			rpl->status, c3cn, c3cn->state, c3cn->flags);

	if (rpl->status != CPL_ERR_TCAM_FULL &&
	    rpl->status != CPL_ERR_CONN_EXIST &&
	    rpl->status != CPL_ERR_ARP_MISS)
		cxgb3_queue_tid_release(cdev, GET_TID(rpl));

	process_cpl_msg_ref(process_act_open_rpl, c3cn, skb);
	return 0;
}
/*
 * Process PEER_CLOSE CPL messages: -> host
 */
static void process_peer_close(struct s3_conn *c3cn, struct sk_buff *skb)
{
	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
		goto out;

	switch (c3cn->state) {
	case C3CN_STATE_ESTABLISHED:
		c3cn_set_state(c3cn, C3CN_STATE_PASSIVE_CLOSE);
		break;
	case C3CN_STATE_ACTIVE_CLOSE:
		c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
		break;
	case C3CN_STATE_CLOSE_WAIT_1:
		c3cn_closed(c3cn);
		break;
	case C3CN_STATE_ABORTING:
		break;
	default:
		cxgb3i_log_error("%s: peer close, TID %u in bad state %u\n",
				 c3cn->cdev->name, c3cn->tid, c3cn->state);
	}

	cxgb3i_conn_closing(c3cn);
out:
	__kfree_skb(skb);
}
static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct s3_conn *c3cn = ctx;

	c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);
	process_cpl_msg_ref(process_peer_close, c3cn, skb);
	return 0;
}
/*
 * Process CLOSE_CONN_RPL CPL message: -> host
 * Process a peer ACK to our FIN.
 */
static void process_close_con_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_close_con_rpl *rpl = cplhdr(skb);

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	c3cn->snd_una = ntohl(rpl->snd_nxt) - 1;	/* exclude FIN */

	if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
		goto out;

	switch (c3cn->state) {
	case C3CN_STATE_ACTIVE_CLOSE:
		c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_1);
		break;
	case C3CN_STATE_CLOSE_WAIT_1:
	case C3CN_STATE_CLOSE_WAIT_2:
		c3cn_closed(c3cn);
		break;
	case C3CN_STATE_ABORTING:
		break;
	default:
		cxgb3i_log_error("%s: close_rpl, TID %u in bad state %u\n",
				 c3cn->cdev->name, c3cn->tid, c3cn->state);
	}

out:
	__kfree_skb(skb);
}
static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
			    void *ctx)
{
	struct s3_conn *c3cn = ctx;

	c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	process_cpl_msg_ref(process_close_con_rpl, c3cn, skb);
	return 0;
}
/*
 * Process ABORT_REQ_RSS CPL message: -> host
 * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
 * request except that we need to reply to it.
 */

static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN: /* fall through */
	case CPL_ERR_CONN_RESET:
		return c3cn->state > C3CN_STATE_ESTABLISHED ?
			-EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}
static void process_abort_req(struct s3_conn *c3cn, struct sk_buff *skb)
{
	int rst_status = CPL_ABORT_NO_RST;
	const struct cpl_abort_req_rss *req = cplhdr(skb);

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (!c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD)) {
		c3cn_set_flag(c3cn, C3CN_ABORT_REQ_RCVD);
		c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
		__kfree_skb(skb);
		return;
	}

	c3cn_clear_flag(c3cn, C3CN_ABORT_REQ_RCVD);
	send_abort_rpl(c3cn, rst_status);

	if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
		c3cn->err =
			abort_status_to_errno(c3cn, req->status, &rst_status);
		c3cn_closed(c3cn);
	}
}
static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb);
	struct s3_conn *c3cn = ctx;

	c3cn_conn_debug("rcv, c3cn 0x%p, s 0x%x, f 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
		__kfree_skb(skb);
		return 0;
	}

	process_cpl_msg_ref(process_abort_req, c3cn, skb);
	return 0;
}
/*
 * Process ABORT_RPL_RSS CPL message: -> host
 * Process abort replies. We only process these messages if we anticipate
 * them as the coordination between SW and HW in this area is somewhat lacking
 * and sometimes we get ABORT_RPLs after we are done with the connection that
 * originated the ABORT_REQ.
 */
static void process_abort_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
{
	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
		if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_RCVD))
			c3cn_set_flag(c3cn, C3CN_ABORT_RPL_RCVD);
		else {
			c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_RCVD);
			c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_PENDING);
			if (c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD))
				cxgb3i_log_error("%s tid %u, ABORT_RPL_RSS\n",
						 c3cn->cdev->name, c3cn->tid);
			c3cn_closed(c3cn);
		}
	}
	__kfree_skb(skb);
}
static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	struct s3_conn *c3cn = ctx;

	c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, 0x%lx.\n",
			rpl->status, c3cn, c3cn ? c3cn->state : 0,
			c3cn ? c3cn->flags : 0UL);

	/*
	 * Ignore replies to post-close aborts indicating that the abort was
	 * requested too late. These connections are terminated when we get
	 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
	 * arrives the TID is either no longer used or it has been recycled.
	 */
	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto discard;

	/*
	 * Sometimes we've already closed the connection, e.g., a post-close
	 * abort races with ABORT_REQ_RSS, the latter frees the connection
	 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
	 * but FW turns the ABORT_REQ into a regular one and so we get
	 * ABORT_RPL_RSS with status 0 and no connection.
	 */
	if (!c3cn)
		goto discard;

	process_cpl_msg_ref(process_abort_rpl, c3cn, skb);
	return 0;

discard:
	__kfree_skb(skb);
	return 0;
}
/*
 * Process RX_ISCSI_HDR CPL message: -> host
 * Handle received PDUs; the payload could be DDP'ed. If not, the payload
 * follows after the bhs.
 */
static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
	struct cpl_iscsi_hdr_norss data_cpl;
	struct cpl_rx_data_ddp_norss ddp_cpl;
	unsigned int hdr_len, data_len, status;
	unsigned int len;
	int err;

	if (unlikely(c3cn->state >= C3CN_STATE_PASSIVE_CLOSE)) {
		if (c3cn->state != C3CN_STATE_ABORTING)
			send_abort_req(c3cn);
		__kfree_skb(skb);
		return;
	}

	skb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
	skb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));

	len = hdr_len = ntohs(hdr_cpl->len);
	/* msg coalesce is off or not enough data received */
	if (skb->len <= hdr_len) {
		cxgb3i_log_error("%s: TID %u, ISCSI_HDR, skb len %u < %u.\n",
				 c3cn->cdev->name, c3cn->tid,
				 skb->len, hdr_len);
		goto abort_conn;
	}

	err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
			    sizeof(ddp_cpl));
	if (err < 0)
		goto abort_conn;

	skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY;
	skb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
	skb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
	status = ntohl(ddp_cpl.ddp_status);

	c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
		      skb, skb->len, skb_rx_pdulen(skb), status);

	if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;
	if (status & (1 << RX_DDP_STATUS_DCRC_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_DCRC_ERROR;
	if (status & (1 << RX_DDP_STATUS_PAD_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_PAD_ERROR;

	if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
		err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
		if (err < 0)
			goto abort_conn;
		data_len = ntohs(data_cpl.len);
		len += sizeof(data_cpl) + data_len;
	} else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT))
		skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED;

	c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_rx_pdulen(skb);
	__pskb_trim(skb, len);
	__skb_queue_tail(&c3cn->receive_queue, skb);
	cxgb3i_conn_pdu_ready(c3cn);

	return;

abort_conn:
	send_abort_req(c3cn);
	__kfree_skb(skb);
}
static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
{
	struct s3_conn *c3cn = ctx;

	process_cpl_msg(process_rx_iscsi_hdr, c3cn, skb);
	return 0;
}
/*
 * Process TX_DATA_ACK CPL messages: -> host
 * Process an acknowledgment of WR completion. Advance snd_una and send the
 * next batch of work requests from the write queue.
 */
static void check_wr_invariants(struct s3_conn *c3cn)
{
	int pending = count_pending_wrs(c3cn);

	if (unlikely(c3cn->wr_avail + pending != c3cn->wr_max))
		cxgb3i_log_error("TID %u: credit imbalance: avail %u, "
				 "pending %u, total should be %u\n",
				 c3cn->tid, c3cn->wr_avail, pending,
				 c3cn->wr_max);
}
static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	u32 snd_una = ntohl(hdr->snd_una);

	c3cn_tx_debug("%u WR credits, avail %u, unack %u, TID %u, state %u.\n",
			credits, c3cn->wr_avail, c3cn->wr_unacked,
			c3cn->tid, c3cn->state);

	c3cn->wr_avail += credits;
	if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail)
		c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail;

	while (credits) {
		struct sk_buff *p = peek_wr(c3cn);

		if (unlikely(!p)) {
			cxgb3i_log_error("%u WR_ACK credits for TID %u with "
					 "nothing pending, state %u\n",
					 credits, c3cn->tid, c3cn->state);
			break;
		}
		if (unlikely(credits < p->csum)) {
			struct tx_data_wr *w = cplhdr(p);
			cxgb3i_log_error("TID %u got %u WR credits need %u, "
					 "len %u, main body %u, frags %u, "
					 "seq # %u, ACK una %u, ACK nxt %u, "
					 "WR_AVAIL %u, WRs pending %u\n",
					 c3cn->tid, credits, p->csum, p->len,
					 p->len - p->data_len,
					 skb_shinfo(p)->nr_frags,
					 ntohl(w->sndseq), snd_una,
					 ntohl(hdr->snd_nxt), c3cn->wr_avail,
					 count_pending_wrs(c3cn) - credits);
			p->csum -= credits;
			break;
		} else {
			dequeue_wr(c3cn);
			credits -= p->csum;
			free_wr_skb(p);
		}
	}

	check_wr_invariants(c3cn);

	if (unlikely(before(snd_una, c3cn->snd_una))) {
		cxgb3i_log_error("TID %u, unexpected sequence # %u in WR_ACK "
				 "snd_una %u\n",
				 c3cn->tid, snd_una, c3cn->snd_una);
		goto out_free;
	}

	if (c3cn->snd_una != snd_una) {
		c3cn->snd_una = snd_una;
		dst_confirm(c3cn->dst_cache);
	}

	if (skb_queue_len(&c3cn->write_queue)) {
		if (c3cn_push_tx_frames(c3cn, 0))
			cxgb3i_conn_tx_open(c3cn);
	} else
		cxgb3i_conn_tx_open(c3cn);

out_free:
	__kfree_skb(skb);
}
static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct s3_conn *c3cn = ctx;

	process_cpl_msg(process_wr_ack, c3cn, skb);
	return 0;
}
/*
 * For each connection, pre-allocate the skbs needed for close/abort requests,
 * so that we can service such a request right away.
 */
static void c3cn_free_cpl_skbs(struct s3_conn *c3cn)
{
	if (c3cn->cpl_close)
		kfree_skb(c3cn->cpl_close);
	if (c3cn->cpl_abort_req)
		kfree_skb(c3cn->cpl_abort_req);
	if (c3cn->cpl_abort_rpl)
		kfree_skb(c3cn->cpl_abort_rpl);
}
static int c3cn_alloc_cpl_skbs(struct s3_conn *c3cn)
{
	c3cn->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req),
				    GFP_KERNEL);
	if (!c3cn->cpl_close)
		return -ENOMEM;
	skb_put(c3cn->cpl_close, sizeof(struct cpl_close_con_req));

	c3cn->cpl_abort_req = alloc_skb(sizeof(struct cpl_abort_req),
					GFP_KERNEL);
	if (!c3cn->cpl_abort_req)
		goto free_cpl_skbs;
	skb_put(c3cn->cpl_abort_req, sizeof(struct cpl_abort_req));

	c3cn->cpl_abort_rpl = alloc_skb(sizeof(struct cpl_abort_rpl),
					GFP_KERNEL);
	if (!c3cn->cpl_abort_rpl)
		goto free_cpl_skbs;
	skb_put(c3cn->cpl_abort_rpl, sizeof(struct cpl_abort_rpl));

	return 0;

free_cpl_skbs:
	c3cn_free_cpl_skbs(c3cn);
	return -ENOMEM;
}
/**
 * c3cn_release_offload_resources - release offload resources
 * @c3cn: the offloaded iscsi tcp connection.
 *
 * Release resources held by an offload connection (TID, L2T entry, etc.)
 */
static void c3cn_release_offload_resources(struct s3_conn *c3cn)
{
	struct t3cdev *cdev = c3cn->cdev;
	unsigned int tid = c3cn->tid;

	c3cn_free_cpl_skbs(c3cn);

	if (c3cn->wr_avail != c3cn->wr_max) {
		purge_wr_queue(c3cn);
		reset_wr_list(c3cn);
	}

	if (cdev) {
		if (c3cn->l2t) {
			l2t_release(L2DATA(cdev), c3cn->l2t);
			c3cn->l2t = NULL;
		}
		if (c3cn->state == C3CN_STATE_CONNECTING)
			/* we have ATID */
			s3_free_atid(cdev, tid);
		else {
			/* we have TID */
			cxgb3_remove_tid(cdev, (void *)c3cn, tid);
			c3cn_put(c3cn);
		}
	}

	c3cn->dst_cache = NULL;
	c3cn->cdev = NULL;
}
/**
 * cxgb3i_c3cn_create - allocate and initialize an s3_conn structure
 *
 * returns the s3_conn structure allocated.
 */
struct s3_conn *cxgb3i_c3cn_create(void)
{
	struct s3_conn *c3cn;

	c3cn = kzalloc(sizeof(*c3cn), GFP_KERNEL);
	if (!c3cn)
		return NULL;

	/* pre-allocate close/abort cpl, so we don't need to wait for memory
	   when close/abort is requested. */
	if (c3cn_alloc_cpl_skbs(c3cn) < 0)
		goto free_c3cn;

	c3cn_conn_debug("alloc c3cn 0x%p.\n", c3cn);

	c3cn->flags = 0;
	spin_lock_init(&c3cn->lock);
	atomic_set(&c3cn->refcnt, 1);
	skb_queue_head_init(&c3cn->receive_queue);
	skb_queue_head_init(&c3cn->write_queue);
	setup_timer(&c3cn->retry_timer, NULL, (unsigned long)c3cn);
	rwlock_init(&c3cn->callback_lock);

	return c3cn;

free_c3cn:
	kfree(c3cn);
	return NULL;
}
static void c3cn_active_close(struct s3_conn *c3cn)
{
	int data_lost;
	int close_req = 0;

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);

	dst_confirm(c3cn->dst_cache);

	c3cn_hold(c3cn);
	spin_lock_bh(&c3cn->lock);

	data_lost = skb_queue_len(&c3cn->receive_queue);
	__skb_queue_purge(&c3cn->receive_queue);

	switch (c3cn->state) {
	case C3CN_STATE_CLOSED:
	case C3CN_STATE_ACTIVE_CLOSE:
	case C3CN_STATE_CLOSE_WAIT_1:
	case C3CN_STATE_CLOSE_WAIT_2:
	case C3CN_STATE_ABORTING:
		/* nothing needs to be done */
		break;
	case C3CN_STATE_CONNECTING:
		/* defer until cpl_act_open_rpl or cpl_act_establish */
		c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
		break;
	case C3CN_STATE_ESTABLISHED:
		close_req = 1;
		c3cn_set_state(c3cn, C3CN_STATE_ACTIVE_CLOSE);
		break;
	case C3CN_STATE_PASSIVE_CLOSE:
		close_req = 1;
		c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
		break;
	}

	if (close_req) {
		if (data_lost)
			/* Unread data was tossed, zap the connection. */
			send_abort_req(c3cn);
		else
			send_close_req(c3cn);
	}

	spin_unlock_bh(&c3cn->lock);
	c3cn_put(c3cn);
}
/**
 * cxgb3i_c3cn_release - close and release an iscsi tcp connection and any
 *	resources held
 * @c3cn: the iscsi tcp connection
 */
void cxgb3i_c3cn_release(struct s3_conn *c3cn)
{
	c3cn_conn_debug("c3cn 0x%p, s %u, f 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);
	if (unlikely(c3cn->state == C3CN_STATE_CONNECTING))
		c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
	else if (likely(c3cn->state != C3CN_STATE_CLOSED))
		c3cn_active_close(c3cn);
	c3cn_put(c3cn);
}
static int is_cxgb3_dev(struct net_device *dev)
{
	struct cxgb3i_sdev_data *cdata;
	struct net_device *ndev = dev;

	if (dev->priv_flags & IFF_802_1Q_VLAN)
		ndev = vlan_dev_real_dev(dev);

	write_lock(&cdata_rwlock);
	list_for_each_entry(cdata, &cdata_list, list) {
		struct adap_ports *ports = &cdata->ports;
		int i;

		for (i = 0; i < ports->nports; i++)
			if (ndev == ports->lldevs[i]) {
				write_unlock(&cdata_rwlock);
				return 1;
			}
	}
	write_unlock(&cdata_rwlock);
	return 0;
}
/**
 * cxgb3_egress_dev - return the cxgb3 egress device
 * @root_dev: the root device anchoring the search
 * @c3cn: the connection used to determine egress port in bonding mode
 * @context: in bonding mode, indicates a connection set up or failover
 *
 * Return egress device or NULL if the egress device isn't one of our ports.
 */
static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
					   struct s3_conn *c3cn,
					   int context)
{
	while (root_dev) {
		if (root_dev->priv_flags & IFF_802_1Q_VLAN)
			root_dev = vlan_dev_real_dev(root_dev);
		else if (is_cxgb3_dev(root_dev))
			return root_dev;
		else
			return NULL;
	}
	return NULL;
}
*find_route(struct net_device
*dev
,
1488 __be32 saddr
, __be32 daddr
,
1489 __be16 sport
, __be16 dport
)
1493 .oif
= dev
? dev
->ifindex
: 0,
1499 .proto
= IPPROTO_TCP
,
1503 .dport
= dport
} } };
1505 if (ip_route_output_flow(&init_net
, &rt
, &fl
, NULL
, 0))
/*
 * Assign offload parameters to some connection fields.
 */
static void init_offload_conn(struct s3_conn *c3cn,
			      struct t3cdev *cdev,
			      struct dst_entry *dst)
{
	BUG_ON(c3cn->cdev != cdev);
	c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs - 1;
	c3cn->wr_unacked = 0;
	c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst));

	reset_wr_list(c3cn);
}
static int initiate_act_open(struct s3_conn *c3cn, struct net_device *dev)
{
	struct cxgb3i_sdev_data *cdata = NDEV2CDATA(dev);
	struct t3cdev *cdev = cdata->cdev;
	struct dst_entry *dst = c3cn->dst_cache;
	struct sk_buff *skb;

	c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
			c3cn, c3cn->state, c3cn->flags);
	/*
	 * Initialize connection data. Note that the flags and ULP mode are
	 * initialized higher up ...
	 */
	c3cn->dev = dev;
	c3cn->cdev = cdev;
	c3cn->tid = cxgb3_alloc_atid(cdev, cdata->client, c3cn);
	if (c3cn->tid < 0)
		goto out_err;

	c3cn->qset = 0;
	c3cn->l2t = t3_l2t_get(cdev, dst->neighbour, dev);
	if (!c3cn->l2t)
		goto free_tid;

	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL);
	if (!skb)
		goto free_l2t;

	skb->sk = (struct sock *)c3cn;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	c3cn_hold(c3cn);

	init_offload_conn(c3cn, cdev, dst);
	c3cn->err = 0;

	make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
	l2t_send(cdev, skb, c3cn->l2t);
	return 0;

free_l2t:
	l2t_release(L2DATA(cdev), c3cn->l2t);
free_tid:
	s3_free_atid(cdev, c3cn->tid);
	c3cn->tid = 0;
out_err:
	return -EINVAL;
}
/**
 * cxgb3i_find_dev - find the interface associated with the given address
 * @ipaddr: ip address
 */
static struct net_device *
cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr)
{
	struct flowi fl;
	struct rtable *rt;
	int err;

	memset(&fl, 0, sizeof(fl));
	fl.nl_u.ip4_u.daddr = ipaddr;

	err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
	if (!err)
		return (&rt->u.dst)->dev;

	return NULL;
}
/**
 * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
 * @c3cn: the iscsi tcp connection
 * @usin: destination address
 *
 * return 0 if active open request is sent, < 0 otherwise.
 */
int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
			struct sockaddr_in *usin)
{
	struct rtable *rt;
	struct cxgb3i_sdev_data *cdata;
	struct t3cdev *cdev;
	__be32 sipv4;
	struct net_device *dstdev;
	int err;

	c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	c3cn->daddr.sin_port = usin->sin_port;
	c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;

	dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr);
	if (!dstdev || !is_cxgb3_dev(dstdev))
		return -ENETUNREACH;

	if (dstdev->priv_flags & IFF_802_1Q_VLAN)
		dev = dstdev;

	rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
			c3cn->daddr.sin_addr.s_addr,
			c3cn->saddr.sin_port,
			c3cn->daddr.sin_port);
	if (rt == NULL) {
		c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n",
				c3cn->daddr.sin_addr.s_addr,
				ntohs(c3cn->daddr.sin_port),
				dev ? dev->name : "any");
		return -ENETUNREACH;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n",
				c3cn->daddr.sin_addr.s_addr,
				ntohs(c3cn->daddr.sin_port),
				dev ? dev->name : "any");
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!c3cn->saddr.sin_addr.s_addr)
		c3cn->saddr.sin_addr.s_addr = rt->rt_src;

	/* now commit destination to connection */
	c3cn->dst_cache = &rt->u.dst;

	/* try to establish an offloaded connection */
	dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0);
	if (dev == NULL) {
		c3cn_conn_debug("c3cn 0x%p, egress dev NULL.\n", c3cn);
		return -ENETUNREACH;
	}
	cdata = NDEV2CDATA(dev);
	cdev = cdata->cdev;

	/* get a source port if one hasn't been provided */
	err = c3cn_get_port(c3cn, cdata);
	if (err)
		return err;

	c3cn_conn_debug("c3cn 0x%p get port %u.\n",
			c3cn, ntohs(c3cn->saddr.sin_port));

	sipv4 = cxgb3i_get_private_ipv4addr(dev);
	if (!sipv4) {
		c3cn_conn_debug("c3cn 0x%p, iscsi ip not configured.\n", c3cn);
		sipv4 = c3cn->saddr.sin_addr.s_addr;
		cxgb3i_set_private_ipv4addr(dev, sipv4);
	} else
		c3cn->saddr.sin_addr.s_addr = sipv4;

	c3cn_conn_debug("c3cn 0x%p, %pI4,%u-%pI4,%u SYN_SENT.\n",
			c3cn,
			&c3cn->saddr.sin_addr.s_addr,
			ntohs(c3cn->saddr.sin_port),
			&c3cn->daddr.sin_addr.s_addr,
			ntohs(c3cn->daddr.sin_port));

	c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
	if (!initiate_act_open(c3cn, dev))
		return 0;

	/*
	 * If we get here, we don't have an offload connection so simply
	 * return a failure.
	 */
	err = -ENOTSUPP;

	/*
	 * This trashes the connection and releases the local port,
	 * if necessary.
	 */
	c3cn_conn_debug("c3cn 0x%p -> CLOSED.\n", c3cn);
	c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
	ip_rt_put(rt);
	c3cn_put_port(c3cn);
	return err;
}
/**
 * cxgb3i_c3cn_rx_credits - ack received tcp data.
 * @c3cn: iscsi tcp connection
 * @copied: # of bytes processed
 *
 * Called after some received data has been read. It returns RX credits
 * to the HW for the amount of data processed.
 */
void cxgb3i_c3cn_rx_credits(struct s3_conn *c3cn, int copied)
{
	struct t3cdev *cdev;
	int must_send;
	u32 credits, dack = 0;

	if (c3cn->state != C3CN_STATE_ESTABLISHED)
		return;

	credits = c3cn->copied_seq - c3cn->rcv_wup;
	if (unlikely(!credits))
		return;

	cdev = c3cn->cdev;

	if (unlikely(cxgb3_rx_credit_thres == 0))
		return;

	dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	/*
	 * For coalescing to work effectively ensure the receive window has
	 * at least 16KB left.
	 */
	must_send = credits + 16384 >= cxgb3_rcv_win;

	if (must_send || credits >= cxgb3_rx_credit_thres)
		c3cn->rcv_wup += send_rx_credits(c3cn, credits, dack);
}
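/*
 * Worked example (editorial, default parameters): with cxgb3_rcv_win =
 * 256KB and cxgb3_rx_credit_thres = 10KB, credits are returned once at
 * least 10KB of read data has accumulated, and unconditionally as soon as
 * the credits withheld would leave less than 16KB of the 256KB receive
 * window available.
 */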
/**
 * cxgb3i_c3cn_send_pdus - send the skbs containing iscsi pdus
 * @c3cn: iscsi tcp connection
 * @skb: skb containing the iscsi pdu
 *
 * Add a list of skbs to a connection send queue. The skbs must comply with
 * the max size limit of the device and have a headroom of at least
 * TX_HEADER_LEN bytes.
 * Return # of bytes queued.
 */
int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
{
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&c3cn->lock);

	if (c3cn->state != C3CN_STATE_ESTABLISHED) {
		c3cn_tx_debug("c3cn 0x%p, not in est. state %u.\n",
			      c3cn, c3cn->state);
		err = -EAGAIN;
		goto out_err;
	}

	if (c3cn->err) {
		c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err);
		err = -EPIPE;
		goto out_err;
	}

	if (c3cn->write_seq - c3cn->snd_una >= cxgb3_snd_win) {
		c3cn_tx_debug("c3cn 0x%p, snd %u - %u > %u.\n",
				c3cn, c3cn->write_seq, c3cn->snd_una,
				cxgb3_snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	while (skb) {
		int frags = skb_shinfo(skb)->nr_frags +
			    (skb->len != skb->data_len);

		if (unlikely(skb_headroom(skb) < TX_HEADER_LEN)) {
			c3cn_tx_debug("c3cn 0x%p, skb head.\n", c3cn);
			err = -EINVAL;
			goto out_err;
		}

		if (frags >= SKB_WR_LIST_SIZE) {
			cxgb3i_log_error("c3cn 0x%p, tx frags %d, len %u,%u.\n",
					 c3cn, skb_shinfo(skb)->nr_frags,
					 skb->len, skb->data_len);
			err = -EINVAL;
			goto out_err;
		}

		next = skb->next;
		skb->next = NULL;
		skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND | C3CB_FLAG_NEED_HDR);
		copied += skb->len;
		c3cn->write_seq += skb->len + ulp_extra_len(skb);
		skb = next;
	}
done:
	if (likely(skb_queue_len(&c3cn->write_queue)))
		c3cn_push_tx_frames(c3cn, 1);
	spin_unlock_bh(&c3cn->lock);
	return copied;

out_err:
	if (copied == 0 && err == -EPIPE)
		copied = c3cn->err ? c3cn->err : -EPIPE;
	else
		copied = err;
	goto done;
}
static void sdev_data_cleanup(struct cxgb3i_sdev_data *cdata)
{
	struct adap_ports *ports = &cdata->ports;
	struct s3_conn *c3cn;
	int i;

	for (i = 0; i < cxgb3_max_connect; i++) {
		if (cdata->sport_conn[i]) {
			c3cn = cdata->sport_conn[i];
			cdata->sport_conn[i] = NULL;

			spin_lock_bh(&c3cn->lock);
			c3cn->cdev = NULL;
			c3cn_set_flag(c3cn, C3CN_OFFLOAD_DOWN);
			c3cn_closed(c3cn);
			spin_unlock_bh(&c3cn->lock);
		}
	}

	for (i = 0; i < ports->nports; i++)
		NDEV2CDATA(ports->lldevs[i]) = NULL;

	cxgb3i_free_big_mem(cdata);
}
void cxgb3i_sdev_cleanup(void)
{
	struct cxgb3i_sdev_data *cdata, *next;

	write_lock(&cdata_rwlock);
	/* entries are unlinked while iterating, so use the _safe variant */
	list_for_each_entry_safe(cdata, next, &cdata_list, list) {
		list_del(&cdata->list);
		sdev_data_cleanup(cdata);
	}
	write_unlock(&cdata_rwlock);
}
int cxgb3i_sdev_init(cxgb3_cpl_handler_func *cpl_handlers)
{
	cpl_handlers[CPL_ACT_ESTABLISH] = do_act_establish;
	cpl_handlers[CPL_ACT_OPEN_RPL] = do_act_open_rpl;
	cpl_handlers[CPL_PEER_CLOSE] = do_peer_close;
	cpl_handlers[CPL_ABORT_REQ_RSS] = do_abort_req;
	cpl_handlers[CPL_ABORT_RPL_RSS] = do_abort_rpl;
	cpl_handlers[CPL_CLOSE_CON_RPL] = do_close_con_rpl;
	cpl_handlers[CPL_TX_DMA_ACK] = do_wr_ack;
	cpl_handlers[CPL_ISCSI_HDR] = do_iscsi_hdr;

	if (cxgb3_max_connect > CXGB3I_MAX_CONN)
		cxgb3_max_connect = CXGB3I_MAX_CONN;
	return 0;
}
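/*
 * Usage sketch (editorial, hypothetical caller): the main module is
 * expected to own the CPL dispatch table and hand it to cxgb3i_sdev_init()
 * before registering with the cxgb3 offload layer, e.g.
 *
 *	static cxgb3_cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
 *
 *	cxgb3i_sdev_init(cpl_handlers);
 *	(then pass cpl_handlers to the cxgb3 client registration)
 *
 * NUM_CPL_CMDS here is assumed from the cxgb3 headers.
 */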
/**
 * cxgb3i_sdev_add - allocate and initialize resources for each adapter found
 * @cdev: t3cdev adapter
 * @client: cxgb3 driver client
 */
void cxgb3i_sdev_add(struct t3cdev *cdev, struct cxgb3_client *client)
{
	struct cxgb3i_sdev_data *cdata;
	struct ofld_page_info rx_page_info;
	unsigned int wr_len;
	int mapsize = cxgb3_max_connect * sizeof(struct s3_conn *);
	int i;

	cdata = cxgb3i_alloc_big_mem(sizeof(*cdata) + mapsize, GFP_KERNEL);
	if (!cdata) {
		cxgb3i_log_warn("t3dev 0x%p, offload up, OOM %d.\n",
				cdev, mapsize);
		return;
	}

	if (cdev->ctl(cdev, GET_WR_LEN, &wr_len) < 0 ||
	    cdev->ctl(cdev, GET_PORTS, &cdata->ports) < 0 ||
	    cdev->ctl(cdev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
		cxgb3i_log_warn("t3dev 0x%p, offload up, ioctl failed.\n",
				cdev);
		goto free_cdata;
	}

	s3_init_wr_tab(wr_len);

	spin_lock_init(&cdata->lock);
	INIT_LIST_HEAD(&cdata->list);
	cdata->cdev = cdev;
	cdata->client = client;

	for (i = 0; i < cdata->ports.nports; i++)
		NDEV2CDATA(cdata->ports.lldevs[i]) = cdata;

	write_lock(&cdata_rwlock);
	list_add_tail(&cdata->list, &cdata_list);
	write_unlock(&cdata_rwlock);

	cxgb3i_log_info("t3dev 0x%p, offload up, added.\n", cdev);
	return;

free_cdata:
	cxgb3i_free_big_mem(cdata);
}
/**
 * cxgb3i_sdev_remove - free the allocated resources for the adapter
 * @cdev: t3cdev adapter
 */
void cxgb3i_sdev_remove(struct t3cdev *cdev)
{
	struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);

	cxgb3i_log_info("t3dev 0x%p, offload down, remove.\n", cdev);

	write_lock(&cdata_rwlock);
	list_del(&cdata->list);
	write_unlock(&cdata_rwlock);

	sdev_data_cleanup(cdata);
}