/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"
#include "clip_tbl.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"
#define	DRV_MODULE_NAME		"cxgb4i"
#define	DRV_MODULE_DESC		"Chelsio T4-T6 iSCSI Driver"
#define	DRV_MODULE_VERSION	"0.9.5-ko"
#define	DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
static int cxgb4i_snd_win = -1;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);
static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.lro = false,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};
static struct scsi_host_template cxgb4i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_timed_out	= iscsi_eh_cmd_timed_out,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.use_clustering	= DISABLE_CLUSTERING,
	.this_id	= -1,
	.track_queue_depth = 1,
};
static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
			  CAP_DATADGST | CAP_DIGEST_OFFLOAD |
			  CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible = cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session = cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
static struct scsi_transport_template *cxgb4i_stt;
/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and Chelsio asic.
 * The section below implements CPLs related to iscsi tcp connection
 * open/close/abort and data send/receive.
 */
#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	256

static int push_tx_frames(struct cxgbi_sock *, int);
/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data. We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
	int len = skb->len;

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
		len += sizeof(struct fw_ofld_tx_data_wr);

	return len <= MAX_IMM_TX_PKT_LEN;
}
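
/*
 * send_act_open_req - build and send an active-open (connect) request for
 * an IPv4 connection. The CPL layout differs by chip revision, so the T4,
 * T5 and T6 request variants are each filled in from the same opt0/opt2
 * values computed up front.
 */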
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				 (((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req *req =
				(struct cpl_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req *req =
				(struct cpl_t5_act_open_req *)skb->head;
		u32 isn = (prandom_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);
		opt2 |= T5_ISS_VALID;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else {
		struct cpl_t6_act_open_req *req =
				(struct cpl_t6_act_open_req *)skb->head;
		u32 isn = (prandom_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);

		opt2 |= T5_ISS_VALID;
		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);
		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
		       csk->state, csk->flags, csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
			       struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				 (((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req6 *req =
			    (struct cpl_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;

		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be32(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t));
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req6 *req =
			    (struct cpl_t5_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));
	} else {
		struct cpl_t6_act_open_req6 *req =
			    (struct cpl_t6_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));

		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
		CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
		csk->flags, csk->atid,
		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
		csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif
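
/*
 * send_close_req - send a CPL_CLOSE_CON_REQ to begin a graceful close.
 * The request is entailed on the write queue so it stays ordered behind
 * any tx data still pending on the connection.
 */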
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u.\n",
		  csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}
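
/*
 * abort_arp_failure - ARP failure handler for an outstanding abort request:
 * downgrade the abort to CPL_ABORT_NO_RST and send it anyway.
 */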
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		  csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		  req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, status %d.\n",
		  csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		  csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
				      csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
				       | RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}
/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
#define FLOWC_WR_NPARAMS_MIN	9
static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
{
	int nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the FlowC request.
	 * Pass back the nparams and actual FlowC length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}
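
/*
 * send_tx_flowc_wr - send the FlowC work request that must precede any tx
 * data on a connection; it hands the firmware the connection parameters
 * (channel, ingress queue, sequence numbers, send buffer size and MSS).
 */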
static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int nparams, flowclen16, flowclen;

	flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[8].val = 16384;

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		  csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		  csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
		  csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);

	return flowclen16;
}
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int dlen, int len, u32 credits, int compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
	unsigned int wr_ulp_mode = 0, val;
	bool imm = is_ofld_imm(skb);

	req = __skb_push(skb, sizeof(*req));

	if (imm) {
		req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					   FW_WR_COMPL_F |
					   FW_WR_IMMDLEN_V(dlen));
		req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
					  FW_WR_LEN16_V(credits));
	} else {
		req->op_to_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
				    FW_WR_COMPL_F |
				    FW_WR_IMMDLEN_V(0));
		req->flowid_len16 =
			cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
				    FW_WR_LEN16_V(credits));
	}
	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
			      FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
	val = skb_peek(&csk->write_queue) ? 0 : 1;
	req->tunnel_to_proxy = htonl(wr_ulp_mode |
				     FW_OFLD_TX_DATA_WR_SHOVE_V(val));
	req->plen = htonl(len);
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}
static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}
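
/*
 * push_tx_frames - flush as much of the write queue as tx credits allow.
 * Each skb is charged its work-request cost in 16-byte credits (cached in
 * skb->csum) and moved to the unacked wr queue before being handed to the
 * L2T layer for transmission.
 */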
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		     csk->state == CTP_CLOSE_WAIT_1 ||
		     csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			  1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;
		int flowclen16 = 0;

		skb_reset_transport_header(skb);
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen, 16);
		else
			credits_needed = DIV_ROUND_UP(
						8 * calc_tx_flits_ofld(skb),
						16);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
			credits_needed += DIV_ROUND_UP(
					sizeof(struct fw_ofld_tx_data_wr),
					16);

		/*
		 * Assumes the initial credits is large enough to support
		 * fw_flowc_wr plus largest possible first payload
		 */
		if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
			cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
		}

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				  "csk 0x%p, skb %u/%u, wr %d < %u.\n",
				  csk, skb->len, skb->data_len,
				  credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
		skb->csum = credits_needed + flowclen16;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			  csk, skb->len, skb->data_len, credits_needed,
			  csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		} else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
			struct cpl_close_con_req *req =
				(struct cpl_close_con_req *)skb->data;
			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			  csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}
static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb4_free_atid(lldi->tids, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}
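
/*
 * Process CPL_ACT_ESTABLISH: the active open completed. Swap the atid for
 * the hardware tid, seed the receive sequence numbers and kick off any
 * transmit data already queued on the connection.
 */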
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
		       atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       atid, tid, csk, csk->state, csk->flags, rcv_isn);

	module_put(cdev->owner);

	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);

	csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
	if (TCPOPT_TSTAMP_G(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, mss_idx %u, advmss %u.\n",
		  csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}

	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}
static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
static void csk_act_open_retry_timer(struct timer_list *t)
{
	struct sk_buff *skb = NULL;
	struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
				   struct l2t_entry *);
	int t4 = is_t4(lldi->adapter_type), size, size6;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET) {
		send_act_open_func = send_act_open_req;
		skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		send_act_open_func = send_act_open_req6;
		skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
	}

	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				       cxgbi_sock_act_open_req_arp_failure);
		send_act_open_func(csk, skb, csk->l2t);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
static inline bool is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
		status == CPL_ERR_KEEPALV_NEG_ADVICE ||
		status == CPL_ERR_PERSIST_NEG_ADVICE;
}
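
/*
 * Process CPL_ACT_OPEN_RPL: the active open failed or needs a retry.
 * CPL_ERR_CONN_EXIST is retried from a timer; other statuses are mapped to
 * errno values and fail the connect attempt.
 */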
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
	unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info_ipaddr("tid %u/%u, status %u.\n"
		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
		       atid, tid, status, csk, csk->state, csk->flags);

	if (is_neg_adv(status))
		goto rel_skb;

	module_put(cdev->owner);

	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
				 csk->csk_family);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					 act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
*cdev
, struct sk_buff
*skb
)
981 struct cxgbi_sock
*csk
;
982 struct cpl_peer_close
*req
= (struct cpl_peer_close
*)skb
->data
;
983 unsigned int tid
= GET_TID(req
);
984 struct cxgb4_lld_info
*lldi
= cxgbi_cdev_priv(cdev
);
985 struct tid_info
*t
= lldi
->tids
;
987 csk
= lookup_tid(t
, tid
);
988 if (unlikely(!csk
)) {
989 pr_err("can't find connection for tid %u.\n", tid
);
992 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
993 (&csk
->saddr
), (&csk
->daddr
),
994 csk
, csk
->state
, csk
->flags
, csk
->tid
);
995 cxgbi_sock_rcv_peer_close(csk
);
static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
	__kfree_skb(skb);
}
static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN: /* fall through */
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ?
			-EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}
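
/*
 * Process CPL_ABORT_REQ_RSS: the peer (or the hardware) aborted the
 * connection. Reply with CPL_ABORT_RPL and close the connection unless our
 * own abort is already pending.
 */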
static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid, req->status);

	if (is_neg_adv(req->status))
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
	cxgbi_sock_set_state(csk, CTP_ABORTING);

	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr), csk,
		       csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}
static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find connection for tid %u.\n", tid);
	} else {
		/* not expecting this, reset the connection. */
		pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
		spin_lock_bh(&csk->lock);
		send_abort_req(csk);
		spin_unlock_bh(&csk->lock);
	}
	__kfree_skb(skb);
}
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		  pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen, plen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			  csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
		if (is_t4(lldi->adapter_type))
			plen -= 40;

		if ((hlen + dlen) != plen) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, plen, hlen, dlen,
				cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			  csk, skb, *bhs, hlen, dlen,
			  ntohl(*((unsigned int *)(bhs + 16))),
			  ntohl(*((unsigned int *)(bhs + 24))));
	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			  csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *lskb;
	u32 tid = GET_TID(cpl);
	u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb,
		  skb->len, pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr)
		csk->skb_ulp_lhdr = skb;

	lskb = csk->skb_ulp_lhdr;
	cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
		  csk, csk->state, csk->flags, skb, lskb);

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
static void
cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
		      struct sk_buff *skb, u32 ddpvld)
{
	if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
	}

	if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
	}
}
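
/*
 * Process CPL_RX_DATA_DDP / CPL_RX_ISCSI_DDP: completion status for a
 * received PDU. Check the digest/pad bits, stamp the data digest on the
 * saved header skb and tell libcxgbi a complete PDU is ready.
 */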
static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	cxgb4i_process_ddpvld(csk, lskb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		  csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
static void
do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *data_skb = NULL;
	u32 tid = GET_TID(rpl);
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);
	u32 seq = be32_to_cpu(rpl->seq);
	u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
		  "pdu_len_ddp %u, status %u.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
		  ntohs(rpl->len), pdu_len_ddp, rpl->status);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = seq;
	cxgbi_skcb_flags(skb) = 0;
	cxgbi_skcb_rx_pdulen(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*rpl));
	__pskb_trim(skb, be16_to_cpu(rpl->len));

	csk->rcv_nxt = seq + pdu_len_ddp;

	if (csk->skb_ulp_lhdr) {
		data_skb = skb_peek(&csk->receive_queue);
		if (!data_skb ||
		    !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
			pr_err("Error! freelist data not found 0x%p, tid %u\n",
			       data_skb, tid);

			goto abort_conn;
		}
		__skb_unlink(data_skb, &csk->receive_queue);

		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);

		__skb_queue_tail(&csk->receive_queue, skb);
		__skb_queue_tail(&csk->receive_queue, data_skb);
	} else {
		__skb_queue_tail(&csk->receive_queue, skb);
	}

	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
	cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);

	cxgb4i_process_ddpvld(csk, skb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
		  csk, skb, cxgbi_skcb_flags(skb));

	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);

	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u.\n",
			  csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
				      rpl->seq_vld);
	}
	__kfree_skb(skb);
}
static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find conn. for tid %u.\n", tid);
		return;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE) {
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
		       csk, tid, rpl->status);
		csk->err = -EINVAL;
	}

	complete(&csk->cmpl);

	__kfree_skb(skb);
}
static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
				  0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}
static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *ndev = csk->cdev->ports[csk->port_id];
#endif

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	cxgbi_sock_purge_write_queue(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid,
				 csk->csk_family);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
}
static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int rxq_idx;
	unsigned int size, size6;
	unsigned int linkspeed;
	unsigned int rcv_winf, snd_winf;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);

	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	if (!(n->nud_state & NUD_VALID))
		neigh_event_send(n, NULL);

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_get(csk);

#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (is_t4(lldi->adapter_type)) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else if (is_t5(lldi->adapter_type)) {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	} else {
		size = sizeof(struct cpl_t6_act_open_req);
		size6 = sizeof(struct cpl_t6_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type,
					 cxgb4_port_viid(ndev));
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
	cdev->rxq_idx_cntr++;
	csk->rss_qid = lldi->rxq_ids[rxq_idx];
	linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
	csk->snd_win = cxgb4i_snd_win;
	csk->rcv_win = cxgb4i_rcv_win;
	if (cxgb4i_rcv_win <= 0) {
		csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
		rcv_winf = linkspeed / SPEED_10000;
		if (rcv_winf)
			csk->rcv_win *= rcv_winf;
	}
	if (cxgb4i_snd_win <= 0) {
		csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
		snd_winf = linkspeed / SPEED_10000;
		if (snd_winf)
			csk->snd_win *= snd_winf;
	}
	csk->wr_cred = lldi->wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
		       (&csk->saddr), (&csk->daddr), csk, csk->state,
		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
		       csk->mtu, csk->mss_idx, csk->smac_idx);

	/* must wait for either a act_open_rpl or act_open_establish */
	if (!try_module_get(cdev->owner)) {
		pr_err("%s, try_module_get failed.\n", ndev->name);
		goto rel_resource;
	}

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	if (csk->csk_family == AF_INET)
		send_act_open_req(csk, skb, csk->l2t);
#if IS_ENABLED(CONFIG_IPV6)
	else
		send_act_open_req6(csk, skb, csk->l2t);
#endif
	neigh_release(n);

	return 0;

rel_resource:
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
rel_resource_without_clip:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}
static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_data,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
	[CPL_RX_DATA] = do_rx_data,
};
static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}
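
/*
 * Build the ULP_TX memory-write work request header used to push page-pod
 * (DDP) entries into adapter memory as immediate data.
 */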
static inline void
ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
		   struct ulp_mem_io *req,
		   unsigned int wr_len, unsigned int dlen,
		   unsigned int pm_addr,
		   int tid)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
			      FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			 ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
			 T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}
static struct sk_buff *
ddp_ppod_init_idata(struct cxgbi_device *cdev,
		    struct cxgbi_ppm *ppm,
		    unsigned int idx, unsigned int npods,
		    unsigned int tid)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);

	if (!skb) {
		pr_err("%s: %s idx %u, npods %u, OOM.\n",
		       __func__, ppm->ndev->name, idx, npods);
		return NULL;
	}

	ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
			   pm_addr, tid);

	return skb;
}
*ppm
, struct cxgbi_sock
*csk
,
1851 struct cxgbi_task_tag_info
*ttinfo
,
1852 unsigned int idx
, unsigned int npods
,
1853 struct scatterlist
**sg_pp
,
1854 unsigned int *sg_off
)
1856 struct cxgbi_device
*cdev
= csk
->cdev
;
1857 struct sk_buff
*skb
= ddp_ppod_init_idata(cdev
, ppm
, idx
, npods
,
1859 struct ulp_mem_io
*req
;
1860 struct ulptx_idata
*idata
;
1861 struct cxgbi_pagepod
*ppod
;
1867 req
= (struct ulp_mem_io
*)skb
->head
;
1868 idata
= (struct ulptx_idata
*)(req
+ 1);
1869 ppod
= (struct cxgbi_pagepod
*)(idata
+ 1);
1871 for (i
= 0; i
< npods
; i
++, ppod
++)
1872 cxgbi_ddp_set_one_ppod(ppod
, ttinfo
, sg_pp
, sg_off
);
1874 cxgbi_skcb_set_flag(skb
, SKCBF_TX_MEM_WRITE
);
1875 cxgbi_skcb_set_flag(skb
, SKCBF_TX_FLAG_COMPL
);
1876 set_wr_txq(skb
, CPL_PRIORITY_DATA
, csk
->port_id
);
1878 spin_lock_bh(&csk
->lock
);
1879 cxgbi_sock_skb_entail(csk
, skb
);
1880 spin_unlock_bh(&csk
->lock
);
static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
		       struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	int err = 0;
	struct scatterlist *sg = ttinfo->sgl;
	unsigned int offset = 0;

	ttinfo->cid = csk->port_id;

	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					   &sg, &offset);
		if (err < 0)
			break;
	}

	return err;
}
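
/*
 * ddp_setup_conn_pgidx - program the DDP page-size index into the
 * connection's TCB via CPL_SET_TCB_FIELD and wait for the reply completion.
 */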
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/*  set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/*  set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}
static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
{
	return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
				      (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
}
static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[0];
	struct cxgbi_tag_format tformat;
	unsigned int ppmax;
	int i;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
	ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					 & 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, ppmax,
			    lldi->iscsi_llimit, lldi->vr->iscsi.start, 2);

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set_map = ddp_set_map;
	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->cdev2ppm = cdev2ppm;

	return 0;
}
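
/*
 * t4_uld_add - upper-layer driver hook called by cxgb4 for each adapter:
 * register a cxgbi device, copy the lld info and set up DDP, offload and
 * the iSCSI hosts.
 */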
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			  "t4 0x%p, rxq id #%d: %u.\n",
			  cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
				 CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;
	cdev->owner = THIS_MODULE;

	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
			<< FW_VIID_PFN_S;
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed.\n", cdev);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}
#define RX_PULL_LEN	128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid),
		  skb);
	if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	} else
		cxgb4i_cplhandlers[opc](cdev, skb);

	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}
static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}
static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

	return 0;
}

static void __exit cxgb4i_exit_module(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);