/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"
#include "clip_tbl.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif

#define	DRV_MODULE_NAME		"cxgb4i"
#define	DRV_MODULE_DESC		"Chelsio T4-T6 iSCSI Driver"
#define	DRV_MODULE_VERSION	"0.9.5-ko"
#define	DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
static int cxgb4i_snd_win = -1;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.lro = false,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};

static struct scsi_host_template cxgb4i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_timed_out	= iscsi_eh_cmd_timed_out,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.dma_boundary	= PAGE_SIZE - 1,
	.this_id	= -1,
	.track_queue_depth = 1,
	.cmd_size	= sizeof(struct iscsi_cmd),
};

static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
			  CAP_DATADGST | CAP_DIGEST_OFFLOAD |
			  CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible	= cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session	= cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.unbind_conn	= iscsi_conn_unbind,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);

static struct notifier_block cxgb4_dcb_change = {
	.notifier_call = cxgb4_dcb_change_notify,
};
#endif

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	256

static int push_tx_frames(struct cxgbi_sock *, int);

/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
	int len = skb->len;

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
		len += sizeof(struct fw_ofld_tx_data_wr);

	if (likely(cxgbi_skcb_test_flag((struct sk_buff *)skb, SKCBF_TX_ISO)))
		len += sizeof(struct cpl_tx_data_iso);

	return (len <= MAX_IMM_OFLD_TX_DATA_WR_LEN);
}

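/*
 * Build a CPL_ACT_OPEN_REQ in the chip-specific format (T4, T5 or T6),
 * fill in the 4-tuple, TCP options (opt0/opt2) and filter tuple, and
 * hand the skb to the L2T layer for transmission.
 */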
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req *req =
				(struct cpl_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req *req =
				(struct cpl_t5_act_open_req *)skb->head;
		u32 isn = (get_random_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);
		opt2 |= T5_ISS_VALID;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else {
		struct cpl_t6_act_open_req *req =
				(struct cpl_t6_act_open_req *)skb->head;
		u32 isn = (get_random_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);

		opt2 |= T5_ISS_VALID;
		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);
		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
		       csk->state, csk->flags, csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

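/*
 * IPv6 counterpart of send_act_open_req(): same opt0/opt2 setup, but the
 * CPL_ACT_OPEN_REQ6 variants carry the 128-bit addresses split into hi/lo
 * 64-bit halves.
 */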
#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
			       struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req6 *req =
			    (struct cpl_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;

		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
					       8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
					      8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be32(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t));
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req6 *req =
			    (struct cpl_t5_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
					       8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
					      8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));
	} else {
		struct cpl_t6_act_open_req6 *req =
			    (struct cpl_t6_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
					       8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
					      8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));

		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
		CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
		csk->flags, csk->atid,
		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
		csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif

static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u.\n",
		  csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

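/*
 * ARP failure handler for an outgoing abort: downgrade the request to
 * CPL_ABORT_NO_RST and send it anyway so the tid still gets cleaned up.
 */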
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		  csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

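/*
 * Move the connection into CTP_ABORTING, drop any queued tx data and send
 * a CPL_ABORT_REQ asking the hardware to reset the peer.
 */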
static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		  req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

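/*
 * Acknowledge a peer-initiated abort with a CPL_ABORT_RPL carrying the
 * chosen reset disposition.
 */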
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, status %d.\n",
		  csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message.  Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		  csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
				      csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
				       | RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}

#define FLOWC_WR_NPARAMS_MIN	9
static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
{
	int nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;
#ifdef CONFIG_CHELSIO_T4_DCB
	nparams++;
#endif
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the FlowC request.
	 * Pass back the nparams and actual FlowC length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}

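/*
 * Send the firmware FlowC work request that seeds the hardware with the
 * connection's channel, queue, sequence-number and window state.  Issued
 * once before any offloaded tx data.  Returns the number of 16-byte
 * credits the request consumed.
 */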
static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int nparams, flowclen16, flowclen;

#ifdef CONFIG_CHELSIO_T4_DCB
	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif
	flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	if (csk->cdev->skb_iso_txhdr)
		flowc->mnemval[8].val = cpu_to_be32(CXGBI_MAX_ISO_DATA_IN_SKB);
	else
		flowc->mnemval[8].val = cpu_to_be32(16128);
#ifdef CONFIG_CHELSIO_T4_DCB
	flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
	if (vlan == CPL_L2T_VLAN_NONE) {
		pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
				    csk->tid);
		flowc->mnemval[9].val = cpu_to_be32(0);
	} else {
		flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
						    VLAN_PRIO_SHIFT);
	}
#endif

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		  csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		  csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
		  csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);

	return flowclen16;
}

static void
cxgb4i_make_tx_iso_cpl(struct sk_buff *skb, struct cpl_tx_data_iso *cpl)
{
	struct cxgbi_iso_info *info = (struct cxgbi_iso_info *)skb->head;
	u32 imm_en = !!(info->flags & CXGBI_ISO_INFO_IMM_ENABLE);
	u32 fslice = !!(info->flags & CXGBI_ISO_INFO_FSLICE);
	u32 lslice = !!(info->flags & CXGBI_ISO_INFO_LSLICE);
	u32 pdu_type = (info->op == ISCSI_OP_SCSI_CMD) ? 0 : 1;
	u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3;

	cpl->op_to_scsi = cpu_to_be32(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
				CPL_TX_DATA_ISO_FIRST_V(fslice) |
				CPL_TX_DATA_ISO_LAST_V(lslice) |
				CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
				CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
				CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
				CPL_TX_DATA_ISO_IMMEDIATE_V(imm_en) |
				CPL_TX_DATA_ISO_SCSI_V(pdu_type));

	cpl->ahs_len = info->ahs;
	cpl->mpdu = cpu_to_be16(DIV_ROUND_UP(info->mpdu, 4));
	cpl->burst_size = cpu_to_be32(info->burst_size);
	cpl->len = cpu_to_be32(info->len);
	cpl->reserved2_seglen_offset =
		cpu_to_be32(CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(info->segment_offset));
	cpl->datasn_offset = cpu_to_be32(info->datasn_offset);
	cpl->buffer_offset = cpu_to_be32(info->buffer_offset);
	cpl->reserved3 = cpu_to_be32(0);
	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		  "iso: flags 0x%x, op %u, ahs %u, num_pdu %u, mpdu %u, "
		  "burst_size %u, iso_len %u\n",
		  info->flags, info->op, info->ahs, info->num_pdu,
		  info->mpdu, info->burst_size << 2, info->len);
}

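/*
 * Prepend the FW_OFLD_TX_DATA_WR (or FW_ISCSI_TX_DATA_WR plus
 * CPL_TX_DATA_ISO for ISO skbs) work request header to an outgoing data skb.
 */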
static void
cxgb4i_make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, int dlen,
		       int len, u32 credits, int compl)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct fw_ofld_tx_data_wr *req;
	struct cpl_tx_data_iso *cpl;
	u32 submode = cxgbi_skcb_tx_ulp_mode(skb) & 0x3;
	u32 wr_ulp_mode = 0;
	u32 hdr_size = sizeof(*req);
	u32 opcode = FW_OFLD_TX_DATA_WR;
	u32 immlen = 0;
	u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
						T6_TX_FORCE_F;

	if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) {
		hdr_size += sizeof(struct cpl_tx_data_iso);
		opcode = FW_ISCSI_TX_DATA_WR;
		immlen += sizeof(struct cpl_tx_data_iso);
		submode |= 8;
	}

	if (is_ofld_imm(skb))
		immlen += dlen;

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, hdr_size);
	req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
					 FW_WR_COMPL_V(compl) |
					 FW_WR_IMMDLEN_V(immlen));
	req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
					FW_WR_LEN16_V(credits));
	req->plen = cpu_to_be32(len);
	cpl = (struct cpl_tx_data_iso *)(req + 1);

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)))
		cxgb4i_make_tx_iso_cpl(skb, cpl);

	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
			      FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);

	req->tunnel_to_proxy = cpu_to_be32(wr_ulp_mode | force |
					   FW_OFLD_TX_DATA_WR_SHOVE_V(1U));

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}

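/*
 * Drain the connection's write queue: for each skb check tx credits, emit
 * the FlowC request on first use, build the tx data WR and pass the skb
 * to the L2T layer.  Returns the total truesize of the skbs sent.
 */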
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		     csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			  1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && ((skb = skb_peek(&csk->write_queue)) != NULL)) {
		struct cxgbi_iso_info *iso_cpl;
		u32 dlen = skb->len;
		u32 len = skb->len;
		u32 iso_cpl_len = 0;
		u32 flowclen16 = 0;
		u32 credits_needed;
		u32 num_pdu = 1, hdr_len;

		if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))
			iso_cpl_len = sizeof(struct cpl_tx_data_iso);

		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
		else
			credits_needed =
				DIV_ROUND_UP((8 * calc_tx_flits_ofld(skb)) +
					     iso_cpl_len, 16);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
			credits_needed +=
				DIV_ROUND_UP(sizeof(struct fw_ofld_tx_data_wr), 16);

		/*
		 * Assumes the initial credits is large enough to support
		 * fw_flowc_wr plus largest possible first payload
		 */
		if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
			cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
		}

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				  "csk 0x%p, skb %u/%u, wr %d < %u.\n",
				  csk, skb->len, skb->data_len,
				  credits_needed, csk->wr_cred);

			csk->no_tx_credits++;
			break;
		}

		csk->no_tx_credits = 0;

		__skb_unlink(skb, &csk->write_queue);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
		skb->csum = (__force __wsum)(credits_needed + flowclen16);
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			  csk, skb->len, skb->data_len, credits_needed,
			  csk->wr_cred, csk->wr_una_cred);

		if (!req_completion &&
		    ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
		     after(csk->write_seq, (csk->snd_una + csk->snd_win / 2))))
			req_completion = 1;

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			u32 ulp_mode = cxgbi_skcb_tx_ulp_mode(skb);

			if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)) {
				iso_cpl = (struct cxgbi_iso_info *)skb->head;
				num_pdu = iso_cpl->num_pdu;
				hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb);
				len += (cxgbi_ulp_extra_len(ulp_mode) * num_pdu) +
				       (hdr_len * (num_pdu - 1));
			} else {
				len += cxgbi_ulp_extra_len(ulp_mode);
			}

			cxgb4i_make_tx_data_wr(csk, skb, dlen, len,
					       credits_needed, req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		} else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
			struct cpl_close_con_req *req =
				(struct cpl_close_con_req *)skb->data;

			req->wr.wr_hi |= cpu_to_be32(FW_WR_COMPL_F);
		}

		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			  csk, csk->state, csk->flags, csk->tid, skb, len);
		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}

static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb4_free_atid(lldi->tids, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}

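/*
 * CPL_ACT_ESTABLISH: our active open completed.  Move the connection from
 * its atid to the hardware-assigned tid, seed the TCP state from the
 * reply, and kick off any queued transmit.
 */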
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
		       atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       atid, tid, csk, csk->state, csk->flags, rcv_isn);

	module_put(cdev->owner);

	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);

	csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
	if (TCPOPT_TSTAMP_G(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, mss_idx %u, advmss %u.\n",
		  csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

static void csk_act_open_retry_timer(struct timer_list *t)
{
	struct sk_buff *skb = NULL;
	struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
				   struct l2t_entry *);
	int t4 = is_t4(lldi->adapter_type), size, size6;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET) {
		send_act_open_func = send_act_open_req;
		skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		send_act_open_func = send_act_open_req6;
		skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
	}

	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				       cxgbi_sock_act_open_req_arp_failure);
		send_act_open_func(csk, skb, csk->l2t);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}

static inline bool is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}

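/*
 * CPL_ACT_OPEN_RPL: the active open did not complete.  Negative advice is
 * ignored; CPL_ERR_CONN_EXIST triggers a delayed retry, anything else
 * fails the open with an errno derived from the CPL status.
 */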
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
	unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info_ipaddr("tid %u/%u, status %u.\n"
		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
		       atid, tid, status, csk, csk->state, csk->flags);

	if (is_neg_adv(status))
		goto rel_skb;

	module_put(cdev->owner);

	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
				 csk->csk_family);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					 act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_peer_close(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
	__kfree_skb(skb);
}

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ?
		       -EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid, req->status);

	if (is_neg_adv(req->status))
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
	cxgbi_sock_set_state(csk, CTP_ABORTING);

	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr), csk,
		       csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find connection for tid %u.\n", tid);
	} else {
		/* not expecting this, reset the connection. */
		pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
		spin_lock_bh(&csk->lock);
		send_abort_req(csk);
		spin_unlock_bh(&csk->lock);
	}
	__kfree_skb(skb);
}

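/*
 * CPL_ISCSI_HDR: an iSCSI PDU header (and possibly payload) arrived.  The
 * first message of a PDU becomes the "lhdr" skb; any follow-on payload
 * message is linked to it, and both stay queued until the completion CPL
 * reports the final DDP status.
 */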
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		  pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen, plen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			  csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if ((CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) &&
		    (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
		if (is_t4(lldi->adapter_type))
			plen -= 40;

		if ((hlen + dlen) != plen) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, plen, hlen, dlen,
				cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			  csk, skb, *bhs, hlen, dlen,
			  ntohl(*((unsigned int *)(bhs + 16))),
			  ntohl(*((unsigned int *)(bhs + 24))));
	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			  csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

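/*
 * CPL_ISCSI_DATA: payload-only message for a PDU whose header is delivered
 * separately; attach it to the current lhdr skb and queue it.
 */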
static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *lskb;
	u32 tid = GET_TID(cpl);
	u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb,
		  skb->len, pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr)
		csk->skb_ulp_lhdr = skb;

	lskb = csk->skb_ulp_lhdr;
	cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
		  csk, csk->state, csk->flags, skb, lskb);

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
		      struct sk_buff *skb, u32 ddpvld)
{
	if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
	}

	if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
	}
}

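/*
 * CPL_RX_DATA_DDP / CPL_RX_ISCSI_DDP: final status for the PDU whose
 * header is parked in skb_ulp_lhdr.  Record the data digest and DDP flags
 * on the lhdr skb and hand the completed PDU to libcxgbi.
 */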
static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	cxgb4i_process_ddpvld(csk, lskb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		  csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

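/*
 * CPL_RX_ISCSI_CMP (T6): completion for a PDU whose header was placed
 * directly by the chip.  Reorders any separately-queued payload skb so the
 * header is delivered first, then marks the PDU complete.
 */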
static void
do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *data_skb = NULL;
	u32 tid = GET_TID(rpl);
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);
	u32 seq = be32_to_cpu(rpl->seq);
	u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
		  "pdu_len_ddp %u, status %u.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
		  ntohs(rpl->len), pdu_len_ddp, rpl->status);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = seq;
	cxgbi_skcb_flags(skb) = 0;
	cxgbi_skcb_rx_pdulen(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*rpl));
	__pskb_trim(skb, be16_to_cpu(rpl->len));

	csk->rcv_nxt = seq + pdu_len_ddp;

	if (csk->skb_ulp_lhdr) {
		data_skb = skb_peek(&csk->receive_queue);
		if (!data_skb ||
		    !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
			pr_err("Error! freelist data not found 0x%p, tid %u\n",
			       data_skb, tid);
			goto abort_conn;
		}
		__skb_unlink(data_skb, &csk->receive_queue);

		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);

		__skb_queue_tail(&csk->receive_queue, skb);
		__skb_queue_tail(&csk->receive_queue, data_skb);
	} else {
		__skb_queue_tail(&csk->receive_queue, skb);
	}

	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
	cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);

	cxgb4i_process_ddpvld(csk, skb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
		  csk, skb, cxgbi_skcb_flags(skb));

	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);

	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

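/*
 * CPL_FW4_ACK: tx credits returned by the firmware; pass them to libcxgbi
 * so completed WRs can be freed and more data pushed.
 */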
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u.\n",
			  csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
				      rpl->seq_vld);
	}
	__kfree_skb(skb);
}

static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find conn. for tid %u.\n", tid);
		return;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE) {
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
		       csk, tid, rpl->status);
		csk->err = -EINVAL;
	}

	complete(&csk->cmpl);

	__kfree_skb(skb);
}

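/*
 * Pre-allocate the close/abort CPL skbs at connect time so the teardown
 * paths never have to allocate memory.
 */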
static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
				  0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *ndev = csk->cdev->ports[csk->port_id];
#endif

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	cxgbi_sock_purge_write_queue(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid,
				 csk->csk_family);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
}

#ifdef CONFIG_CHELSIO_T4_DCB
static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}

static int select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;
	return (ffs(pri_mask) - 1);
}

static u8 get_iscsi_dcb_priority(struct net_device *ndev)
{
	int rv;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = 3260
	};

	rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
	if (rv)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
		rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
		if (!rv) {
			iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
			rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
		}
	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
		rv = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "iSCSI priority is set to %u\n", select_priority(rv));
	return select_priority(rv);
}
#endif

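/*
 * Start an active open: resolve the neighbour, allocate an atid and L2T
 * entry, size the tx/rx windows from the link speed, then build and send
 * the chip-specific active-open request.
 */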
static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int rxq_idx;
	unsigned int size, size6;
	unsigned int linkspeed;
	unsigned int rcv_winf, snd_winf;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);
	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	if (!(n->nud_state & NUD_VALID))
		neigh_event_send(n, NULL);

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

#ifdef CONFIG_CHELSIO_T4_DCB
	if (get_iscsi_dcb_state(ndev))
		priority = get_iscsi_dcb_priority(ndev);

	csk->dcb_priority = priority;
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
#else
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
#endif
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_get(csk);

#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (is_t4(lldi->adapter_type)) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else if (is_t5(lldi->adapter_type)) {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	} else {
		size = sizeof(struct cpl_t6_act_open_req);
		size6 = sizeof(struct cpl_t6_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
	cdev->rxq_idx_cntr++;
	csk->rss_qid = lldi->rxq_ids[rxq_idx];
	linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
	csk->snd_win = cxgb4i_snd_win;
	csk->rcv_win = cxgb4i_rcv_win;
	if (cxgb4i_rcv_win <= 0) {
		csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
		rcv_winf = linkspeed / SPEED_10000;
		if (rcv_winf)
			csk->rcv_win *= rcv_winf;
	}
	if (cxgb4i_snd_win <= 0) {
		csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
		snd_winf = linkspeed / SPEED_10000;
		if (snd_winf)
			csk->snd_win *= snd_winf;
	}
	csk->wr_cred = lldi->wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
		       (&csk->saddr), (&csk->daddr), csk, csk->state,
		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
		       csk->mtu, csk->mss_idx, csk->smac_idx);

	/* must wait for either a act_open_rpl or act_open_establish */
	if (!try_module_get(cdev->owner)) {
		pr_err("%s, try_module_get failed.\n", ndev->name);
		goto rel_resource;
	}

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	if (csk->csk_family == AF_INET)
		send_act_open_req(csk, skb, csk->l2t);
#if IS_ENABLED(CONFIG_IPV6)
	else
		send_act_open_req6(csk, skb, csk->l2t);
#endif
	neigh_release(n);

	return 0;

rel_resource:
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
rel_resource_without_clip:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}

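/*
 * Per-opcode dispatch table mapping CPL messages to the handlers above;
 * the ULD rx path indexes it by the received CPL opcode.
 */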
static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_data,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
	[CPL_RX_DATA] = do_rx_data,
};

static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}
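
/*
 * Build the header of a ULP_TX memory-write work request used to write
 * pagepods into adapter memory. dlen and pm_addr are byte quantities;
 * the hardware takes both in 32-byte units, hence the ">> 5" shifts.
 * On T5 and later the pagepod data is carried as immediate data.
 */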
static inline void
ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
		   struct ulp_mem_io *req,
		   unsigned int wr_len, unsigned int dlen,
		   unsigned int pm_addr,
		   int tid)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
			      FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			 ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
			 T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}
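
/*
 * Allocate an skb sized for a ULP_TX immediate-data work request
 * covering npods pagepods and initialize its header; returns NULL on
 * allocation failure.
 */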
static struct sk_buff *
ddp_ppod_init_idata(struct cxgbi_device *cdev,
		    struct cxgbi_ppm *ppm,
		    unsigned int idx, unsigned int npods,
		    unsigned int tid)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				      sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);

	if (!skb) {
		pr_err("%s: %s idx %u, npods %u, OOM.\n",
		       __func__, ppm->ndev->name, idx, npods);
		return NULL;
	}

	ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
			   pm_addr, tid);

	return skb;
}
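
/*
 * Fill one work request's worth of pagepods from the task's scatterlist
 * and entail it on the connection's tx queue, presumably keeping the
 * memory write ordered with the connection's PDU stream.
 */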
static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
				struct cxgbi_task_tag_info *ttinfo,
				unsigned int idx, unsigned int npods,
				struct scatterlist **sg_pp,
				unsigned int *sg_off)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
						  csk->tid);
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	int i;

	if (!skb)
		return -ENOMEM;

	req = (struct ulp_mem_io *)skb->head;
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++)
		cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

	cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
	cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	spin_lock_bh(&csk->lock);
	cxgbi_sock_skb_entail(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}
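
/*
 * Program all pagepods for a task, splitting the range into chunks of
 * at most ULPMEM_IDATA_MAX_NPPODS pagepods per work request.
 */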
static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
		       struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	int err = 0;
	struct scatterlist *sg = ttinfo->sgl;
	unsigned int offset = 0;

	ttinfo->cid = csk->port_id;

	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					   &sg, &offset);
		if (err < 0)
			break;
	}

	return err;
}
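
/*
 * Set the DDP page-size index in the connection's TCB via
 * CPL_SET_TCB_FIELD (the 2-bit field selected by mask 0x3 << 8) and
 * wait for the reply, which is signalled through csk->cmpl.
 */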
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}
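
/*
 * Enable/disable iSCSI header and data digest (CRC32C) offload for the
 * connection, again via CPL_SET_TCB_FIELD (mask 0x3 << 4), and wait for
 * the adapter's reply.
 */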
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}
static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
{
	return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
				      (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
}
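
/*
 * Set up the pagepod manager backing iSCSI DDP. The tag format comes
 * from the page-size orders and tag mask exported by the LLD; tx/rx PDU
 * payload limits are capped by both ULP2_MAX_PDU_PAYLOAD and the
 * adapter's iscsi_iolen.
 */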
static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[0];
	struct cxgbi_tag_format tformat;
	int i, err;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					& 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	pr_info("iscsi_edram.start 0x%x iscsi_edram.size 0x%x",
		lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size);

	err = cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat,
				  lldi->vr->iscsi.size, lldi->iscsi_llimit,
				  lldi->vr->iscsi.start, 2,
				  lldi->vr->ppod_edram.start,
				  lldi->vr->ppod_edram.size);
	if (err < 0)
		return err;

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set_map = ddp_set_map;
	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->cdev2ppm = cdev2ppm;

	return 0;
}
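
/*
 * Detect "memory-free" (SO) adapters: parts with no external memory
 * enabled in MA_TARGET_MEM_ENABLE. T5 has two EXT_MEM enable bits,
 * other chips a single one.
 */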
static bool is_memfree(struct adapter *adap)
{
	u32 io;

	io = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
	if (is_t5(adap->params.chip)) {
		if ((io & EXT_MEM0_ENABLE_F) || (io & EXT_MEM1_ENABLE_F))
			return false;
	} else if (io & EXT_MEM_ENABLE_F) {
		return false;
	}

	return true;
}
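
/*
 * ULD probe callback, invoked once per adapter by cxgb4. Registers a
 * cxgbi_device, snapshots the lld_info, initializes DDP and offload
 * state, and adds one iSCSI host per port. On memory-free adapters the
 * queue depth is quartered and ISO stays disabled.
 */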
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	struct net_device *ndev;
	struct adapter *adap;
	struct tid_info *t;
	u32 max_cmds = CXGB4I_SCSI_HOST_QDEPTH;
	u32 max_conn = CXGBI_MAX_CONN;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			  "t4 0x%p, rxq id #%d: %u.\n",
			  cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
				 CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;
	cdev->owner = THIS_MODULE;

	cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf);
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc);
		goto err_out;
	}

	ndev = cdev->ports[0];
	adap = netdev2adap(ndev);
	if (adap) {
		t = &adap->tids;
		if (t->ntids <= CXGBI_MAX_CONN)
			max_conn = t->ntids;

		if (is_memfree(adap)) {
			cdev->flags |= CXGBI_FLAG_DEV_ISO_OFF;
			max_cmds = CXGB4I_SCSI_HOST_QDEPTH >> 2;

			pr_info("%s: 0x%p, tid %u, SO adapter.\n",
				ndev->name, cdev, t->ntids);
		}
	} else {
		pr_info("%s, 0x%p, NO adapter struct.\n", ndev->name, cdev);
	}

	/* ISO is enabled in T5/T6 firmware version >= 1.13.43.0
	 * (fw_vers 0x10d2b00 encodes 1.13.43.0, one version field per byte).
	 */
	if (!is_t4(lldi->adapter_type) &&
	    (lldi->fw_vers >= 0x10d2b00) &&
	    !(cdev->flags & CXGBI_FLAG_DEV_ISO_OFF))
		cdev->skb_iso_txhdr = sizeof(struct cpl_tx_data_iso);

	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	cxgb4i_host_template.can_queue = max_cmds;
	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, max_conn,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}
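
/*
 * Receive entry point from the LLD. A CPL message arrives either as
 * immediate data in the response descriptor (pgl == NULL) or in
 * free-list pages; both cases are normalized into an skb and fed to the
 * matching cxgb4i_cplhandlers[] entry. Returns 1 only on allocation
 * failure.
 */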
#define RX_PULL_LEN	128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	} else {
		cxgb4i_cplhandlers[opc](cdev, skb);
	}

	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}
static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}
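
/*
 * DCB notifier: when the configured priority for the iSCSI application
 * (TCP port 3260) changes, fail every connection whose dcb_priority no
 * longer matches so it is re-established at the new priority. Both IEEE
 * and CEE DCBX application formats are handled.
 */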
#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
			void *data)
{
	int i, port = 0xFF;
	struct net_device *ndev;
	struct cxgbi_device *cdev = NULL;
	struct dcb_app_type *iscsi_app = data;
	struct cxgbi_ports_map *pmap;
	u8 priority;

	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
		if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
		    (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
			return NOTIFY_DONE;

		priority = iscsi_app->app.priority;
	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
			return NOTIFY_DONE;

		if (!iscsi_app->app.priority)
			return NOTIFY_DONE;

		priority = ffs(iscsi_app->app.priority) - 1;
	} else {
		return NOTIFY_DONE;
	}

	if (iscsi_app->app.protocol != 3260)
		return NOTIFY_DONE;

	log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
		  iscsi_app->ifindex, priority);

	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
	if (!ndev)
		return NOTIFY_DONE;

	cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);

	dev_put(ndev);
	if (!cdev)
		return NOTIFY_DONE;

	pmap = &cdev->pmap;

	for (i = 0; i < pmap->used; i++) {
		if (pmap->port_csk[i]) {
			struct cxgbi_sock *csk = pmap->port_csk[i];

			if (csk->dcb_priority != priority) {
				iscsi_conn_failure(csk->user_data,
						   ISCSI_ERR_CONN_FAILED);
				pr_info("Restarting iSCSI connection %p with "
					"priority %u->%u.\n", csk,
					csk->dcb_priority, priority);
			}
		}
	}
	return NOTIFY_OK;
}
#endif
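
/*
 * Module init/exit: register the iSCSI transport with libcxgbi first,
 * then attach to cxgb4 as the CXGB4_ULD_ISCSI upper-layer driver (and,
 * with DCB support compiled in, subscribe to priority-change events).
 * Teardown runs in reverse order.
 */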
static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

#ifdef CONFIG_CHELSIO_T4_DCB
	pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
	register_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	return 0;
}
static void __exit cxgb4i_exit_module(void)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	unregister_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}
module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);