/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "cxgb4_uld.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"
#define	DRV_MODULE_NAME		"cxgb4i"
#define	DRV_MODULE_DESC		"Chelsio T4/T5 iSCSI Driver"
#define	DRV_MODULE_VERSION	"0.9.5-ko"
#define	DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
#define CXGB4I_DEFAULT_10G_RCV_WIN	(256 * 1024)
static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");
#define CXGB4I_DEFAULT_10G_SND_WIN	(128 * 1024)
static int cxgb4i_snd_win = -1;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *,
				       struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);
static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};
static struct scsi_host_template cxgb4i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.use_clustering	= DISABLE_CLUSTERING,
	.this_id	= -1,
	.track_queue_depth = 1,
};
static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
				CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible	= cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session	= cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
static struct scsi_transport_template *cxgb4i_stt;
/*
 * CPL (Chelsio Protocol Language) defines a message-passing interface
 * between the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	256

static int push_tx_frames(struct cxgbi_sock *, int);
/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
	int len = skb->len;

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
		len += sizeof(struct fw_ofld_tx_data_wr);

	return len <= MAX_IMM_TX_PKT_LEN;
}
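/*
 * Worked example for is_ofld_imm(): with MAX_IMM_TX_PKT_LEN = 256 as
 * defined above, a PDU that still needs its fw_ofld_tx_data_wr header
 * is sent as immediate data only if skb->len +
 * sizeof(struct fw_ofld_tx_data_wr) <= 256; anything larger goes out
 * with an SGL instead (see calc_tx_flits_ofld() below).
 */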
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int t4 = is_t4(lldi->adapter_type);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req *req =
				(struct cpl_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else {
		struct cpl_t5_act_open_req *req =
				(struct cpl_t5_act_open_req *)skb->head;
		u32 isn = (prandom_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);
		opt2 |= T5_ISS_VALID;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
		       (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
		       csk->state, csk->flags, csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
			       struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int t4 = is_t4(lldi->adapter_type);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (t4) {
		struct cpl_act_open_req6 *req =
			    (struct cpl_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;

		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be32(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t));
	} else {
		struct cpl_t5_act_open_req6 *req =
				(struct cpl_t5_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
		t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
		csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u.\n",
		csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, status %d.\n",
		csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
				      csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
				       | RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}
/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
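/*
 * Worked example for sgl_len(): the first entry lives in the ulptx_sgl
 * header (two 8-byte flits) and each following pair of entries packs
 * into three flits. So for n = 3 entries: n-- gives 2, and
 * (3 * 2) / 2 + (2 & 1) + 2 = 5 flits, i.e. 40 bytes of SGL.
 */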
/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
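/*
 * Worked example for calc_tx_flits_ofld(): an immediate skb of 100
 * bytes costs DIV_ROUND_UP(100, 8) = 13 flits. A non-immediate skb
 * with a 16-byte WR header in the linear area (transport offset 16)
 * and 3 page frags costs 16 / 8 + sgl_len(3) = 2 + 5 = 7 flits; a
 * linear tail beyond the transport header counts as one more SGL
 * entry, giving 2 + sgl_len(4) = 9 flits.
 */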
#define FLOWC_WR_NPARAMS_MIN	9
static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
{
	int nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the FlowC request.
	 * Pass back the nparams and actual FlowC length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}
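/*
 * Worked example, assuming the usual 8-byte fw_flowc_mnemval layout:
 * with nparams = 9, flowclen = offsetof(struct fw_flowc_wr, mnemval[9])
 * = 8 + 9 * 8 = 80 bytes, flowclen16 = DIV_ROUND_UP(80, 16) = 5
 * 16-byte credits, and the padded FlowC length stays 5 * 16 = 80.
 */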
static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int nparams, flowclen16, flowclen;

	flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[8].val = 16384;

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
		csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);

	return flowclen16;
}
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int dlen, int len, u32 credits, int compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
	unsigned int wr_ulp_mode = 0, val;
	bool imm = is_ofld_imm(skb);

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));

	if (imm) {
		req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					FW_WR_COMPL_F |
					FW_WR_IMMDLEN_V(dlen));
		req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
						FW_WR_LEN16_V(credits));
	} else {
		req->op_to_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					FW_WR_COMPL_F |
					FW_WR_IMMDLEN_V(0));
		req->flowid_len16 =
			cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
					FW_WR_LEN16_V(credits));
	}
	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
				FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
	val = skb_peek(&csk->write_queue) ? 0 : 1;
	req->tunnel_to_proxy = htonl(wr_ulp_mode |
				     FW_OFLD_TX_DATA_WR_SHOVE_V(val));
	req->plen = htonl(len);
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			  1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;
		int flowclen16 = 0;

		skb_reset_transport_header(skb);
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen, 16);
		else
			credits_needed = DIV_ROUND_UP(
						8 * calc_tx_flits_ofld(skb),
						16);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
			credits_needed += DIV_ROUND_UP(
					sizeof(struct fw_ofld_tx_data_wr),
					16);

		/*
		 * Assumes the initial credits is large enough to support
		 * fw_flowc_wr plus largest possible first payload
		 */
		if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
			cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
		}

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				"csk 0x%p, skb %u/%u, wr %d < %u.\n",
				csk, skb->len, skb->data_len,
				credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
		skb->csum = credits_needed + flowclen16;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			csk, skb->len, skb->data_len, credits_needed,
			csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}
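/*
 * Credit arithmetic example for the loop above: an immediate skb with
 * dlen = 192 needs DIV_ROUND_UP(192, 16) = 12 credits, plus
 * DIV_ROUND_UP(sizeof(struct fw_ofld_tx_data_wr), 16) more while
 * SKCBF_TX_NEED_HDR is still set. The total is stashed in skb->csum so
 * the FW4_ACK path can credit back exactly what this WR consumed.
 */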
static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb4_free_atid(lldi->tids, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
			atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       atid, tid, csk, csk->state, csk->flags, rcv_isn);

	module_put(THIS_MODULE);

	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);

	csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
	if (TCPOPT_TSTAMP_G(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, mss_idx %u, advmss %u.\n",
		csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}
static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
static void csk_act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb = NULL;
	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
				   struct l2t_entry *);
	int t4 = is_t4(lldi->adapter_type), size, size6;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET) {
		send_act_open_func = send_act_open_req;
		skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		send_act_open_func = send_act_open_req6;
		skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
	}

	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				       cxgbi_sock_act_open_req_arp_failure);
		send_act_open_func(csk, skb, csk->l2t);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
static inline bool is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
		status == CPL_ERR_KEEPALV_NEG_ADVICE ||
		status == CPL_ERR_PERSIST_NEG_ADVICE;
}
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
	unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info_ipaddr("tid %u/%u, status %u.\n"
		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
		       atid, tid, status, csk, csk->state, csk->flags);

	if (is_neg_adv(status))
		goto rel_skb;

	module_put(THIS_MODULE);

	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					 act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_peer_close(csk);
rel_skb:
	__kfree_skb(skb);
}
static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
	__kfree_skb(skb);
}
static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN: /* fall through */
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ?
			-EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}
static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid, req->status);

	if (is_neg_adv(req->status))
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
	cxgbi_sock_set_state(csk, CTP_ABORTING);

	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	if (csk)
		pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
			       (&csk->saddr), (&csk->daddr), csk,
			       csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}
static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find connection for tid %u.\n", tid);
	} else {
		/* not expecting this, reset the connection. */
		pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
		spin_lock_bh(&csk->lock);
		send_abort_req(csk);
		spin_unlock_bh(&csk->lock);
	}
	__kfree_skb(skb);
}
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen, plen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
		if (is_t4(lldi->adapter_type))
			plen -= 40;

		if ((hlen + dlen) != plen) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, plen, hlen, dlen,
				cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			csk, skb, *bhs, hlen, dlen,
			ntohl(*((unsigned int *)(bhs + 16))),
			ntohl(*((unsigned int *)(bhs + 24))));
	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
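/*
 * PDU length example for the header path above: a 48-byte BHS with a
 * 101-byte data segment gives rx_pdulen = (48 + 101 + 3) & ~0x3 = 152,
 * plus csk->dcrc_len (4) more bytes when a data digest was negotiated;
 * rcv_nxt advances by that padded total.
 */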
static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	unsigned int status = ntohl(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
	}
	if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
	}
	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u.\n",
			csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
					rpl->seq_vld);
	}
	__kfree_skb(skb);
}
static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk)
		pr_err("can't find conn. for tid %u.\n", tid);
	else
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
			csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
			csk, tid, rpl->status);
	__kfree_skb(skb);
}
static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
					0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
					0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
					0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}
static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *ndev = csk->cdev->ports[csk->port_id];
#endif

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}
*csk
)
1368 struct cxgbi_device
*cdev
= csk
->cdev
;
1369 struct cxgb4_lld_info
*lldi
= cxgbi_cdev_priv(cdev
);
1370 struct net_device
*ndev
= cdev
->ports
[csk
->port_id
];
1371 struct sk_buff
*skb
= NULL
;
1372 struct neighbour
*n
= NULL
;
1375 unsigned int size
, size6
;
1376 int t4
= is_t4(lldi
->adapter_type
);
1377 unsigned int linkspeed
;
1378 unsigned int rcv_winf
, snd_winf
;
1380 log_debug(1 << CXGBI_DBG_TOE
| 1 << CXGBI_DBG_SOCK
,
1381 "csk 0x%p,%u,0x%lx,%u.\n",
1382 csk
, csk
->state
, csk
->flags
, csk
->tid
);
1384 if (csk
->csk_family
== AF_INET
)
1385 daddr
= &csk
->daddr
.sin_addr
.s_addr
;
1386 #if IS_ENABLED(CONFIG_IPV6)
1387 else if (csk
->csk_family
== AF_INET6
)
1388 daddr
= &csk
->daddr6
.sin6_addr
;
1391 pr_err("address family 0x%x not supported\n", csk
->csk_family
);
1395 n
= dst_neigh_lookup(csk
->dst
, daddr
);
1398 pr_err("%s, can't get neighbour of csk->dst.\n", ndev
->name
);
1402 csk
->atid
= cxgb4_alloc_atid(lldi
->tids
, csk
);
1403 if (csk
->atid
< 0) {
1404 pr_err("%s, NO atid available.\n", ndev
->name
);
1407 cxgbi_sock_set_flag(csk
, CTPF_HAS_ATID
);
1408 cxgbi_sock_get(csk
);
1410 csk
->l2t
= cxgb4_l2t_get(lldi
->l2t
, n
, ndev
, 0);
1412 pr_err("%s, cannot alloc l2t.\n", ndev
->name
);
1413 goto rel_resource_without_clip
;
1415 cxgbi_sock_get(csk
);
1417 #if IS_ENABLED(CONFIG_IPV6)
1418 if (csk
->csk_family
== AF_INET6
)
1419 cxgb4_clip_get(ndev
, (const u32
*)&csk
->saddr6
.sin6_addr
, 1);
1423 size
= sizeof(struct cpl_act_open_req
);
1424 size6
= sizeof(struct cpl_act_open_req6
);
1426 size
= sizeof(struct cpl_t5_act_open_req
);
1427 size6
= sizeof(struct cpl_t5_act_open_req6
);
1430 if (csk
->csk_family
== AF_INET
)
1431 skb
= alloc_wr(size
, 0, GFP_NOIO
);
1432 #if IS_ENABLED(CONFIG_IPV6)
1434 skb
= alloc_wr(size6
, 0, GFP_NOIO
);
1439 skb
->sk
= (struct sock
*)csk
;
1440 t4_set_arp_err_handler(skb
, csk
, cxgbi_sock_act_open_req_arp_failure
);
1443 csk
->mtu
= dst_mtu(csk
->dst
);
1444 cxgb4_best_mtu(lldi
->mtus
, csk
->mtu
, &csk
->mss_idx
);
1445 csk
->tx_chan
= cxgb4_port_chan(ndev
);
1446 /* SMT two entries per row */
1447 csk
->smac_idx
= ((cxgb4_port_viid(ndev
) & 0x7F)) << 1;
1448 step
= lldi
->ntxq
/ lldi
->nchan
;
1449 csk
->txq_idx
= cxgb4_port_idx(ndev
) * step
;
1450 step
= lldi
->nrxq
/ lldi
->nchan
;
1451 csk
->rss_qid
= lldi
->rxq_ids
[cxgb4_port_idx(ndev
) * step
];
1452 linkspeed
= ((struct port_info
*)netdev_priv(ndev
))->link_cfg
.speed
;
1453 csk
->snd_win
= cxgb4i_snd_win
;
1454 csk
->rcv_win
= cxgb4i_rcv_win
;
1455 if (cxgb4i_rcv_win
<= 0) {
1456 csk
->rcv_win
= CXGB4I_DEFAULT_10G_RCV_WIN
;
1457 rcv_winf
= linkspeed
/ SPEED_10000
;
1459 csk
->rcv_win
*= rcv_winf
;
1461 if (cxgb4i_snd_win
<= 0) {
1462 csk
->snd_win
= CXGB4I_DEFAULT_10G_SND_WIN
;
1463 snd_winf
= linkspeed
/ SPEED_10000
;
1465 csk
->snd_win
*= snd_winf
;
1467 csk
->wr_cred
= lldi
->wr_cred
-
1468 DIV_ROUND_UP(sizeof(struct cpl_abort_req
), 16);
1469 csk
->wr_max_cred
= csk
->wr_cred
;
1470 csk
->wr_una_cred
= 0;
1471 cxgbi_sock_reset_wr_list(csk
);
1474 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
1475 (&csk
->saddr
), (&csk
->daddr
), csk
, csk
->state
,
1476 csk
->flags
, csk
->tx_chan
, csk
->txq_idx
, csk
->rss_qid
,
1477 csk
->mtu
, csk
->mss_idx
, csk
->smac_idx
);
1479 /* must wait for either a act_open_rpl or act_open_establish */
1480 try_module_get(THIS_MODULE
);
1481 cxgbi_sock_set_state(csk
, CTP_ACTIVE_OPEN
);
1482 if (csk
->csk_family
== AF_INET
)
1483 send_act_open_req(csk
, skb
, csk
->l2t
);
1484 #if IS_ENABLED(CONFIG_IPV6)
1486 send_act_open_req6(csk
, skb
, csk
->l2t
);
1493 #if IS_ENABLED(CONFIG_IPV6)
1494 if (csk
->csk_family
== AF_INET6
)
1495 cxgb4_clip_release(ndev
,
1496 (const u32
*)&csk
->saddr6
.sin6_addr
, 1);
1498 rel_resource_without_clip
:
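/*
 * Window sizing example for init_act_open() above: on a 40G port,
 * linkspeed / SPEED_10000 = 4, so the defaults scale to
 * 4 * CXGB4I_DEFAULT_10G_RCV_WIN = 1MB receive and
 * 4 * CXGB4I_DEFAULT_10G_SND_WIN = 512KB send windows; a positive
 * cxgb4i_rcv_win/cxgb4i_snd_win module parameter bypasses the scaling.
 */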
cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
	[CPL_RX_DATA] = do_rx_data,
};
int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}
/*
 * functions to program the pagepod in h/w
 */
#define ULPMEM_IDATA_MAX_NPPODS	4 /* 256/PPOD_SIZE */
static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
				struct ulp_mem_io *req,
				unsigned int wr_len, unsigned int dlen,
				unsigned int pm_addr)
{
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, 0);
	if (is_t4(lldi->adapter_type))
		req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
					(ULP_MEMIO_ORDER_F));
	else
		req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
					(T5_ULP_MEMIO_IMM_F));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}
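/*
 * Example for the header setup above, assuming the 64-byte PPOD_SIZE
 * implied by the ULPMEM_IDATA_MAX_NPPODS comment (256/PPOD_SIZE = 4):
 * writing 4 pagepods gives dlen = 256, encoded by the >> 5 shifts as
 * 8 32-byte units, and pm_addr is likewise expressed in 32-byte units.
 */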
static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
				struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
				unsigned int npods,
				struct cxgbi_gather_list *gl,
				unsigned int gl_pidx)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
	unsigned int dlen = PPOD_SIZE * npods;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	unsigned int i;

	skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
	if (!skb) {
		pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
			cdev, idx, npods);
		return -ENOMEM;
	}
	req = (struct ulp_mem_io *)skb->head;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
		if (!hdr && !gl)
			cxgbi_ddp_ppod_clear(ppod);
		else
			cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
	}

	cxgb4_ofld_send(cdev->ports[port_id], skb);
	return 0;
}
static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
			unsigned int idx, unsigned int npods,
			struct cxgbi_gather_list *gl)
{
	unsigned int i, cnt;
	int err = 0;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
					idx, cnt, gl, 4 * i);
		if (err < 0)
			break;
	}
	return err;
}
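/*
 * Chunking example for ddp_set_map(): npods = 10 is written as chunks
 * of 4, 4 and 2 pagepods (ULPMEM_IDATA_MAX_NPPODS = 4), with the
 * gather-list page index passed as 4 * i, i.e. calls at
 * (idx, 4, 0), (idx + 4, 4, 16) and (idx + 8, 2, 32).
 */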
static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
			  unsigned int idx, unsigned int npods)
{
	unsigned int i, cnt;
	int err;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
					idx, cnt, NULL, 0);
		if (err < 0)
			break;
	}
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx, bool reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/*  set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}
static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int tagmask, pgsz_factor[4];
	int err;

	if (ddp) {
		kref_get(&ddp->refcnt);
		pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
			cdev, cdev->ddp);
		return -EALREADY;
	}

	err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
			lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
			lldi->iscsi_iolen, lldi->iscsi_iolen);
	if (err < 0)
		return err;

	ddp = cdev->ddp;

	tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	cxgbi_ddp_page_size_factor(pgsz_factor);
	cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set = ddp_set_map;
	cdev->csk_ddp_clear = ddp_clear_map;

	pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
		cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
	pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
		"%u/%u.\n",
		cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
		ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
		ddp->max_rxsz, lldi->iscsi_iolen);
	pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
		cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
		ddp->max_rxsz);
	return 0;
}
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			"t4 0x%p, rxq id #%d: %u.\n",
			cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;

	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
			<< FW_VIID_PFN_S;
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed.\n", cdev);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}
#define RX_PULL_LEN	128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		"cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (cxgb4i_cplhandlers[opc])
		cxgb4i_cplhandlers[opc](cdev, skb);
	else {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}
static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}
static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

	return 0;
}

static void __exit cxgb4i_exit_module(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);