/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <linux/netdevice.h>

#include "cxgb4_uld.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"
#define DRV_MODULE_NAME		"cxgb4i"
#define DRV_MODULE_DESC		"Chelsio T4/T5 iSCSI Driver"
#define DRV_MODULE_VERSION	"0.9.4"

static char version[] =
        DRV_MODULE_DESC " " DRV_MODULE_NAME
        " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");
module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
static int cxgb4i_rcv_win = 256 * 1024;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");
static int cxgb4i_snd_win = 128 * 1024;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
                 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static const struct cxgb4_uld_info cxgb4i_uld_info = {
        .name = DRV_MODULE_NAME,
        .add = t4_uld_add,
        .rx_handler = t4_uld_rx_handler,
        .state_change = t4_uld_state_change,
};
static struct scsi_host_template cxgb4i_host_template = {
        .module		= THIS_MODULE,
        .name		= DRV_MODULE_NAME,
        .proc_name	= DRV_MODULE_NAME,
        .can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
        .queuecommand	= iscsi_queuecommand,
        .change_queue_depth = iscsi_change_queue_depth,
        .sg_tablesize	= SG_ALL,
        .max_sectors	= 0xFFFF,
        .cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
        .eh_abort_handler = iscsi_eh_abort,
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
        .target_alloc	= iscsi_target_alloc,
        .use_clustering	= DISABLE_CLUSTERING,
};
static struct iscsi_transport cxgb4i_iscsi_transport = {
        .owner		= THIS_MODULE,
        .name		= DRV_MODULE_NAME,
        .caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
                          CAP_DATADGST | CAP_DIGEST_OFFLOAD |
                          CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
        .attr_is_visible = cxgbi_attr_is_visible,
        .get_host_param	= cxgbi_get_host_param,
        .set_host_param	= cxgbi_set_host_param,
        /* session management */
        .create_session	= cxgbi_create_session,
        .destroy_session = cxgbi_destroy_session,
        .get_session_param = iscsi_session_get_param,
        /* connection management */
        .create_conn	= cxgbi_create_conn,
        .bind_conn	= cxgbi_bind_conn,
        .destroy_conn	= iscsi_tcp_conn_teardown,
        .start_conn	= iscsi_conn_start,
        .stop_conn	= iscsi_conn_stop,
        .get_conn_param	= iscsi_conn_get_param,
        .set_param	= cxgbi_set_conn_param,
        .get_stats	= cxgbi_get_conn_stats,
        /* pdu xmit req from user space */
        .send_pdu	= iscsi_conn_send_pdu,
        .init_task	= iscsi_tcp_task_init,
        .xmit_task	= iscsi_tcp_task_xmit,
        .cleanup_task	= cxgbi_cleanup_task,
        .alloc_pdu	= cxgbi_conn_alloc_pdu,
        .init_pdu	= cxgbi_conn_init_pdu,
        .xmit_pdu	= cxgbi_conn_xmit_pdu,
        .parse_pdu_itt	= cxgbi_parse_pdu_itt,
        /* TCP connect/disconnect */
        .get_ep_param	= cxgbi_get_ep_param,
        .ep_connect	= cxgbi_ep_connect,
        .ep_poll	= cxgbi_ep_poll,
        .ep_disconnect	= cxgbi_ep_disconnect,
        /* Error recovery timeout call */
        .session_recovery_timedout = iscsi_session_recovery_timedout,
};
static struct scsi_transport_template *cxgb4i_stt;
/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	128

static inline void set_queue(struct sk_buff *skb, unsigned int queue,
                             const struct cxgbi_sock *csk)
{
        skb->queue_mapping = queue;
}

static int push_tx_frames(struct cxgbi_sock *, int);
/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return skb->len <= (MAX_IMM_TX_PKT_LEN -
                            sizeof(struct fw_ofld_tx_data_wr));
}
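/*
 * A small payload goes out as "immediate" data: the PDU bytes ride inside
 * the FW_OFLD_TX_DATA_WR work request itself instead of being referenced
 * through a separate scatter/gather list.  Roughly, anything that fits in
 * MAX_IMM_TX_PKT_LEN (128 bytes) minus the WR header is eligible, e.g. a
 * bare iSCSI NOP or small control PDU; larger PDUs take the SGL path in
 * push_tx_frames().  (A sketch; the exact cutoff follows
 * sizeof(struct fw_ofld_tx_data_wr) from the firmware headers.)
 */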
#define VLAN_NONE 0xfff
#define FILTER_SEL_VLAN_NONE 0xffff
#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
#define FILTER_SEL_WIDTH_VIN_P_FC \
        (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
#define FILTER_SEL_WIDTH_TAG_P_FC \
        (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)

static unsigned int select_ntuple(struct cxgbi_device *cdev,
                                  struct l2t_entry *l2t)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        unsigned int ntuple = 0;
        u32 viid;

        switch (lldi->filt_mode) {

        /* default filter mode */
        case HW_TPL_FR_MT_PR_IV_P_FC:
                if (l2t->vlan == VLAN_NONE)
                        ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
                else {
                        ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
                        ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
                }
                ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
                          FILTER_SEL_WIDTH_VLD_TAG_P_FC;
                break;
        case HW_TPL_FR_MT_PR_OV_P_FC: {
                viid = cxgb4_port_viid(l2t->neigh->dev);

                ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
                ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
                ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
                ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
                          FILTER_SEL_WIDTH_VLD_TAG_P_FC;
                break;
        }
        default:
                break;
        }
        return ntuple;
}
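/*
 * The ntuple built above is a compressed filter tuple handed to the hardware
 * with the active-open request (see req->params in send_act_open_req()).
 * Its layout depends on the adapter's configured filter mode, which is why
 * the FILTER_SEL_WIDTH_* constants are running sums of field widths
 * (port/FCoE, then VIN/VF, then tag/PF, then the valid bit).  This is only a
 * sketch of the packing; the authoritative layout comes from the TP filter
 * configuration in the cxgb4 driver.
 */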
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                              struct l2t_entry *e)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
        unsigned int qid_atid = ((unsigned int)csk->atid) |
                                (((unsigned int)csk->rss_qid) << 14);

        opt0 = KEEP_ALIVE(1) |
                WND_SCALE(wscale) |
                MSS_IDX(csk->mss_idx) |
                L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
                TX_CHAN(csk->tx_chan) |
                SMAC_SEL(csk->smac_idx) |
                ULP_MODE(ULP_MODE_ISCSI) |
                RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
        opt2 = RX_CHANNEL(0) |
                RSS_QUEUE(csk->rss_qid);

        if (is_t4(lldi->adapter_type)) {
                struct cpl_act_open_req *req =
                                (struct cpl_act_open_req *)skb->head;

                req = (struct cpl_act_open_req *)skb->head;

                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                                            qid_atid));
                req->local_port = csk->saddr.sin_port;
                req->peer_port = csk->daddr.sin_port;
                req->local_ip = csk->saddr.sin_addr.s_addr;
                req->peer_ip = csk->daddr.sin_addr.s_addr;
                req->opt0 = cpu_to_be64(opt0);
                req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
                req->opt2 = cpu_to_be32(opt2);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
        } else {
                struct cpl_t5_act_open_req *req =
                                (struct cpl_t5_act_open_req *)skb->head;

                req = (struct cpl_t5_act_open_req *)skb->head;

                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                                            qid_atid));
                req->local_port = csk->saddr.sin_port;
                req->peer_port = csk->daddr.sin_port;
                req->local_ip = csk->saddr.sin_addr.s_addr;
                req->peer_ip = csk->daddr.sin_addr.s_addr;
                req->opt0 = cpu_to_be64(opt0);
                req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
                req->opt2 = cpu_to_be32(opt2);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
        }

        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
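/*
 * The atid and the RSS queue are packed into one 32-bit cookie so the
 * reply carries both: the low 14 bits hold the atid and the ingress queue
 * id sits above it, e.g.
 *
 *	qid_atid = atid | (rss_qid << 14);
 *	atid     = GET_TID_TID(ntohl(req->tos_atid));
 *
 * (a sketch of the encoding used above; the GET_TID_TID() extraction is
 * what do_act_establish()/do_act_open_rpl() apply on the reply path).
 */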
static void send_close_req(struct cxgbi_sock *csk)
{
        struct sk_buff *skb = csk->cpl_close;
        struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
        unsigned int tid = csk->tid;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u.\n",
                csk, csk->state, csk->flags, csk->tid);
        csk->cpl_close = NULL;
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));

        cxgbi_sock_skb_entail(csk, skb);
        if (csk->state >= CTP_ESTABLISHED)
                push_tx_frames(csk, 1);
}
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
        struct cpl_abort_req *req;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
                csk, csk->state, csk->flags, csk->tid);
        req = (struct cpl_abort_req *)skb->data;
        req->cmd = CPL_ABORT_NO_RST;
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
static void send_abort_req(struct cxgbi_sock *csk)
{
        struct cpl_abort_req *req;
        struct sk_buff *skb = csk->cpl_abort_req;

        if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
                return;
        cxgbi_sock_set_state(csk, CTP_ABORTING);
        cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
        cxgbi_sock_purge_write_queue(csk);

        csk->cpl_abort_req = NULL;
        req = (struct cpl_abort_req *)skb->head;
        set_queue(skb, CPL_PRIORITY_DATA, csk);
        req->cmd = CPL_ABORT_SEND_RST;
        t4_set_arp_err_handler(skb, csk, abort_arp_failure);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
        req->rsvd0 = htonl(csk->snd_nxt);
        req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
                req->rsvd1);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
        struct sk_buff *skb = csk->cpl_abort_rpl;
        struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, status %d.\n",
                csk, csk->state, csk->flags, csk->tid, rst_status);

        csk->cpl_abort_rpl = NULL;
        set_queue(skb, CPL_PRIORITY_DATA, csk);
        INIT_TP_WR(rpl, csk->tid);
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
        rpl->cmd = rst_status;
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
        struct sk_buff *skb;
        struct cpl_rx_data_ack *req;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
                csk, csk->state, csk->flags, csk->tid, credits);

        skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
        if (!skb) {
                pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
                return 0;
        }
        req = (struct cpl_rx_data_ack *)skb->head;

        set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
                                      csk->tid));
        req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
        return credits;
}
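/*
 * Returning RX credits opens the receive window back up.  libcxgbi calls
 * csk_send_rx_credits (this function) once the not-yet-returned receive
 * bytes (roughly rcv_nxt - rcv_wup) cross cxgb4i_rx_credit_thres;
 * RX_FORCE_ACK(1) asks the chip to push a TCP ACK immediately.  This is a
 * rough sketch of how the flow-control work is split between libcxgbi and
 * this CPL.
 */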
/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
        n--;
        return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
        unsigned int flits, cnt;

        if (is_ofld_imm(skb))
                return DIV_ROUND_UP(skb->len, 8);
        flits = skb_transport_offset(skb) / 8;
        cnt = skb_shinfo(skb)->nr_frags;
        if (skb_tail_pointer(skb) != skb_transport_header(skb))
                cnt++;
        return flits + sgl_len(cnt);
}
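/*
 * Worked example of the flit math above (a flit is 8 bytes): the ULPTX SGL
 * packs the command word plus the first length/address pair into 2 flits
 * and every further pair of entries into 3 flits, so with n' = n - 1 the
 * count is (3 * n') / 2 + (n' & 1) + 2.  For n = 1, 2, 3 entries that gives
 * 2, 4 and 5 flits respectively.  calc_tx_flits_ofld() then adds the flits
 * taken by the WR header and any immediate headers in front of the
 * transport header.  (Illustrative only; the SGL format itself is defined
 * by the ULP_TX hardware spec.)
 */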
static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
{
        struct sk_buff *skb;
        struct fw_flowc_wr *flowc;
        int flowclen = 80, i;

        skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
        flowc = (struct fw_flowc_wr *)skb->head;
        flowc->op_to_nparams =
                htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8));
        flowc->flowid_len16 =
                htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) |
                      FW_WR_FLOWID(csk->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = htonl(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = htonl(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = htonl(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = htonl(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = htonl(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = htonl(csk->advmss);
        flowc->mnemval[8].mnemonic = 0;
        flowc->mnemval[8].val = 0;
        for (i = 0; i < 9; i++) {
                flowc->mnemval[i].r4[0] = 0;
                flowc->mnemval[i].r4[1] = 0;
                flowc->mnemval[i].r4[2] = 0;
        }
        set_queue(skb, CPL_PRIORITY_DATA, csk);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
                csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
                csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
                csk->advmss);

        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
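/*
 * The FLOWC work request primes the firmware's per-flow state before the
 * first data WR: nine mnemonic/value pairs (PFN/VFN, channel, port, ingress
 * queue, SNDNXT, RCVNXT, send buffer, MSS, plus a terminating zero entry).
 * Size-wise that is an 8-byte WR header plus 9 * 8-byte pairs = 80 bytes,
 * i.e. 5 16-byte units of tx credit, which is why push_tx_frames() charges
 * an extra 5 credits the first time it transmits on a connection.
 * (A sketch; the exact credit charge simply follows the WR length.)
 */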
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
                                   int dlen, int len, u32 credits, int compl)
{
        struct fw_ofld_tx_data_wr *req;
        unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
        unsigned int wr_ulp_mode = 0;

        req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));

        if (is_ofld_imm(skb)) {
                req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
                                        FW_WR_IMMDLEN(dlen));
                req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) |
                                        FW_WR_LEN16(credits));
        } else {
                req->op_to_immdlen =
                        cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
                                        FW_WR_IMMDLEN(0));
                req->flowid_len16 =
                        cpu_to_be32(FW_WR_FLOWID(csk->tid) |
                                        FW_WR_LEN16(credits));
        }
        if (submode)
                wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) |
                                FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode);
        req->tunnel_to_proxy = htonl(wr_ulp_mode |
                FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1));
        req->plen = htonl(len);
        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}
static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
        kfree_skb(skb);
}
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
        int total_size = 0;
        struct sk_buff *skb;

        if (unlikely(csk->state < CTP_ESTABLISHED ||
                csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
                          1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                return 0;
        }

        while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
                int dlen = skb->len;
                int len = skb->len;
                unsigned int credits_needed;

                skb_reset_transport_header(skb);
                if (is_ofld_imm(skb))
                        credits_needed = DIV_ROUND_UP(dlen +
                                        sizeof(struct fw_ofld_tx_data_wr), 16);
                else
                        credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb)
                                        + sizeof(struct fw_ofld_tx_data_wr),
                                        16);

                if (csk->wr_cred < credits_needed) {
                        log_debug(1 << CXGBI_DBG_PDU_TX,
                                "csk 0x%p, skb %u/%u, wr %d < %u.\n",
                                csk, skb->len, skb->data_len,
                                credits_needed, csk->wr_cred);
                        break;
                }
                __skb_unlink(skb, &csk->write_queue);
                set_queue(skb, CPL_PRIORITY_DATA, csk);
                skb->csum = credits_needed;
                csk->wr_cred -= credits_needed;
                csk->wr_una_cred += credits_needed;
                cxgbi_sock_enqueue_wr(csk, skb);

                log_debug(1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
                        csk, skb->len, skb->data_len, credits_needed,
                        csk->wr_cred, csk->wr_una_cred);

                if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
                        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                                send_tx_flowc_wr(csk);
                                csk->wr_una_cred += 5;
                        }
                        len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
                        make_tx_data_wr(csk, skb, dlen, len, credits_needed,
                                        req_completion);
                        cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
                }
                total_size += skb->truesize;
                t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
                        csk, csk->state, csk->flags, csk->tid, skb, len);

                cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
        }
        return total_size;
}
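/*
 * Credit accounting sketch: wr_cred counts the 16-byte tx credits the
 * hardware still has room for.  Each skb's cost (WR header plus immediate
 * data or SGL flits, rounded up to 16 bytes) is computed above, stashed in
 * skb->csum so the completion path can give it back, and moved from wr_cred
 * to wr_una_cred until the FW4_ACK for that WR arrives (see do_fw4_ack()
 * and cxgbi_sock_rcv_wr_ack() in libcxgbi).  If the next skb does not fit,
 * the loop stops and resumes once credits are returned.
 */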
static inline void free_atid(struct cxgbi_sock *csk)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
                cxgb4_free_atid(lldi->tids, csk->atid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
        }
}
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
        unsigned short tcp_opt = ntohs(req->tcp_opt);
        unsigned int tid = GET_TID(req);
        unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);

        csk = lookup_atid(t, atid);
        if (unlikely(!csk)) {
                pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
                goto rel_skb;
        }

        if (csk->atid != atid) {
                pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
                        atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n",
                csk, csk->state, csk->flags, tid, atid, rcv_isn);

        cxgb4_insert_tid(lldi->tids, csk, tid);
        cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

        spin_lock_bh(&csk->lock);
        if (unlikely(csk->state != CTP_ACTIVE_OPEN))
                pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
                        csk, csk->state, csk->flags, csk->tid);

        if (csk->retry_timer.function) {
                del_timer(&csk->retry_timer);
                csk->retry_timer.function = NULL;
        }

        csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
        /*
         * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
         * pass through opt0.
         */
        if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
                csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);

        csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
        if (GET_TCPOPT_TSTAMP(tcp_opt))
                csk->advmss -= 12;
        if (csk->advmss < 128)
                csk->advmss = 128;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, mss_idx %u, advmss %u.\n",
                csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);

        cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

        if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
                send_abort_req(csk);
        else {
                if (skb_queue_len(&csk->write_queue))
                        push_tx_frames(csk, 0);
                cxgbi_conn_tx_open(csk);
        }

        spin_unlock_bh(&csk->lock);

rel_skb:
        __kfree_skb(skb);
}
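/*
 * The advmss arithmetic above, as a worked example: the negotiated MSS index
 * is looked up in the adapter's MTU table, 40 bytes of IPv4 + TCP headers
 * are subtracted, and 12 more if TCP timestamps were negotiated (e.g. a
 * 1500-byte MTU gives 1460, or 1448 with timestamps), with a 128-byte
 * floor.  The value is later advertised to the firmware through the
 * FW_FLOWC_MNEM_MSS entry in send_tx_flowc_wr().
 */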
static int act_open_rpl_status_to_errno(int status)
{
        switch (status) {
        case CPL_ERR_CONN_RESET:
                return -ECONNREFUSED;
        case CPL_ERR_ARP_MISS:
                return -EHOSTUNREACH;
        case CPL_ERR_CONN_TIMEDOUT:
                return -ETIMEDOUT;
        case CPL_ERR_TCAM_FULL:
                return -ENOMEM;
        case CPL_ERR_CONN_EXIST:
                return -EADDRINUSE;
        default:
                return -EIO;
        }
}
static void csk_act_open_retry_timer(unsigned long data)
{
        struct sk_buff *skb;
        struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        spin_lock_bh(&csk->lock);
        skb = alloc_wr(is_t4(lldi->adapter_type) ?
                                sizeof(struct cpl_act_open_req) :
                                sizeof(struct cpl_t5_act_open_req),
                        0, GFP_ATOMIC);
        if (!skb)
                cxgbi_sock_fail_act_open(csk, -ENOMEM);
        else {
                skb->sk = (struct sock *)csk;
                t4_set_arp_err_handler(skb, csk,
                                       cxgbi_sock_act_open_req_arp_failure);
                send_act_open_req(csk, skb, csk->l2t);
        }
        spin_unlock_bh(&csk->lock);
}
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        unsigned int atid =
                GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
        unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_atid(t, atid);
        if (unlikely(!csk)) {
                pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
                goto rel_skb;
        }

        pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n",
                &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
                &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
                atid, tid, status, csk, csk->state, csk->flags);

        if (status == CPL_ERR_RTX_NEG_ADVICE)
                goto rel_skb;

        if (status && status != CPL_ERR_TCAM_FULL &&
            status != CPL_ERR_CONN_EXIST &&
            status != CPL_ERR_ARP_MISS)
                cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));

        spin_lock_bh(&csk->lock);

        if (status == CPL_ERR_CONN_EXIST &&
            csk->retry_timer.function != csk_act_open_retry_timer) {
                csk->retry_timer.function = csk_act_open_retry_timer;
                mod_timer(&csk->retry_timer, jiffies + HZ / 2);
        } else
                cxgbi_sock_fail_act_open(csk,
                                act_open_rpl_status_to_errno(status));

        spin_unlock_bh(&csk->lock);

rel_skb:
        __kfree_skb(skb);
}
static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
        unsigned int tid = GET_TID(req);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_peer_close(csk);
rel_skb:
        __kfree_skb(skb);
}
static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
        __kfree_skb(skb);
}
static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
                                 int *need_rst)
{
        switch (abort_reason) {
        case CPL_ERR_BAD_SYN: /* fall through */
        case CPL_ERR_CONN_RESET:
                return csk->state > CTP_ESTABLISHED ?
                        -EPIPE : -ECONNRESET;
        case CPL_ERR_XMIT_TIMEDOUT:
        case CPL_ERR_PERSIST_TIMEDOUT:
        case CPL_ERR_FINWAIT2_TIMEDOUT:
        case CPL_ERR_KEEPALIVE_TIMEDOUT:
                return -ETIMEDOUT;
        default:
                return -EIO;
        }
}
static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
        unsigned int tid = GET_TID(req);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        int rst_status = CPL_ABORT_NO_RST;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u, status 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, req->status);

        if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
            req->status == CPL_ERR_PERSIST_NEG_ADVICE)
                goto rel_skb;

        spin_lock_bh(&csk->lock);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
                cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
                cxgbi_sock_set_state(csk, CTP_ABORTING);
                goto done;
        }

        cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
        send_abort_rpl(csk, rst_status);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
                csk->err = abort_status_to_errno(csk, req->status, &rst_status);
                cxgbi_sock_closed(csk);
        }
done:
        spin_unlock_bh(&csk->lock);
rel_skb:
        __kfree_skb(skb);
}
static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (!csk)
                goto rel_skb;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
                rpl->status, csk, csk ? csk->state : 0,
                csk ? csk->flags : 0UL);

        if (rpl->status == CPL_ERR_ABORT_FAILED)
                goto rel_skb;

        cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
        __kfree_skb(skb);
}
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
        unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
        unsigned int tid = GET_TID(cpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find conn. for tid %u.\n", tid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, skb, skb->len,
                pdu_len_ddp);

        spin_lock_bh(&csk->lock);

        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk 0x%p,%u,0x%lx,%u, bad state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                if (csk->state != CTP_ABORTING)
                        goto abort_conn;
                else
                        goto discard;
        }

        cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
        cxgbi_skcb_flags(skb) = 0;

        skb_reset_transport_header(skb);
        __skb_pull(skb, sizeof(*cpl));
        __pskb_trim(skb, ntohs(cpl->len));

        if (!csk->skb_ulp_lhdr) {
                unsigned char *bhs;
                unsigned int hlen, dlen, plen;

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
                        csk, csk->state, csk->flags, csk->tid, skb);
                csk->skb_ulp_lhdr = skb;
                cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

                if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
                        pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
                                csk->tid, cxgbi_skcb_tcp_seq(skb),
                                csk->rcv_nxt);
                        goto abort_conn;
                }

                bhs = skb->data;
                hlen = ntohs(cpl->len);
                dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

                plen = ISCSI_PDU_LEN(pdu_len_ddp);
                if (is_t4(lldi->adapter_type))
                        plen -= 40;

                if ((hlen + dlen) != plen) {
                        pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
                                "mismatch %u != %u + %u, seq 0x%x.\n",
                                csk->tid, plen, hlen, dlen,
                                cxgbi_skcb_tcp_seq(skb));
                        goto abort_conn;
                }

                cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
                if (dlen)
                        cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
                csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
                        csk, skb, *bhs, hlen, dlen,
                        ntohl(*((unsigned int *)(bhs + 16))),
                        ntohl(*((unsigned int *)(bhs + 24))));
        } else {
                struct sk_buff *lskb = csk->skb_ulp_lhdr;

                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
                        csk, csk->state, csk->flags, skb, lskb);
        }

        __skb_queue_tail(&csk->receive_queue, skb);
        spin_unlock_bh(&csk->lock);
        return;

abort_conn:
        send_abort_req(csk);
discard:
        spin_unlock_bh(&csk->lock);
rel_skb:
        __kfree_skb(skb);
}
static void do_rx_data_ddp(struct cxgbi_device *cdev,
                           struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct sk_buff *lskb;
        struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        unsigned int status = ntohl(rpl->ddpvld);

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
                csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);

        spin_lock_bh(&csk->lock);

        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk 0x%p,%u,0x%lx,%u, bad state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                if (csk->state != CTP_ABORTING)
                        goto abort_conn;
                else
                        goto discard;
        }

        if (!csk->skb_ulp_lhdr) {
                pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
                goto abort_conn;
        }

        lskb = csk->skb_ulp_lhdr;
        csk->skb_ulp_lhdr = NULL;

        cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

        if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
                pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
                        csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

        if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
                pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
                        csk, lskb, status, cxgbi_skcb_flags(lskb));
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
        }
        if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
                pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
                        csk, lskb, status, cxgbi_skcb_flags(lskb));
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
        }
        if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
                log_debug(1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
                        csk, lskb, status);
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
        }
        if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
            !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
                log_debug(1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
                        csk, lskb, status);
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
        }
        log_debug(1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
                csk, lskb, cxgbi_skcb_flags(lskb));

        cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
        cxgbi_conn_pdu_ready(csk);
        spin_unlock_bh(&csk->lock);
        goto rel_skb;

abort_conn:
        send_abort_req(csk);
discard:
        spin_unlock_bh(&csk->lock);
rel_skb:
        __kfree_skb(skb);
}
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk))
                pr_err("can't find connection for tid %u.\n", tid);
        else {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk 0x%p,%u,0x%lx,%u.\n",
                        csk, csk->state, csk->flags, csk->tid);
                cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
                                      rpl->seq_vld);
        }
        __kfree_skb(skb);
}
static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        struct cxgbi_sock *csk;

        csk = lookup_tid(t, tid);
        if (!csk)
                pr_err("can't find conn. for tid %u.\n", tid);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, rpl->status);

        if (rpl->status != CPL_ERR_NONE)
                pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
                        csk, tid, rpl->status);

        __kfree_skb(skb);
}
static int alloc_cpls(struct cxgbi_sock *csk)
{
        csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
                                  0, GFP_KERNEL);
        if (!csk->cpl_close)
                return -ENOMEM;

        csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
                                      0, GFP_KERNEL);
        if (!csk->cpl_abort_req)
                goto free_cpls;

        csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
                                      0, GFP_KERNEL);
        if (!csk->cpl_abort_rpl)
                goto free_cpls;
        return 0;

free_cpls:
        cxgbi_sock_free_cpl_skbs(csk);
        return -ENOMEM;
}
static inline void l2t_put(struct cxgbi_sock *csk)
{
        if (csk->l2t) {
                cxgb4_l2t_release(csk->l2t);
                csk->l2t = NULL;
                cxgbi_sock_put(csk);
        }
}
static void release_offload_resources(struct cxgbi_sock *csk)
{
        struct cxgb4_lld_info *lldi;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        cxgbi_sock_free_cpl_skbs(csk);
        if (csk->wr_cred != csk->wr_max_cred) {
                cxgbi_sock_purge_wr_queue(csk);
                cxgbi_sock_reset_wr_list(csk);
        }

        l2t_put(csk);
        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
                free_atid(csk);
        else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
                lldi = cxgbi_cdev_priv(csk->cdev);
                cxgb4_remove_tid(lldi->tids, 0, csk->tid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
                cxgbi_sock_put(csk);
        }
        csk->dst = NULL;
        csk->cdev = NULL;
}
static int init_act_open(struct cxgbi_sock *csk)
{
        struct cxgbi_device *cdev = csk->cdev;
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct net_device *ndev = cdev->ports[csk->port_id];
        struct port_info *pi = netdev_priv(ndev);
        struct sk_buff *skb = NULL;
        struct neighbour *n;
        unsigned int step;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
        if (csk->atid < 0) {
                pr_err("%s, NO atid available.\n", ndev->name);
                return -EINVAL;
        }
        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
        cxgbi_sock_get(csk);

        n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
        if (!n) {
                pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
                goto rel_resource;
        }
        csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
        if (!csk->l2t) {
                pr_err("%s, cannot alloc l2t.\n", ndev->name);
                goto rel_resource;
        }
        cxgbi_sock_get(csk);

        skb = alloc_wr(is_t4(lldi->adapter_type) ?
                                sizeof(struct cpl_act_open_req) :
                                sizeof(struct cpl_t5_act_open_req),
                        0, GFP_ATOMIC);
        if (!skb)
                goto rel_resource;
        skb->sk = (struct sock *)csk;
        t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

        csk->mtu = dst_mtu(csk->dst);
        cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
        csk->tx_chan = cxgb4_port_chan(ndev);
        /* SMT two entries per row */
        csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
        step = lldi->ntxq / lldi->nchan;
        csk->txq_idx = cxgb4_port_idx(ndev) * step;
        step = lldi->nrxq / lldi->nchan;
        csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
        csk->wr_max_cred = csk->wr_cred = lldi->wr_cred;
        csk->wr_una_cred = 0;
        cxgbi_sock_reset_wr_list(csk);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,p%d,%s, %u,%u,%u, mss %u,%u, smac %u.\n",
                csk, pi->port_id, ndev->name, csk->tx_chan,
                csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx,
                csk->smac_idx);

        cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
        send_act_open_req(csk, skb, csk->l2t);
        neigh_release(n);
        return 0;

rel_resource:
        if (n)
                neigh_release(n);
        if (skb)
                __kfree_skb(skb);
        return -EINVAL;
}
#define CPL_ISCSI_DATA		0xB2
#define CPL_RX_ISCSI_DDP	0x49
cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
        [CPL_ACT_ESTABLISH] = do_act_establish,
        [CPL_ACT_OPEN_RPL] = do_act_open_rpl,
        [CPL_PEER_CLOSE] = do_peer_close,
        [CPL_ABORT_REQ_RSS] = do_abort_req_rss,
        [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
        [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
        [CPL_FW4_ACK] = do_fw4_ack,
        [CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
        [CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
        [CPL_SET_TCB_RPL] = do_set_tcb_rpl,
        [CPL_RX_DATA_DDP] = do_rx_data_ddp,
        [CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
};
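/*
 * Dispatch sketch: t4_uld_rx_handler() below pulls the 8-bit CPL opcode out
 * of the message and indexes this table directly, so every handler has the
 * same (cxgbi_device *, sk_buff *) signature and owns the skb it is handed
 * (each frees it on its rel_skb/exit path).  CPL_ISCSI_DATA and
 * CPL_RX_ISCSI_DDP appear to be the T5-era opcodes routed to the same
 * handlers as their T4 counterparts.
 */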
int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
        int rc;

        if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
                cxgb4i_max_connect = CXGB4I_MAX_CONN;

        rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
                                         cxgb4i_max_connect);
        if (rc < 0)
                return rc;

        cdev->csk_release_offload_resources = release_offload_resources;
        cdev->csk_push_tx_frames = push_tx_frames;
        cdev->csk_send_abort_req = send_abort_req;
        cdev->csk_send_close_req = send_close_req;
        cdev->csk_send_rx_credits = send_rx_credits;
        cdev->csk_alloc_cpls = alloc_cpls;
        cdev->csk_init_act_open = init_act_open;

        pr_info("cdev 0x%p, offload up, added.\n", cdev);
        return 0;
}
/*
 * functions to program the pagepod in h/w
 */
#define ULPMEM_IDATA_MAX_NPPODS	4 /* 256/PPOD_SIZE */
static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
                                struct ulp_mem_io *req,
                                unsigned int wr_len, unsigned int dlen,
                                unsigned int pm_addr)
{
        struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

        INIT_ULPTX_WR(req, wr_len, 0, 0);
        if (is_t4(lldi->adapter_type))
                req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
                                 (ULP_MEMIO_ORDER(1)));
        else
                req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
                                 (V_T5_ULP_MEMIO_IMM(1)));
        req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
        req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
        req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

        idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM));
        idata->len = htonl(dlen);
}
static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
                                struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
                                unsigned int npods,
                                struct cxgbi_gather_list *gl,
                                unsigned int gl_pidx)
{
        struct cxgbi_ddp_info *ddp = cdev->ddp;
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct sk_buff *skb;
        struct ulp_mem_io *req;
        struct ulptx_idata *idata;
        struct cxgbi_pagepod *ppod;
        unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
        unsigned int dlen = PPOD_SIZE * npods;
        unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
                                sizeof(struct ulptx_idata) + dlen, 16);
        unsigned int i;

        skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
        if (!skb) {
                pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
                        cdev, idx, npods);
                return -ENOMEM;
        }
        req = (struct ulp_mem_io *)skb->head;
        set_queue(skb, CPL_PRIORITY_CONTROL, NULL);

        ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
        idata = (struct ulptx_idata *)(req + 1);
        ppod = (struct cxgbi_pagepod *)(idata + 1);

        for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
                if (!hdr && !gl)
                        cxgbi_ddp_ppod_clear(ppod);
                else
                        cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
        }

        cxgb4_ofld_send(cdev->ports[port_id], skb);
        return 0;
}
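/*
 * Pagepod programming sketch: each DDP pagepod is PPOD_SIZE bytes of
 * adapter memory, so writing npods of them needs dlen = PPOD_SIZE * npods
 * bytes of immediate ULP_TX data, and ULPMEM_IDATA_MAX_NPPODS (4) caps a
 * single write at 256 bytes.  ddp_set_map()/ddp_clear_map() below chop a
 * larger request into chunks of at most 4 pagepods per work request.
 * (Sizes follow the PPOD_SIZE definition in libcxgbi.)
 */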
static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
                       unsigned int idx, unsigned int npods,
                       struct cxgbi_gather_list *gl)
{
        unsigned int i, cnt;
        int err = 0;

        for (i = 0; i < npods; i += cnt, idx += cnt) {
                cnt = npods - i;
                if (cnt > ULPMEM_IDATA_MAX_NPPODS)
                        cnt = ULPMEM_IDATA_MAX_NPPODS;
                err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
                                           idx, cnt, gl, 4 * i);
                if (err < 0)
                        break;
        }
        return err;
}
static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
                          unsigned int idx, unsigned int npods)
{
        unsigned int i, cnt;
        int err;

        for (i = 0; i < npods; i += cnt, idx += cnt) {
                cnt = npods - i;
                if (cnt > ULPMEM_IDATA_MAX_NPPODS)
                        cnt = ULPMEM_IDATA_MAX_NPPODS;
                err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
                                           idx, cnt, NULL, 0);
                if (err < 0)
                        break;
        }
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
                                int pg_idx, bool reply)
{
        struct sk_buff *skb;
        struct cpl_set_tcb_field *req;

        if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
                return 0;

        skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        /* set up ulp page size */
        req = (struct cpl_set_tcb_field *)skb->head;
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
        req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
        req->word_cookie = htons(0);
        req->mask = cpu_to_be64(0x3 << 8);
        req->val = cpu_to_be64(pg_idx << 8);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
        return 0;
}
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
                                 int hcrc, int dcrc, int reply)
{
        struct sk_buff *skb;
        struct cpl_set_tcb_field *req;

        if (!hcrc && !dcrc)
                return 0;

        skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        csk->hcrc_len = (hcrc ? 4 : 0);
        csk->dcrc_len = (dcrc ? 4 : 0);
        /* set up ulp submode */
        req = (struct cpl_set_tcb_field *)skb->head;
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
        req->word_cookie = htons(0);
        req->mask = cpu_to_be64(0x3 << 4);
        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
                                (dcrc ? ULP_CRC_DATA : 0)) << 4);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
        return 0;
}
static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct cxgbi_ddp_info *ddp = cdev->ddp;
        unsigned int tagmask, pgsz_factor[4];
        int err;

        if (ddp) {
                kref_get(&ddp->refcnt);
                pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
                        cdev, cdev->ddp);
                return -EALREADY;
        }

        err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
                        lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
                        lldi->iscsi_iolen, lldi->iscsi_iolen);
        if (err < 0)
                return err;

        ddp = cdev->ddp;

        tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
        cxgbi_ddp_page_size_factor(pgsz_factor);
        cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);

        cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
        cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
        cdev->csk_ddp_set = ddp_set_map;
        cdev->csk_ddp_clear = ddp_clear_map;

        pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
                cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
                cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
        pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
                "%u/%u.\n",
                cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
                ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
                ddp->max_rxsz, lldi->iscsi_iolen);
        pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
                cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
                ddp->max_rxsz);
        return 0;
}
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
        struct cxgbi_device *cdev;
        struct port_info *pi;
        int i, rc;

        cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
        if (!cdev) {
                pr_info("t4 device 0x%p, register failed.\n", lldi);
                return NULL;
        }
        pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
                cdev, lldi->adapter_type, lldi->nports,
                lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
                lldi->nrxq, lldi->wr_cred);
        for (i = 0; i < lldi->nrxq; i++)
                log_debug(1 << CXGBI_DBG_DEV,
                        "t4 0x%p, rxq id #%d: %u.\n",
                        cdev, i, lldi->rxq_ids[i]);

        memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
        cdev->flags = CXGBI_FLAG_DEV_T4;
        cdev->pdev = lldi->pdev;
        cdev->ports = lldi->ports;
        cdev->nports = lldi->nports;
        cdev->mtus = lldi->mtus;
        cdev->nmtus = NMTUS;
        cdev->snd_win = cxgb4i_snd_win;
        cdev->rcv_win = cxgb4i_rcv_win;
        cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
        cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
        cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
        cdev->itp = &cxgb4i_iscsi_transport;

        cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8;
        pr_info("cdev 0x%p,%s, pfvf %u.\n",
                cdev, lldi->ports[0]->name, cdev->pfvf);

        rc = cxgb4i_ddp_init(cdev);
        if (rc) {
                pr_info("t4 0x%p ddp init failed.\n", cdev);
                goto err_out;
        }
        rc = cxgb4i_ofld_init(cdev);
        if (rc) {
                pr_info("t4 0x%p ofld init failed.\n", cdev);
                goto err_out;
        }

        rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
                            &cxgb4i_host_template, cxgb4i_stt);
        if (rc)
                goto err_out;

        for (i = 0; i < cdev->nports; i++) {
                pi = netdev_priv(lldi->ports[i]);
                cdev->hbas[i]->port_id = pi->port_id;
        }
        return cdev;

err_out:
        cxgbi_device_unregister(cdev);
        return ERR_PTR(-ENOMEM);
}
#define RX_PULL_LEN	128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
                             const struct pkt_gl *pgl)
{
        const struct cpl_act_establish *rpl;
        struct sk_buff *skb;
        unsigned int opc;
        struct cxgbi_device *cdev = handle;

        if (pgl == NULL) {
                unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

                skb = alloc_wr(len, 0, GFP_ATOMIC);
                if (!skb)
                        goto nomem;
                skb_copy_to_linear_data(skb, &rsp[1], len);
        } else {
                if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
                        pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
                                pgl->va, be64_to_cpu(*rsp),
                                be64_to_cpu(*(u64 *)pgl->va),
                                pgl->tot_len);
                        return 0;
                }
                skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
                if (unlikely(!skb))
                        goto nomem;
        }

        rpl = (struct cpl_act_establish *)skb->data;
        opc = rpl->ot.opcode;
        log_debug(1 << CXGBI_DBG_TOE,
                "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
                cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
        if (cxgb4i_cplhandlers[opc])
                cxgb4i_cplhandlers[opc](cdev, skb);
        else {
                pr_err("No handler for opcode 0x%x.\n", opc);
                __kfree_skb(skb);
        }
        return 0;
nomem:
        log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
        return 1;
}
static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
        struct cxgbi_device *cdev = handle;

        switch (state) {
        case CXGB4_STATE_UP:
                pr_info("cdev 0x%p, UP.\n", cdev);
                break;
        case CXGB4_STATE_START_RECOVERY:
                pr_info("cdev 0x%p, RECOVERY.\n", cdev);
                /* close all connections */
                break;
        case CXGB4_STATE_DOWN:
                pr_info("cdev 0x%p, DOWN.\n", cdev);
                break;
        case CXGB4_STATE_DETACH:
                pr_info("cdev 0x%p, DETACH.\n", cdev);
                cxgbi_device_unregister(cdev);
                break;
        default:
                pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
                break;
        }
        return 0;
}
static int __init cxgb4i_init_module(void)
{
        int rc;

        printk(KERN_INFO "%s", version);

        rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
        if (rc < 0)
                return rc;
        cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
        return 0;
}

static void __exit cxgb4i_exit_module(void)
{
        cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
        cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
        cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);