// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>

#include <asm/unaligned.h>
#include <net/tcp.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "cxgbit.h"
struct sge_opaque_hdr {
	void *dev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
static const u8 cxgbit_digest_len[] = {0, 4, 4, 8};
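/*
 * cxgbit_digest_len[] is indexed by the digest submode bitmask:
 * bit 0 = CXGBIT_SUBMODE_HCRC, bit 1 = CXGBIT_SUBMODE_DCRC. For
 * example, submode 0 adds no digest bytes, 1 or 2 add a single
 * 4-byte CRC32C digest, and 3 (both digests) adds 8 bytes per PDU.
 */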
#define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \
		    sizeof(struct fw_ofld_tx_data_wr))
static struct sk_buff *
__cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
{
	struct sk_buff *skb = NULL;
	u8 submode = 0;
	int errcode;
	static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN;

	if (len) {
		skb = alloc_skb_with_frags(hdr_len, len,
					   0, &errcode, GFP_KERNEL);
		if (!skb)
			return NULL;

		skb_reserve(skb, TX_HDR_LEN);
		skb_reset_transport_header(skb);
		__skb_put(skb, ISCSI_HDR_LEN);
		skb->data_len = len;
		skb->len += len;
		submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);

	} else {
		u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0;

		skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL);
		if (!skb)
			return NULL;

		skb_reserve(skb, TX_HDR_LEN + iso_len);
		skb_reset_transport_header(skb);
		__skb_put(skb, ISCSI_HDR_LEN);
	}

	submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);
	cxgbit_skcb_submode(skb) = submode;
	cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode];
	cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR;
	return skb;
}
static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len)
{
	return __cxgbit_alloc_skb(csk, len, false);
}
/*
 * cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
{
	int length = skb->len;

	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
		length += sizeof(struct fw_ofld_tx_data_wr);

	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
		length += sizeof(struct cpl_tx_data_iso);

#define MAX_IMM_TX_PKT_LEN 256
	return length <= MAX_IMM_TX_PKT_LEN;
}
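/*
 * With MAX_IMM_TX_PKT_LEN == 256, a PDU whose payload plus any
 * fw_ofld_tx_data_wr/cpl_tx_data_iso headers fits in 256 bytes is
 * copied inline into the work request itself; anything larger is
 * handed to the hardware as a scatter/gather list instead.
 */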
/*
 * cxgbit_sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int cxgbit_sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
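/*
 * Worked example: for n = 3 SGL entries, after the n-- the formula
 * gives (3 * 2) / 2 + (2 & 1) + 2 = 3 + 0 + 2 = 5 flits (a flit is
 * 8 bytes on the SGE).
 */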
/*
 * cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (cxgbit_is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + cxgbit_sgl_len(cnt);
}
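/*
 * Example: an immediate 200-byte packet needs DIV_ROUND_UP(200, 8) =
 * 25 flits. A non-immediate skb whose transport header sits 64 bytes
 * into the linear area and which carries 4 page frags needs 64 / 8 = 8
 * header flits plus cxgbit_sgl_len(4) = 7 SGL flits; one extra SG
 * entry is counted when the linear tail extends past the transport
 * header.
 */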
#define CXGBIT_ISO_FSLICE 0x1
#define CXGBIT_ISO_LSLICE 0x2
static void
cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info)
{
	struct cpl_tx_data_iso *cpl;
	unsigned int submode = cxgbit_skcb_submode(skb);
	unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE);
	unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE);

	cpl = __skb_push(skb, sizeof(*cpl));

	cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
			CPL_TX_DATA_ISO_FIRST_V(fslice) |
			CPL_TX_DATA_ISO_LAST_V(lslice) |
			CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
			CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
			CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
			CPL_TX_DATA_ISO_IMMEDIATE_V(0) |
			CPL_TX_DATA_ISO_SCSI_V(2));

	cpl->ahs_len = 0;
	cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4));
	cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4));
	cpl->len = htonl(iso_info->len);
	cpl->reserved2_seglen_offset = htonl(0);
	cpl->datasn_offset = htonl(0);
	cpl->buffer_offset = htonl(0);
	cpl->reserved3 = 0;

	__skb_pull(skb, sizeof(*cpl));
}
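/*
 * Note: the CPL is built with __skb_push() and then immediately hidden
 * again with __skb_pull(), so skb->data keeps pointing at the iSCSI
 * BHS. The headroom for the WR plus the ISO CPL was reserved up front
 * by __cxgbit_alloc_skb(), and cxgbit_tx_data_wr() re-pushes that
 * region when the skb is finally handed to the hardware.
 */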
static void
cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
		  u32 len, u32 credits, u32 compl)
{
	struct fw_ofld_tx_data_wr *req;
	const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
	u32 submode = cxgbit_skcb_submode(skb);
	u32 wr_ulp_mode = 0;
	u32 hdr_size = sizeof(*req);
	u32 opcode = FW_OFLD_TX_DATA_WR;
	u32 immlen = 0;
	u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
		    T6_TX_FORCE_F;

	if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
		opcode = FW_ISCSI_TX_DATA_WR;
		immlen += sizeof(struct cpl_tx_data_iso);
		hdr_size += sizeof(struct cpl_tx_data_iso);
	}

	if (cxgbit_is_ofld_imm(skb))
		immlen += dlen;

	req = __skb_push(skb, hdr_size);
	req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
					FW_WR_COMPL_V(compl) |
					FW_WR_IMMDLEN_V(immlen));
	req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
					FW_WR_LEN16_V(credits));
	req->plen = htonl(len);
	wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) |
		      FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);

	req->tunnel_to_proxy = htonl((wr_ulp_mode) | force |
		FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 0 : 1));
}
static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}
void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;

	while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
		u32 dlen = skb->len;
		u32 len = skb->len;
		u32 credits_needed;
		u32 compl = 0;
		u32 flowclen16 = 0;
		u32 iso_cpl_len = 0;

		if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)
			iso_cpl_len = sizeof(struct cpl_tx_data_iso);

		if (cxgbit_is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
		else
			credits_needed = DIV_ROUND_UP((8 *
					cxgbit_calc_tx_flits_ofld(skb)) +
					iso_cpl_len, 16);

		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
			credits_needed += DIV_ROUND_UP(
				sizeof(struct fw_ofld_tx_data_wr), 16);

		/*
		 * Assumes the initial credits is large enough to support
		 * fw_flowc_wr plus largest possible first payload
		 */
		if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) {
			flowclen16 = cxgbit_send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
		}

		if (csk->wr_cred < credits_needed) {
			pr_debug("csk 0x%p, skb %u/%u, wr %d < %u.\n",
				 csk, skb->len, skb->data_len,
				 credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->txq);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
		skb->csum = (__force __wsum)(credits_needed + flowclen16);
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;

		pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			 csk, skb->len, skb->data_len, credits_needed,
			 csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) {
			len += cxgbit_skcb_tx_extralen(skb);

			if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
			    (!before(csk->write_seq,
				     csk->snd_una + csk->snd_win))) {
				compl = 1;
				csk->wr_una_cred = 0;
			}

			cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed,
					  compl);
			csk->snd_nxt += len;

		} else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) ||
			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
			struct cpl_close_con_req *req =
				(struct cpl_close_con_req *)skb->data;
			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
			csk->wr_una_cred = 0;
		}

		cxgbit_sock_enqueue_wr(csk, skb);
		t4_set_arp_err_handler(skb, csk,
				       cxgbit_arp_failure_skb_discard);

		pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n",
			 csk, csk->tid, skb, len);

		cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
	}
}
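/*
 * Note: skb->csum is not a checksum on this path; it is reused to
 * stash the number of 16-byte credits this WR consumed (including the
 * one-time flowc WR) so the credits can be accounted back when the
 * completion for the WR arrives.
 */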
static bool cxgbit_lock_sock(struct cxgbit_sock *csk)
{
	spin_lock_bh(&csk->lock);

	if (before(csk->write_seq, csk->snd_una + csk->snd_win))
		csk->lock_owner = true;

	spin_unlock_bh(&csk->lock);

	return csk->lock_owner;
}
static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
{
	struct sk_buff_head backlogq;
	struct sk_buff *skb;
	void (*fn)(struct cxgbit_sock *, struct sk_buff *);

	skb_queue_head_init(&backlogq);

	spin_lock_bh(&csk->lock);
	while (skb_queue_len(&csk->backlogq)) {
		skb_queue_splice_init(&csk->backlogq, &backlogq);
		spin_unlock_bh(&csk->lock);

		while ((skb = __skb_dequeue(&backlogq))) {
			fn = cxgbit_skcb_rx_backlog_fn(skb);
			fn(csk, skb);
		}

		spin_lock_bh(&csk->lock);
	}

	csk->lock_owner = false;
	spin_unlock_bh(&csk->lock);
}
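/*
 * Locking model: cxgbit_lock_sock()/cxgbit_unlock_sock() implement an
 * owner flag protected by csk->lock. While the tx path owns the
 * socket, rx-side work is deferred onto csk->backlogq; on unlock the
 * backlog is drained by calling each skb's stashed handler before the
 * owner flag is cleared.
 */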
static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	int ret = 0;

	wait_event_interruptible(csk->ack_waitq, cxgbit_lock_sock(csk));

	if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
		     signal_pending(current))) {
		__kfree_skb(skb);
		__skb_queue_purge(&csk->ppodq);
		ret = -1;
		spin_lock_bh(&csk->lock);
		if (csk->lock_owner) {
			spin_unlock_bh(&csk->lock);
			goto out;
		}
		spin_unlock_bh(&csk->lock);
		return ret;
	}

	csk->write_seq += skb->len +
			  cxgbit_skcb_tx_extralen(skb);

	skb_queue_splice_tail_init(&csk->ppodq, &csk->txq);
	__skb_queue_tail(&csk->txq, skb);
	cxgbit_push_tx_frames(csk);

out:
	cxgbit_unlock_sock(csk);
	return ret;
}
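/*
 * cxgbit_queue_skb() also provides send-window flow control: the
 * wait_event_interruptible() above only proceeds once
 * cxgbit_lock_sock() sees write_seq inside snd_una + snd_win, so the
 * caller sleeps on ack_waitq until the peer acks enough data (or a
 * signal arrives, handled by the state/signal check).
 */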
static int
cxgbit_map_skb(struct iscsi_cmd *cmd, struct sk_buff *skb, u32 data_offset,
	       u32 data_length)
{
	u32 i = 0, nr_frags = MAX_SKB_FRAGS;
	u32 padding = ((-data_length) & 3);
	struct scatterlist *sg;
	struct page *page;
	unsigned int page_off;

	if (padding)
		nr_frags--;

	/*
	 * We know each entry in t_data_sg contains a page.
	 */
	sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
	page_off = (data_offset % PAGE_SIZE);

	while (data_length && (i < nr_frags)) {
		u32 cur_len = min_t(u32, data_length, sg->length - page_off);

		page = sg_page(sg);

		get_page(page);
		skb_fill_page_desc(skb, i, page, sg->offset + page_off,
				   cur_len);
		skb->data_len += cur_len;
		skb->len += cur_len;
		skb->truesize += cur_len;

		data_length -= cur_len;
		page_off = 0;
		sg = sg_next(sg);
		i++;
	}

	if (data_length)
		return -1;

	if (padding) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return -1;
		skb_fill_page_desc(skb, i, page, 0, padding);
		skb->data_len += padding;
		skb->len += padding;
		skb->truesize += padding;
	}

	return 0;
}
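/*
 * The padding computation ((-data_length) & 3) rounds the payload up
 * to a 4-byte boundary as iSCSI data segments require: e.g. a 5-byte
 * payload needs 3 pad bytes, a 6-byte payload needs 2, and any
 * multiple of 4 needs none.
 */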
static int
cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
		     struct iscsi_datain_req *dr)
{
	struct iscsi_conn *conn = csk->conn;
	struct sk_buff *skb;
	struct iscsi_datain datain;
	struct cxgbit_iso_info iso_info;
	u32 data_length = cmd->se_cmd.data_length;
	u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
	u32 num_pdu, plen, tx_data = 0;
	bool task_sense = !!(cmd->se_cmd.se_cmd_flags &
		SCF_TRANSPORT_TASK_SENSE);
	bool set_statsn = false;
	int ret = -1;

	while (data_length) {
		num_pdu = (data_length + mrdsl - 1) / mrdsl;
		if (num_pdu > csk->max_iso_npdu)
			num_pdu = csk->max_iso_npdu;

		plen = num_pdu * mrdsl;
		if (plen > data_length)
			plen = data_length;

		skb = __cxgbit_alloc_skb(csk, 0, true);
		if (unlikely(!skb))
			return -ENOMEM;

		memset(skb->data, 0, ISCSI_HDR_LEN);
		cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO;
		cxgbit_skcb_submode(skb) |= (csk->submode &
				CXGBIT_SUBMODE_DCRC);
		cxgbit_skcb_tx_extralen(skb) = (num_pdu *
				cxgbit_digest_len[cxgbit_skcb_submode(skb)]) +
				((num_pdu - 1) * ISCSI_HDR_LEN);

		memset(&datain, 0, sizeof(struct iscsi_datain));
		memset(&iso_info, 0, sizeof(iso_info));

		if (!tx_data)
			iso_info.flags |= CXGBIT_ISO_FSLICE;

		if (!(data_length - plen)) {
			iso_info.flags |= CXGBIT_ISO_LSLICE;
			if (!task_sense) {
				datain.flags = ISCSI_FLAG_DATA_STATUS;
				iscsit_increment_maxcmdsn(cmd, conn->sess);
				cmd->stat_sn = conn->stat_sn++;
				set_statsn = true;
			}
		}

		iso_info.burst_len = num_pdu * mrdsl;
		iso_info.mpdu = mrdsl;
		iso_info.len = ISCSI_HDR_LEN + plen;

		cxgbit_cpl_tx_data_iso(skb, &iso_info);

		datain.offset = tx_data;
		datain.data_sn = cmd->data_sn - 1;

		iscsit_build_datain_pdu(cmd, conn, &datain,
					(struct iscsi_data_rsp *)skb->data,
					set_statsn);

		ret = cxgbit_map_skb(cmd, skb, tx_data, plen);
		if (unlikely(ret)) {
			__kfree_skb(skb);
			goto out;
		}

		ret = cxgbit_queue_skb(csk, skb);
		if (unlikely(ret))
			goto out;

		tx_data += plen;
		data_length -= plen;

		cmd->read_data_done += plen;
		cmd->data_sn += num_pdu;
	}

	dr->dr_complete = DATAIN_COMPLETE_NORMAL;

	return 0;

out:
	return ret;
}
static int
cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
		 const struct iscsi_datain *datain)
{
	struct sk_buff *skb;
	int ret = 0;

	skb = cxgbit_alloc_skb(csk, 0);
	if (unlikely(!skb))
		return -ENOMEM;

	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);

	if (datain->length) {
		cxgbit_skcb_submode(skb) |= (csk->submode &
				CXGBIT_SUBMODE_DCRC);
		cxgbit_skcb_tx_extralen(skb) =
				cxgbit_digest_len[cxgbit_skcb_submode(skb)];
	}

	ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length);
	if (ret < 0) {
		__kfree_skb(skb);
		return ret;
	}

	return cxgbit_queue_skb(csk, skb);
}
static int
cxgbit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		       struct iscsi_datain_req *dr,
		       const struct iscsi_datain *datain)
{
	struct cxgbit_sock *csk = conn->context;
	u32 data_length = cmd->se_cmd.data_length;
	u32 padding = ((-data_length) & 3);
	u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;

	if ((data_length > mrdsl) && (!dr->recovery) &&
	    (!padding) && (!datain->offset) && csk->max_iso_npdu) {
		atomic_long_add(data_length - datain->length,
				&conn->sess->tx_data_octets);
		return cxgbit_tx_datain_iso(csk, cmd, dr);
	}

	return cxgbit_tx_datain(csk, cmd, datain);
}
static int
cxgbit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			  const void *data_buf, u32 data_buf_len)
{
	struct cxgbit_sock *csk = conn->context;
	struct sk_buff *skb;
	u32 padding = ((-data_buf_len) & 3);

	skb = cxgbit_alloc_skb(csk, data_buf_len + padding);
	if (unlikely(!skb))
		return -ENOMEM;

	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);

	if (data_buf_len) {
		u32 pad_bytes = 0;

		skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len);

		if (padding)
			skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len,
				       &pad_bytes, padding);
	}

	cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[
				       cxgbit_skcb_submode(skb)];

	return cxgbit_queue_skb(csk, skb);
}
int
cxgbit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		struct iscsi_datain_req *dr, const void *buf, u32 buf_len)
{
	if (dr)
		return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf);

	return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
}
int cxgbit_validate_params(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct iscsi_param *param;
	u32 max_xmitdsl;

	param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
					  conn->param_list);
	if (!param)
		return -1;

	if (kstrtou32(param->value, 0, &max_xmitdsl) < 0)
		return -1;

	if (max_xmitdsl > cdev->mdsl) {
		if (iscsi_change_param_sprintf(
			conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl))
			return -1;
	}

	return 0;
}
static int cxgbit_set_digest(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_param *param;

	param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list);
	if (!param) {
		pr_err("param not found key %s\n", HEADERDIGEST);
		return -1;
	}

	if (!strcmp(param->value, CRC32C))
		csk->submode |= CXGBIT_SUBMODE_HCRC;

	param = iscsi_find_param_from_key(DATADIGEST, conn->param_list);
	if (!param) {
		csk->submode = 0;
		pr_err("param not found key %s\n", DATADIGEST);
		return -1;
	}

	if (!strcmp(param->value, CRC32C))
		csk->submode |= CXGBIT_SUBMODE_DCRC;

	if (cxgbit_setup_conn_digest(csk)) {
		csk->submode = 0;
		return -1;
	}

	return 0;
}
static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_conn_ops *conn_ops = conn->conn_ops;
	struct iscsi_param *param;
	u32 mrdsl, mbl;
	u32 max_npdu, max_iso_npdu;
	u32 max_iso_payload;

	if (conn->login->leading_connection) {
		param = iscsi_find_param_from_key(MAXBURSTLENGTH,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", MAXBURSTLENGTH);
			return -1;
		}

		if (kstrtou32(param->value, 0, &mbl) < 0)
			return -1;
	} else {
		mbl = conn->sess->sess_ops->MaxBurstLength;
	}

	mrdsl = conn_ops->MaxRecvDataSegmentLength;
	max_npdu = mbl / mrdsl;

	max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);

	max_iso_npdu = max_iso_payload /
		       (ISCSI_HDR_LEN + mrdsl +
			cxgbit_digest_len[csk->submode]);

	csk->max_iso_npdu = min(max_npdu, max_iso_npdu);

	if (csk->max_iso_npdu <= 1)
		csk->max_iso_npdu = 0;

	return 0;
}
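/*
 * Illustrative example (assumed numbers, not taken from hardware):
 * with MaxBurstLength = 262144 and MaxRecvDataSegmentLength = 8192,
 * max_npdu = 32. If the rounded ISO payload comes to 63712 (emss of
 * 1448) and only the data digest is enabled, each PDU costs
 * 48 + 8192 + 4 = 8244 bytes, so max_iso_npdu = 7 and
 * csk->max_iso_npdu = min(32, 7) = 7.
 */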
/*
 * cxgbit_seq_pdu_inorder()
 * @csk: pointer to cxgbit socket structure
 *
 * This function checks whether data sequence and data
 * pdu are in order.
 *
 * Return: returns -1 on error, 0 if data sequence and
 * data pdu are in order, 1 if data sequence or data pdu
 * is not in order.
 */
static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_param *param;

	if (conn->login->leading_connection) {
		param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", DATASEQUENCEINORDER);
			return -1;
		}

		if (strcmp(param->value, YES))
			return 1;

		param = iscsi_find_param_from_key(DATAPDUINORDER,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", DATAPDUINORDER);
			return -1;
		}

		if (strcmp(param->value, YES))
			return 1;

	} else {
		if (!conn->sess->sess_ops->DataSequenceInOrder)
			return 1;
		if (!conn->sess->sess_ops->DataPDUInOrder)
			return 1;
	}

	return 0;
}
static int cxgbit_set_params(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm;
	struct iscsi_conn_ops *conn_ops = conn->conn_ops;
	struct iscsi_param *param;
	u8 erl;

	if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
		conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;

	if (cxgbit_set_digest(csk))
		return -1;

	if (conn->login->leading_connection) {
		param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
						  conn->param_list);
		if (!param) {
			pr_err("param not found key %s\n", ERRORRECOVERYLEVEL);
			return -1;
		}
		if (kstrtou8(param->value, 0, &erl) < 0)
			return -1;
	} else {
		erl = conn->sess->sess_ops->ErrorRecoveryLevel;
	}

	if (!erl) {
		int ret;

		ret = cxgbit_seq_pdu_inorder(csk);
		if (ret < 0) {
			return -1;
		} else if (ret > 0) {
			if (is_t5(cdev->lldi.adapter_type))
				goto enable_ddp;
			else
				return 0;
		}

		if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
			if (cxgbit_set_iso_npdu(csk))
				return -1;
		}

enable_ddp:
		if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) {
			if (cxgbit_setup_conn_pgidx(csk,
						    ppm->tformat.pgsz_idx_dflt))
				return -1;
			set_bit(CSK_DDP_ENABLE, &csk->com.flags);
		}
	}

	return 0;
}
int
cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		    u32 length)
{
	struct cxgbit_sock *csk = conn->context;
	struct sk_buff *skb;
	u32 padding_buf = 0;
	u8 padding = ((-length) & 3);

	skb = cxgbit_alloc_skb(csk, length + padding);
	if (!skb)
		return -ENOMEM;

	skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN);
	skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length);

	if (padding)
		skb_store_bits(skb, ISCSI_HDR_LEN + length,
			       &padding_buf, padding);

	if (login->login_complete) {
		if (cxgbit_set_params(conn)) {
			kfree_skb(skb);
			return -1;
		}

		set_bit(CSK_LOGIN_DONE, &csk->com.flags);
	}

	if (cxgbit_queue_skb(csk, skb))
		return -1;

	if ((!login->login_complete) && (!login->login_failed))
		schedule_delayed_work(&conn->login_work, 0);

	return 0;
}
static void
cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
		      unsigned int nents, u32 skip)
{
	struct skb_seq_state st;
	const u8 *buf;
	unsigned int consumed = 0, buf_len;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb);

	skb_prepare_seq_read(skb, pdu_cb->doffset,
			     pdu_cb->doffset + pdu_cb->dlen,
			     &st);

	while (true) {
		buf_len = skb_seq_read(consumed, &buf, &st);
		if (!buf_len) {
			skb_abort_seq_read(&st);
			break;
		}

		consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
						 buf_len, skip + consumed);
	}
}
static struct iscsi_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev);
	struct cxgbit_cmd *ccmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		pr_err("Unable to allocate iscsi_cmd + cxgbit_cmd\n");
		return NULL;
	}

	ccmd = iscsit_priv_cmd(cmd);
	ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask;
	ccmd->setup_ddp = true;

	return cmd;
}
static int
cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
			     u32 length)
{
	struct iscsi_conn *conn = cmd->conn;
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		pr_err("ImmediateData CRC32C DataDigest error\n");
		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
			pr_err("Unable to recover from"
			       " Immediate Data digest failure while"
			       " in ERL=0.\n");
			iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
					  (unsigned char *)hdr);
			return IMMEDIATE_DATA_CANNOT_RECOVER;
		}

		iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
				  (unsigned char *)hdr);
		return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
	}

	if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
		struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
		struct skb_shared_info *ssi = skb_shinfo(csk->skb);
		skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];

		sg_init_table(&ccmd->sg, 1);
		sg_set_page(&ccmd->sg, skb_frag_page(dfrag),
			    skb_frag_size(dfrag), skb_frag_off(dfrag));
		get_page(skb_frag_page(dfrag));

		cmd->se_cmd.t_data_sg = &ccmd->sg;
		cmd->se_cmd.t_data_nents = 1;

		ccmd->release = true;
	} else {
		struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
		u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));

		cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
	}

	cmd->write_data_done += pdu_cb->dlen;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

	return IMMEDIATE_DATA_NORMAL_OPERATION;
}
static int
cxgbit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
			  bool dump_payload)
{
	struct iscsi_conn *conn = cmd->conn;
	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
	/*
	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
	 */
	if (dump_payload)
		goto after_immediate_data;

	immed_ret = cxgbit_handle_immediate_data(cmd, hdr,
						 cmd->first_burst_len);
after_immediate_data:
	if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
		/*
		 * A PDU/CmdSN carrying Immediate Data passed
		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
		 * Immediate Bit is not set.
		 */
		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
						(unsigned char *)hdr,
						hdr->cmdsn);
		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return -1;

		if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
			target_put_sess_cmd(&cmd->se_cmd);
			return 0;
		} else if (cmd->unsolicited_data) {
			iscsit_set_unsolicited_dataout(cmd);
		}

	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
		/*
		 * Immediate Data failed DataCRC and ERL>=1,
		 * silently drop this PDU and let the initiator
		 * plug the CmdSN gap.
		 *
		 * FIXME: Send Unsolicited NOPIN with reserved
		 * TTT here to help the initiator figure out
		 * the missing CmdSN, although they should be
		 * intelligent enough to determine the missing
		 * CmdSN and issue a retry to plug the sequence.
		 */
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
	} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
		return -1;

	return 0;
}
static int
cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr;
	int rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr);
	if (rc < 0)
		return rc;

	if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) &&
	    (pdu_cb->nr_dfrags == 1))
		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0)
		return 0;
	else if (rc > 0)
		dump_payload = true;

	if (!pdu_cb->dlen)
		return 0;

	return cxgbit_get_immediate_data(cmd, hdr, dump_payload);
}
static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_cmd *cmd = NULL;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
	u32 data_offset = be32_to_cpu(hdr->offset);
	u32 data_len = pdu_cb->dlen;
	int rc, sg_nents, sg_off;
	bool dcrc_err = false;

	if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
		u32 offset = be32_to_cpu(hdr->offset);
		u32 ddp_data_len;
		u32 payload_length = ntoh24(hdr->dlength);
		bool success = false;

		cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
		if (!cmd)
			return 0;

		ddp_data_len = offset - cmd->write_data_done;
		atomic_long_add(ddp_data_len, &conn->sess->rx_data_octets);

		cmd->write_data_done = offset;
		cmd->next_burst_len = ddp_data_len;
		cmd->data_sn = be32_to_cpu(hdr->datasn);

		rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
						cmd, payload_length, &success);
		if (rc < 0)
			return rc;
		else if (!success)
			return 0;
	} else {
		rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
		if (rc < 0)
			return rc;
		else if (!cmd)
			return 0;
	}

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
		       " DataSN: 0x%08x\n",
		       hdr->itt, hdr->offset, data_len,
		       hdr->datasn);
		dcrc_err = true;
		goto check_payload;
	}

	pr_debug("DataOut data_len: %u, "
		 "write_data_done: %u, data_length: %u\n",
		 data_len, cmd->write_data_done,
		 cmd->se_cmd.data_length);

	if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
		u32 skip = data_offset % PAGE_SIZE;

		sg_off = data_offset / PAGE_SIZE;
		sg_start = &cmd->se_cmd.t_data_sg[sg_off];
		sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));

		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
	}

check_payload:

	rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
	if (rc < 0)
		return rc;

	return 0;
}
static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr;
	unsigned char *ping_data = NULL;
	u32 payload_length = pdu_cb->dlen;
	int ret;

	ret = iscsit_setup_nop_out(conn, cmd, hdr);
	if (ret < 0)
		return 0;

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
			pr_err("Unable to recover from"
			       " NOPOUT Ping DataCRC failure while in"
			       " ERL=0.\n");
			ret = -1;
			goto out;
		} else {
			/*
			 * drop this PDU and let the
			 * initiator plug the CmdSN gap.
			 */
			pr_info("Dropping NOPOUT"
				" Command CmdSN: 0x%08x due to"
				" DataCRC error.\n", hdr->cmdsn);
			ret = 0;
			goto out;
		}
	}

	/*
	 * Handle NOP-OUT payload for traditional iSCSI sockets
	 */
	if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
		ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
		if (!ping_data) {
			pr_err("Unable to allocate memory for"
			       " NOPOUT ping data.\n");
			ret = -1;
			goto out;
		}

		skb_copy_bits(csk->skb, pdu_cb->doffset,
			      ping_data, payload_length);

		ping_data[payload_length] = '\0';
		/*
		 * Attach ping data to struct iscsi_cmd->buf_ptr.
		 */
		cmd->buf_ptr = ping_data;
		cmd->buf_ptr_size = payload_length;

		pr_debug("Got %u bytes of NOPOUT ping"
			 " data.\n", payload_length);
		pr_debug("Ping Data: \"%s\"\n", ping_data);
	}

	return iscsit_process_nop_out(conn, cmd, hdr);
out:
	if (cmd)
		iscsit_free_cmd(cmd, false);
	return ret;
}
static int
cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = csk->conn;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr;
	u32 payload_length = pdu_cb->dlen;
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
			pr_err("Unable to recover from"
			       " Text Data digest failure while in"
			       " ERL=0.\n");
			goto reject;
		} else {
			/*
			 * drop this PDU and let the
			 * initiator plug the CmdSN gap.
			 */
			pr_info("Dropping Text"
				" Command CmdSN: 0x%08x due to"
				" DataCRC error.\n", hdr->cmdsn);
			return 0;
		}
	}

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in) {
			pr_err("Unable to allocate text_in of payload_length: %u\n",
			       payload_length);
			return -ENOMEM;
		}
		skb_copy_bits(csk->skb, pdu_cb->doffset,
			      text_in, payload_length);

		text_in[payload_length - 1] = '\0';

		cmd->text_in_ptr = text_in;
	}

	return iscsit_process_text_cmd(conn, cmd, hdr);

reject:
	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
				 pdu_cb->hdr);
}
static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
{
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr;
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_cmd *cmd = NULL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
	int ret = -EINVAL;

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = cxgbit_allocate_cmd(csk);
		if (!cmd)
			goto reject;

		ret = cxgbit_handle_scsi_cmd(csk, cmd);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = cxgbit_handle_iscsi_dataout(csk);
		break;
	case ISCSI_OP_NOOP_OUT:
		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
			cmd = cxgbit_allocate_cmd(csk);
			if (!cmd)
				goto reject;
		}

		ret = cxgbit_handle_nop_out(csk, cmd);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = cxgbit_allocate_cmd(csk);
		if (!cmd)
			goto reject;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
			if (!cmd)
				goto reject;
		} else {
			cmd = cxgbit_allocate_cmd(csk);
			if (!cmd)
				goto reject;
		}

		ret = cxgbit_handle_text_cmd(csk, cmd);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = cxgbit_allocate_cmd(csk);
		if (!cmd)
			goto reject;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP * HZ);
		break;
	case ISCSI_OP_SNACK:
		ret = iscsit_handle_snack(conn, (unsigned char *)hdr);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;

reject:
	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
				 (unsigned char *)hdr);
}
static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
{
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_hdr *hdr = pdu_cb->hdr;
	u8 opcode;

	if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) {
		atomic_long_inc(&conn->sess->conn_digest_errors);
		goto transport_err;
	}

	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
		goto transport_err;

	opcode = hdr->opcode & ISCSI_OPCODE_MASK;

	if (conn->sess->sess_ops->SessionType &&
	    ((!(opcode & ISCSI_OP_TEXT)) ||
	     (!(opcode & ISCSI_OP_LOGOUT)))) {
		pr_err("Received illegal iSCSI Opcode: 0x%02x"
		       " while in Discovery Session, rejecting.\n", opcode);
		iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
				  (unsigned char *)hdr);
		goto transport_err;
	}

	if (cxgbit_target_rx_opcode(csk) < 0)
		goto transport_err;

	return 0;

transport_err:
	return -1;
}
static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	struct iscsi_login *login = conn->login;
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
	struct iscsi_login_req *login_req;

	login_req = (struct iscsi_login_req *)login->req;
	memcpy(login_req, pdu_cb->hdr, sizeof(*login_req));

	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
		 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
		 login_req->flags, login_req->itt, login_req->cmdsn,
		 login_req->exp_statsn, login_req->cid, pdu_cb->dlen);
	/*
	 * Setup the initial iscsi_login values from the leading
	 * login request PDU.
	 */
	if (login->first_request) {
		login_req = (struct iscsi_login_req *)login->req;
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
				login_req->flags);
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	if (iscsi_target_check_login_request(conn, login) < 0)
		return -1;

	memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
	skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen);

	return 0;
}
static int
cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx)
{
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx);
	int ret;

	cxgbit_rx_pdu_cb(skb) = pdu_cb;

	csk->skb = skb;

	if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) {
		ret = cxgbit_rx_login_pdu(csk);
		set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
	} else {
		ret = cxgbit_rx_opcode(csk);
	}

	return ret;
}
static void cxgbit_lro_skb_dump(struct sk_buff *skb)
{
	struct skb_shared_info *ssi = skb_shinfo(skb);
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
	u8 i;

	pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n",
		skb, skb->head, skb->data, skb->len, skb->data_len,
		ssi->nr_frags);
	pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n",
		skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen);

	for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++)
		pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, "
			"frags %u.\n",
			skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq,
			pdu_cb->ddigest, pdu_cb->frags);
	for (i = 0; i < ssi->nr_frags; i++)
		pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
			skb, i, skb_frag_off(&ssi->frags[i]),
			skb_frag_size(&ssi->frags[i]));
}
static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
{
	struct sk_buff *skb = csk->lro_hskb;
	struct skb_shared_info *ssi = skb_shinfo(skb);
	u8 i;

	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
	for (i = 0; i < ssi->nr_frags; i++)
		put_page(skb_frag_page(&ssi->frags[i]));
	ssi->nr_frags = 0;
	skb->data_len = 0;
	skb->truesize -= skb->len;
	skb->len = 0;
}
static void
cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
{
	struct sk_buff *hskb = csk->lro_hskb;
	struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx);
	struct skb_shared_info *hssi = skb_shinfo(hskb);
	struct skb_shared_info *ssi = skb_shinfo(skb);
	unsigned int len = 0;

	if (pdu_cb->flags & PDUCBF_RX_HDR) {
		u8 hfrag_idx = hssi->nr_frags;

		hpdu_cb->flags |= pdu_cb->flags;
		hpdu_cb->seq = pdu_cb->seq;
		hpdu_cb->hdr = pdu_cb->hdr;
		hpdu_cb->hlen = pdu_cb->hlen;

		memcpy(&hssi->frags[hfrag_idx], &ssi->frags[pdu_cb->hfrag_idx],
		       sizeof(skb_frag_t));

		get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
		hssi->nr_frags++;
		hpdu_cb->frags++;
		hpdu_cb->hfrag_idx = hfrag_idx;

		len = skb_frag_size(&hssi->frags[hfrag_idx]);
		hskb->len += len;
		hskb->data_len += len;
		hskb->truesize += len;
	}

	if (pdu_cb->flags & PDUCBF_RX_DATA) {
		u8 dfrag_idx = hssi->nr_frags, i;

		hpdu_cb->flags |= pdu_cb->flags;
		hpdu_cb->dfrag_idx = dfrag_idx;

		len = 0;
		for (i = 0; i < pdu_cb->nr_dfrags; dfrag_idx++, i++) {
			memcpy(&hssi->frags[dfrag_idx],
			       &ssi->frags[pdu_cb->dfrag_idx + i],
			       sizeof(skb_frag_t));

			get_page(skb_frag_page(&hssi->frags[dfrag_idx]));

			len += skb_frag_size(&hssi->frags[dfrag_idx]);

			hssi->nr_frags++;
			hpdu_cb->frags++;
		}

		hpdu_cb->dlen = pdu_cb->dlen;
		hpdu_cb->doffset = hpdu_cb->hlen;
		hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags;
		hskb->len += len;
		hskb->data_len += len;
		hskb->truesize += len;
	}

	if (pdu_cb->flags & PDUCBF_RX_STATUS) {
		hpdu_cb->flags |= pdu_cb->flags;

		if (hpdu_cb->flags & PDUCBF_RX_DATA)
			hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD;

		hpdu_cb->ddigest = pdu_cb->ddigest;
		hpdu_cb->pdulen = pdu_cb->pdulen;
	}
}
static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
	u8 pdu_idx = 0, last_idx = 0;
	int ret = 0;

	if (!pdu_cb->complete) {
		cxgbit_lro_skb_merge(csk, skb, 0);

		if (pdu_cb->flags & PDUCBF_RX_STATUS) {
			struct sk_buff *hskb = csk->lro_hskb;

			ret = cxgbit_process_iscsi_pdu(csk, hskb, 0);

			cxgbit_lro_hskb_reset(csk);

			if (ret < 0)
				goto out;
		}

		pdu_idx = 1;
	}

	if (lro_cb->pdu_idx)
		last_idx = lro_cb->pdu_idx - 1;

	for (; pdu_idx <= last_idx; pdu_idx++) {
		ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
		if (ret < 0)
			goto out;
	}

	if ((!lro_cb->complete) && lro_cb->pdu_idx)
		cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);

out:
	return ret;
}
static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
	int ret = -1;

	if ((pdu_cb->flags & PDUCBF_RX_HDR) &&
	    (pdu_cb->seq != csk->rcv_nxt)) {
		pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n",
			csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
		cxgbit_lro_skb_dump(skb);
		return ret;
	}

	csk->rcv_nxt += lro_cb->pdu_totallen;

	ret = cxgbit_process_lro_skb(csk, skb);

	csk->rx_credits += lro_cb->pdu_totallen;

	if (csk->rx_credits >= (csk->rcv_win / 4))
		cxgbit_rx_data_ack(csk);

	return ret;
}
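/*
 * Rx credit return: received bytes accumulate in csk->rx_credits and
 * are only returned to the hardware via cxgbit_rx_data_ack() once at
 * least a quarter of the receive window is pending, batching receive
 * window updates instead of acking every PDU.
 */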
static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
	int ret = -1;

	if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) {
		if (is_t5(lldi->adapter_type))
			ret = cxgbit_rx_lro_skb(csk, skb);
		else
			ret = cxgbit_process_lro_skb(csk, skb);
	}

	__kfree_skb(skb);
	return ret;
}
static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
{
	spin_lock_bh(&csk->rxq.lock);
	if (skb_queue_len(&csk->rxq)) {
		skb_queue_splice_init(&csk->rxq, rxq);
		spin_unlock_bh(&csk->rxq.lock);
		return true;
	}
	spin_unlock_bh(&csk->rxq.lock);
	return false;
}
static int cxgbit_wait_rxq(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	struct sk_buff_head rxq;

	skb_queue_head_init(&rxq);

	wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));

	if (signal_pending(current))
		goto out;

	while ((skb = __skb_dequeue(&rxq))) {
		if (cxgbit_rx_skb(csk, skb))
			goto out;
	}

	return 0;
out:
	__skb_queue_purge(&rxq);
	return -1;
}
int cxgbit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct cxgbit_sock *csk = conn->context;
	int ret = -1;

	while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) {
		ret = cxgbit_wait_rxq(csk);
		if (ret) {
			clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
			break;
		}
	}

	return ret;
}
void cxgbit_get_rx_pdu(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;

	while (!kthread_should_stop()) {
		iscsit_thread_check_cpumask(conn, current, 0);
		if (cxgbit_wait_rxq(csk))
			return;
	}
}