/*
 * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 * Copyright (c) 2008 Mike Christie
 * Copyright (c) 2008 Red Hat, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */
15 #include <linux/slab.h>
16 #include <linux/skbuff.h>
17 #include <linux/crypto.h>
18 #include <scsi/scsi_cmnd.h>
19 #include <scsi/scsi_host.h>
22 #include "cxgb3i_pdu.h"
/*
 * Per-path debug switches: when the corresponding __DEBUG_CXGB3I_{RX,TX}__
 * symbol is defined the macros forward to cxgb3i_log_debug(); otherwise
 * they compile away to nothing.
 */
#ifdef __DEBUG_CXGB3I_RX__
#define cxgb3i_rx_debug		cxgb3i_log_debug
#else
#define cxgb3i_rx_debug(fmt...)
#endif

#ifdef __DEBUG_CXGB3I_TX__
#define cxgb3i_tx_debug		cxgb3i_log_debug
#else
#define cxgb3i_tx_debug(fmt...)
#endif
/* always allocate rooms for AHS */
#define SKB_TX_PDU_HEADER_LEN	\
	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)

/*
 * Extra tx headroom reserved for small write payloads so they can be
 * copied inline into the skb head (set once in cxgb3i_pdu_init()).
 */
static unsigned int skb_extra_headroom;

/* One shared zero-filled page used to pad PDU data to 4-byte alignment. */
static struct page *pad_page;
43 * pdu receive, interact with libiscsi_tcp
45 static inline int read_pdu_skb(struct iscsi_conn
*conn
, struct sk_buff
*skb
,
46 unsigned int offset
, int offloaded
)
51 bytes_read
= iscsi_tcp_recv_skb(conn
, skb
, offset
, offloaded
, &status
);
53 case ISCSI_TCP_CONN_ERR
:
55 case ISCSI_TCP_SUSPENDED
:
56 /* no transfer - just have caller flush queue */
58 case ISCSI_TCP_SKB_DONE
:
60 * pdus should always fit in the skb and we should get
61 * segment done notifcation.
63 iscsi_conn_printk(KERN_ERR
, conn
, "Invalid pdu or skb.");
65 case ISCSI_TCP_SEGMENT_DONE
:
68 iscsi_conn_printk(KERN_ERR
, conn
, "Invalid iscsi_tcp_recv_skb "
69 "status %d\n", status
);
74 static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn
*conn
,
77 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
82 cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
83 conn
, skb
, skb
->len
, skb_ulp_mode(skb
));
85 if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn
)) {
86 iscsi_conn_failure(conn
, ISCSI_ERR_PROTO
);
90 if (conn
->hdrdgst_en
&& (skb_ulp_mode(skb
) & ULP2_FLAG_HCRC_ERROR
)) {
91 iscsi_conn_failure(conn
, ISCSI_ERR_HDR_DGST
);
95 if (conn
->datadgst_en
&& (skb_ulp_mode(skb
) & ULP2_FLAG_DCRC_ERROR
)) {
96 iscsi_conn_failure(conn
, ISCSI_ERR_DATA_DGST
);
101 rc
= read_pdu_skb(conn
, skb
, 0, 0);
105 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn
))
109 if (conn
->hdrdgst_en
)
110 offset
+= ISCSI_DIGEST_SIZE
;
113 if (skb_ulp_mode(skb
) & ULP2_FLAG_DATA_DDPED
) {
114 cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
117 tcp_conn
->in
.hdr
->opcode
& ISCSI_OPCODE_MASK
,
118 tcp_conn
->in
.datalen
,
119 ntohl(tcp_conn
->in
.hdr
->itt
));
122 cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
125 tcp_conn
->in
.hdr
->opcode
& ISCSI_OPCODE_MASK
,
126 tcp_conn
->in
.datalen
,
127 ntohl(tcp_conn
->in
.hdr
->itt
));
128 offset
+= sizeof(struct cpl_iscsi_hdr_norss
);
131 rc
= read_pdu_skb(conn
, skb
, offset
, offloaded
);
139 * pdu transmit, interact with libiscsi_tcp
141 static inline void tx_skb_setmode(struct sk_buff
*skb
, int hcrc
, int dcrc
)
149 skb_ulp_mode(skb
) = (ULP_MODE_ISCSI
<< 4) | submode
;
152 void cxgb3i_conn_cleanup_task(struct iscsi_task
*task
)
154 struct cxgb3i_task_data
*tdata
= task
->dd_data
+
155 sizeof(struct iscsi_tcp_task
);
157 /* never reached the xmit task callout */
159 __kfree_skb(tdata
->skb
);
160 memset(tdata
, 0, sizeof(struct cxgb3i_task_data
));
162 /* MNC - Do we need a check in case this is called but
163 * cxgb3i_conn_alloc_pdu has never been called on the task */
164 cxgb3i_release_itt(task
, task
->hdr_itt
);
165 iscsi_tcp_cleanup_task(task
);
168 static int sgl_seek_offset(struct scatterlist
*sgl
, unsigned int sgcnt
,
169 unsigned int offset
, unsigned int *off
,
170 struct scatterlist
**sgp
)
173 struct scatterlist
*sg
;
175 for_each_sg(sgl
, sg
, sgcnt
, i
) {
176 if (offset
< sg
->length
) {
181 offset
-= sg
->length
;
186 static int sgl_read_to_frags(struct scatterlist
*sg
, unsigned int sgoffset
,
187 unsigned int dlen
, skb_frag_t
*frags
,
190 unsigned int datalen
= dlen
;
191 unsigned int sglen
= sg
->length
- sgoffset
;
192 struct page
*page
= sg_page(sg
);
202 cxgb3i_log_error("%s, sg NULL, len %u/%u.\n",
203 __func__
, datalen
, dlen
);
211 copy
= min(datalen
, sglen
);
212 if (i
&& page
== frags
[i
- 1].page
&&
213 sgoffset
+ sg
->offset
==
214 frags
[i
- 1].page_offset
+ frags
[i
- 1].size
) {
215 frags
[i
- 1].size
+= copy
;
218 cxgb3i_log_error("%s, too many pages %u, "
219 "dlen %u.\n", __func__
,
224 frags
[i
].page
= page
;
225 frags
[i
].page_offset
= sg
->offset
+ sgoffset
;
226 frags
[i
].size
= copy
;
237 int cxgb3i_conn_alloc_pdu(struct iscsi_task
*task
, u8 opcode
)
239 struct iscsi_conn
*conn
= task
->conn
;
240 struct iscsi_tcp_task
*tcp_task
= task
->dd_data
;
241 struct cxgb3i_task_data
*tdata
= task
->dd_data
+ sizeof(*tcp_task
);
242 struct scsi_cmnd
*sc
= task
->sc
;
243 int headroom
= SKB_TX_PDU_HEADER_LEN
;
245 tcp_task
->dd_data
= tdata
;
248 /* write command, need to send data pdus */
249 if (skb_extra_headroom
&& (opcode
== ISCSI_OP_SCSI_DATA_OUT
||
250 (opcode
== ISCSI_OP_SCSI_CMD
&&
251 (scsi_bidi_cmnd(sc
) || sc
->sc_data_direction
== DMA_TO_DEVICE
))))
252 headroom
+= min(skb_extra_headroom
, conn
->max_xmit_dlength
);
254 tdata
->skb
= alloc_skb(TX_HEADER_LEN
+ headroom
, GFP_ATOMIC
);
257 skb_reserve(tdata
->skb
, TX_HEADER_LEN
);
259 cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
260 task
, opcode
, tdata
->skb
);
262 task
->hdr
= (struct iscsi_hdr
*)tdata
->skb
->data
;
263 task
->hdr_max
= SKB_TX_PDU_HEADER_LEN
;
265 /* data_out uses scsi_cmd's itt */
266 if (opcode
!= ISCSI_OP_SCSI_DATA_OUT
)
267 cxgb3i_reserve_itt(task
, &task
->hdr
->itt
);
272 int cxgb3i_conn_init_pdu(struct iscsi_task
*task
, unsigned int offset
,
275 struct iscsi_conn
*conn
= task
->conn
;
276 struct iscsi_tcp_task
*tcp_task
= task
->dd_data
;
277 struct cxgb3i_task_data
*tdata
= tcp_task
->dd_data
;
278 struct sk_buff
*skb
= tdata
->skb
;
279 unsigned int datalen
= count
;
280 int i
, padlen
= iscsi_padding(count
);
283 cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
284 task
, task
->sc
, offset
, count
, skb
);
286 skb_put(skb
, task
->hdr_len
);
287 tx_skb_setmode(skb
, conn
->hdrdgst_en
, datalen
? conn
->datadgst_en
: 0);
292 struct scsi_data_buffer
*sdb
= scsi_out(task
->sc
);
293 struct scatterlist
*sg
= NULL
;
296 tdata
->offset
= offset
;
297 tdata
->count
= count
;
298 err
= sgl_seek_offset(sdb
->table
.sgl
, sdb
->table
.nents
,
299 tdata
->offset
, &tdata
->sgoffset
, &sg
);
301 cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
302 sdb
->table
.nents
, tdata
->offset
,
306 err
= sgl_read_to_frags(sg
, tdata
->sgoffset
, tdata
->count
,
307 tdata
->frags
, MAX_PDU_FRAGS
);
309 cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
310 sdb
->table
.nents
, tdata
->offset
,
314 tdata
->nr_frags
= err
;
316 if (tdata
->nr_frags
> MAX_SKB_FRAGS
||
317 (padlen
&& tdata
->nr_frags
== MAX_SKB_FRAGS
)) {
318 char *dst
= skb
->data
+ task
->hdr_len
;
319 skb_frag_t
*frag
= tdata
->frags
;
321 /* data fits in the skb's headroom */
322 for (i
= 0; i
< tdata
->nr_frags
; i
++, frag
++) {
323 char *src
= kmap_atomic(frag
->page
,
326 memcpy(dst
, src
+frag
->page_offset
, frag
->size
);
328 kunmap_atomic(src
, KM_SOFTIRQ0
);
331 memset(dst
, 0, padlen
);
334 skb_put(skb
, count
+ padlen
);
336 /* data fit into frag_list */
337 for (i
= 0; i
< tdata
->nr_frags
; i
++)
338 get_page(tdata
->frags
[i
].page
);
340 memcpy(skb_shinfo(skb
)->frags
, tdata
->frags
,
341 sizeof(skb_frag_t
) * tdata
->nr_frags
);
342 skb_shinfo(skb
)->nr_frags
= tdata
->nr_frags
;
344 skb
->data_len
+= count
;
345 skb
->truesize
+= count
;
349 pg
= virt_to_page(task
->data
);
352 skb_fill_page_desc(skb
, 0, pg
, offset_in_page(task
->data
),
355 skb
->data_len
+= count
;
356 skb
->truesize
+= count
;
360 i
= skb_shinfo(skb
)->nr_frags
;
362 skb_fill_page_desc(skb
, skb_shinfo(skb
)->nr_frags
, pad_page
, 0,
365 skb
->data_len
+= padlen
;
366 skb
->truesize
+= padlen
;
373 int cxgb3i_conn_xmit_pdu(struct iscsi_task
*task
)
375 struct iscsi_tcp_conn
*tcp_conn
= task
->conn
->dd_data
;
376 struct cxgb3i_conn
*cconn
= tcp_conn
->dd_data
;
377 struct iscsi_tcp_task
*tcp_task
= task
->dd_data
;
378 struct cxgb3i_task_data
*tdata
= tcp_task
->dd_data
;
379 struct sk_buff
*skb
= tdata
->skb
;
380 unsigned int datalen
;
386 datalen
= skb
->data_len
;
388 err
= cxgb3i_c3cn_send_pdus(cconn
->cep
->c3cn
, skb
);
392 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
393 task
, skb
, skb
->len
, skb
->data_len
, err
);
395 if (task
->conn
->hdrdgst_en
)
396 pdulen
+= ISCSI_DIGEST_SIZE
;
397 if (datalen
&& task
->conn
->datadgst_en
)
398 pdulen
+= ISCSI_DIGEST_SIZE
;
400 task
->conn
->txdata_octets
+= pdulen
;
404 if (err
== -EAGAIN
|| err
== -ENOBUFS
) {
405 /* reset skb to send when we are called again */
411 cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
412 task
->itt
, skb
, skb
->len
, skb
->data_len
, err
);
413 iscsi_conn_printk(KERN_ERR
, task
->conn
, "xmit err %d.\n", err
);
414 iscsi_conn_failure(task
->conn
, ISCSI_ERR_XMIT_FAILED
);
418 int cxgb3i_pdu_init(void)
420 if (SKB_TX_HEADROOM
> (512 * MAX_SKB_FRAGS
))
421 skb_extra_headroom
= SKB_TX_HEADROOM
;
422 pad_page
= alloc_page(GFP_KERNEL
);
425 memset(page_address(pad_page
), 0, PAGE_SIZE
);
429 void cxgb3i_pdu_cleanup(void)
432 __free_page(pad_page
);
437 void cxgb3i_conn_pdu_ready(struct s3_conn
*c3cn
)
440 unsigned int read
= 0;
441 struct iscsi_conn
*conn
= c3cn
->user_data
;
444 cxgb3i_rx_debug("cn 0x%p.\n", c3cn
);
446 read_lock(&c3cn
->callback_lock
);
447 if (unlikely(!conn
|| conn
->suspend_rx
)) {
448 cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
449 conn
, conn
? conn
->id
: 0xFF,
450 conn
? conn
->suspend_rx
: 0xFF);
451 read_unlock(&c3cn
->callback_lock
);
454 skb
= skb_peek(&c3cn
->receive_queue
);
455 while (!err
&& skb
) {
456 __skb_unlink(skb
, &c3cn
->receive_queue
);
457 read
+= skb_rx_pdulen(skb
);
458 cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
459 conn
, c3cn
, skb
, skb_rx_pdulen(skb
));
460 err
= cxgb3i_conn_read_pdu_skb(conn
, skb
);
462 skb
= skb_peek(&c3cn
->receive_queue
);
464 read_unlock(&c3cn
->callback_lock
);
465 c3cn
->copied_seq
+= read
;
466 cxgb3i_c3cn_rx_credits(c3cn
, read
);
467 conn
->rxdata_octets
+= read
;
470 cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn
, err
);
471 iscsi_conn_failure(conn
, ISCSI_ERR_CONN_FAILED
);
475 void cxgb3i_conn_tx_open(struct s3_conn
*c3cn
)
477 struct iscsi_conn
*conn
= c3cn
->user_data
;
479 cxgb3i_tx_debug("cn 0x%p.\n", c3cn
);
481 cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn
, conn
->id
);
482 iscsi_conn_queue_work(conn
);
486 void cxgb3i_conn_closing(struct s3_conn
*c3cn
)
488 struct iscsi_conn
*conn
;
490 read_lock(&c3cn
->callback_lock
);
491 conn
= c3cn
->user_data
;
492 if (conn
&& c3cn
->state
!= C3CN_STATE_ESTABLISHED
)
493 iscsi_conn_failure(conn
, ISCSI_ERR_CONN_FAILED
);
494 read_unlock(&c3cn
->callback_lock
);