/*
 * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 * Copyright (c) 2008 Mike Christie
 * Copyright (c) 2008 Red Hat, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "cxgb3i.h"
#include "cxgb3i_pdu.h"

#ifdef __DEBUG_CXGB3I_RX__
#define cxgb3i_rx_debug         cxgb3i_log_debug
#else
#define cxgb3i_rx_debug(fmt...)
#endif

#ifdef __DEBUG_CXGB3I_TX__
#define cxgb3i_tx_debug         cxgb3i_log_debug
#else
#define cxgb3i_tx_debug(fmt...)
#endif

/* always allocate room for AHS */
#define SKB_TX_PDU_HEADER_LEN   \
        (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)
static unsigned int skb_extra_headroom;
static struct page *pad_page;

/*
 * pdu receive, interact with libiscsi_tcp
 */
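/*
 * read_pdu_skb() hands one received skb to libiscsi_tcp starting at
 * @offset and translates the iscsi_tcp_recv_skb() status into either
 * the number of bytes consumed or a negative error code.
 */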
static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb,
                               unsigned int offset, int offloaded)
{
        int status = 0;
        int bytes_read;

        bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
        switch (status) {
        case ISCSI_TCP_CONN_ERR:
                return -EIO;
        case ISCSI_TCP_SUSPENDED:
                /* no transfer - just have caller flush queue */
                return bytes_read;
        case ISCSI_TCP_SKB_DONE:
                /*
                 * pdus should always fit in the skb and we should get
                 * segment done notification.
                 */
                iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
                return -EFAULT;
        case ISCSI_TCP_SEGMENT_DONE:
                return bytes_read;
        default:
                iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb "
                                  "status %d\n", status);
                return -EINVAL;
        }
}

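/*
 * cxgb3i_conn_read_pdu_skb() processes one rx skb: it checks the ULP
 * header/data digest error flags, feeds the BHS to libiscsi_tcp, and
 * then feeds the data segment, skipping the payload bytes when they
 * were already placed by DDP.
 */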
static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn,
                                    struct sk_buff *skb)
{
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
        bool offloaded = false;
        unsigned int offset;
        int rc;

        cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
                        conn, skb, skb->len, skb_ulp_mode(skb));

        if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
                iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
                return -EIO;
        }

        if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) {
                iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
                return -EIO;
        }

        if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
                iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
                return -EIO;
        }

        /* iscsi hdr */
        rc = read_pdu_skb(conn, skb, 0, 0);
        if (rc <= 0)
                return rc;

        if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
                return 0;

        offset = rc;
        if (conn->hdrdgst_en)
                offset += ISCSI_DIGEST_SIZE;

        /* iscsi data */
        if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
                cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
                                "itt 0x%x.\n",
                                skb,
                                tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
                                tcp_conn->in.datalen,
                                ntohl(tcp_conn->in.hdr->itt));
                offloaded = true;
        } else {
                cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
                                "itt 0x%x.\n",
                                skb,
                                tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
                                tcp_conn->in.datalen,
                                ntohl(tcp_conn->in.hdr->itt));
                offset += sizeof(struct cpl_iscsi_hdr_norss);
        }

        rc = read_pdu_skb(conn, skb, offset, offloaded);
        return rc < 0 ? rc : 0;
}

/*
 * pdu transmit, interact with libiscsi_tcp
 */
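/*
 * tx_skb_setmode() encodes the requested header/data CRC settings into
 * the skb's ULP submode bits so the digests are generated on transmit.
 */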
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
        u8 submode = 0;

        if (hcrc)
                submode |= 1;
        if (dcrc)
                submode |= 2;
        skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
}

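/*
 * cxgb3i_conn_cleanup_task() releases the per-task resources: any tx skb
 * that never made it to the xmit callout, the reserved ITT, and the
 * libiscsi_tcp task state.
 */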
void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
{
        struct cxgb3i_task_data *tdata = task->dd_data +
                                         sizeof(struct iscsi_tcp_task);

        /* never reached the xmit task callout */
        if (tdata->skb)
                __kfree_skb(tdata->skb);
        memset(tdata, 0, sizeof(struct cxgb3i_task_data));

        /* MNC - Do we need a check in case this is called but
         * cxgb3i_conn_alloc_pdu has never been called on the task */
        cxgb3i_release_itt(task, task->hdr_itt);
        iscsi_tcp_cleanup_task(task);
}

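/*
 * sgl_seek_offset() walks the scatterlist until it finds the entry
 * containing byte @offset; it returns that entry in @sgp and the
 * remaining offset within it in @off, or a negative error if the
 * offset lies past the end of the list.
 */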
static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
                           unsigned int offset, unsigned int *off,
                           struct scatterlist **sgp)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, sgcnt, i) {
                if (offset < sg->length) {
                        *off = offset;
                        *sgp = sg;
                        return 0;
                }
                offset -= sg->length;
        }
        return -EFAULT;
}

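/*
 * sgl_read_to_frags() maps up to @dlen bytes of a scatterlist, starting
 * @sgoffset bytes into @sg, onto an array of skb page fragments, merging
 * ranges that are contiguous within the same page.  It returns the number
 * of fragments used or a negative error code.
 */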
static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
                             unsigned int dlen, skb_frag_t *frags,
                             int frag_max)
{
        unsigned int datalen = dlen;
        unsigned int sglen = sg->length - sgoffset;
        struct page *page = sg_page(sg);
        int i;

        i = 0;
        do {
                unsigned int copy;

                if (!sglen) {
                        sg = sg_next(sg);
                        if (!sg) {
                                cxgb3i_log_error("%s, sg NULL, len %u/%u.\n",
                                                 __func__, datalen, dlen);
                                return -EINVAL;
                        }
                        sgoffset = 0;
                        sglen = sg->length;
                        page = sg_page(sg);
                }
                copy = min(datalen, sglen);
                if (i && page == frags[i - 1].page &&
                    sgoffset + sg->offset ==
                        frags[i - 1].page_offset + frags[i - 1].size) {
                        frags[i - 1].size += copy;
                } else {
                        if (i >= frag_max) {
                                cxgb3i_log_error("%s, too many pages %u, "
                                                 "dlen %u.\n", __func__,
                                                 frag_max, dlen);
                                return -EINVAL;
                        }

                        frags[i].page = page;
                        frags[i].page_offset = sg->offset + sgoffset;
                        frags[i].size = copy;
                        i++;
                }
                datalen -= copy;
                sgoffset += copy;
                sglen -= copy;
        } while (datalen);

        return i;
}

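/*
 * cxgb3i_conn_alloc_pdu() allocates the tx skb for a task.  Write-direction
 * commands get extra headroom (bounded by max_xmit_dlength) so a small data
 * segment can later be copied linearly behind the header.
 */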
int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
        struct iscsi_conn *conn = task->conn;
        struct iscsi_tcp_task *tcp_task = task->dd_data;
        struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task);
        struct scsi_cmnd *sc = task->sc;
        int headroom = SKB_TX_PDU_HEADER_LEN;

        tcp_task->dd_data = tdata;
        task->hdr = NULL;

        /* write command, need to send data pdus */
        if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
            (opcode == ISCSI_OP_SCSI_CMD &&
             (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
                headroom += min(skb_extra_headroom, conn->max_xmit_dlength);

        tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC);
        if (!tdata->skb)
                return -ENOMEM;
        skb_reserve(tdata->skb, TX_HEADER_LEN);

        cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
                        task, opcode, tdata->skb);

        task->hdr = (struct iscsi_hdr *)tdata->skb->data;
        task->hdr_max = SKB_TX_PDU_HEADER_LEN;

        /* data_out uses scsi_cmd's itt */
        if (opcode != ISCSI_OP_SCSI_DATA_OUT)
                cxgb3i_reserve_itt(task, &task->hdr->itt);

        return 0;
}

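/*
 * cxgb3i_conn_init_pdu() attaches @count bytes of payload, starting at
 * @offset, to the task's tx skb.  Scatterlist data is either copied into
 * the skb's headroom or referenced as page fragments, and the PDU is
 * padded to a 4-byte boundary with the shared zero page.
 */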
int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
                         unsigned int count)
{
        struct iscsi_conn *conn = task->conn;
        struct iscsi_tcp_task *tcp_task = task->dd_data;
        struct cxgb3i_task_data *tdata = tcp_task->dd_data;
        struct sk_buff *skb = tdata->skb;
        unsigned int datalen = count;
        int i, padlen = iscsi_padding(count);
        struct page *pg;

        cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
                        task, task->sc, offset, count, skb);

        skb_put(skb, task->hdr_len);
        tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
        if (!count)
                return 0;

        if (task->sc) {
                struct scsi_data_buffer *sdb = scsi_out(task->sc);
                struct scatterlist *sg = NULL;
                int err;

                tdata->offset = offset;
                tdata->count = count;
                err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
                                      tdata->offset, &tdata->sgoffset, &sg);
                if (err < 0) {
                        cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
                                        sdb->table.nents, tdata->offset,
                                        sdb->length);
                        return err;
                }
                err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
                                        tdata->frags, MAX_PDU_FRAGS);
                if (err < 0) {
                        cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
                                        sdb->table.nents, tdata->offset,
                                        tdata->count);
                        return err;
                }
                tdata->nr_frags = err;

                if (tdata->nr_frags > MAX_SKB_FRAGS ||
                    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
                        char *dst = skb->data + task->hdr_len;
                        skb_frag_t *frag = tdata->frags;

                        /* data fits in the skb's headroom */
                        for (i = 0; i < tdata->nr_frags; i++, frag++) {
                                char *src = kmap_atomic(frag->page,
                                                        KM_SOFTIRQ0);

                                memcpy(dst, src + frag->page_offset,
                                       frag->size);
                                dst += frag->size;
                                kunmap_atomic(src, KM_SOFTIRQ0);
                        }
                        if (padlen) {
                                memset(dst, 0, padlen);
                                padlen = 0;
                        }
                        skb_put(skb, count + padlen);
                } else {
                        /* data fits into the frag_list */
                        for (i = 0; i < tdata->nr_frags; i++)
                                get_page(tdata->frags[i].page);

                        memcpy(skb_shinfo(skb)->frags, tdata->frags,
                               sizeof(skb_frag_t) * tdata->nr_frags);
                        skb_shinfo(skb)->nr_frags = tdata->nr_frags;
                        skb->len += count;
                        skb->data_len += count;
                        skb->truesize += count;
                }
        } else {
                pg = virt_to_page(task->data);

                get_page(pg);
                skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
                                   count);
                skb->len += count;
                skb->data_len += count;
                skb->truesize += count;
        }

        if (padlen) {
                i = skb_shinfo(skb)->nr_frags;
                get_page(pad_page);
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
                                   padlen);

                skb->data_len += padlen;
                skb->truesize += padlen;
                skb->len += padlen;
        }

        return 0;
}

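/*
 * cxgb3i_conn_xmit_pdu() pushes the prepared skb to the offload
 * connection.  On success the transmitted PDU length (including digests)
 * is credited to the connection; -EAGAIN/-ENOBUFS keep the skb for a
 * retry, and any other error fails the connection.
 */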
int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
{
        struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
        struct cxgb3i_conn *cconn = tcp_conn->dd_data;
        struct iscsi_tcp_task *tcp_task = task->dd_data;
        struct cxgb3i_task_data *tdata = tcp_task->dd_data;
        struct sk_buff *skb = tdata->skb;
        unsigned int datalen;
        int err;

        if (!skb)
                return 0;

        datalen = skb->data_len;
        tdata->skb = NULL;
        err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
        if (err > 0) {
                int pdulen = err;

                cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
                                task, skb, skb->len, skb->data_len, err);

                if (task->conn->hdrdgst_en)
                        pdulen += ISCSI_DIGEST_SIZE;
                if (datalen && task->conn->datadgst_en)
                        pdulen += ISCSI_DIGEST_SIZE;

                task->conn->txdata_octets += pdulen;
                return 0;
        }

        if (err == -EAGAIN || err == -ENOBUFS) {
                /* reset skb to send when we are called again */
                tdata->skb = skb;
                return err;
        }

        cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
                        task->itt, skb, skb->len, skb->data_len, err);
        iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
        iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
        kfree_skb(skb);
        return err;
}

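/*
 * cxgb3i_pdu_init() sets up the module-wide tx resources: the optional
 * extra skb headroom and the shared zero-filled page used for PDU padding.
 */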
int cxgb3i_pdu_init(void)
{
        if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS))
                skb_extra_headroom = SKB_TX_HEADROOM;
        pad_page = alloc_page(GFP_KERNEL);
        if (!pad_page)
                return -ENOMEM;
        memset(page_address(pad_page), 0, PAGE_SIZE);
        return 0;
}

void cxgb3i_pdu_cleanup(void)
{
        if (pad_page) {
                __free_page(pad_page);
                pad_page = NULL;
        }
}

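/*
 * cxgb3i_conn_pdu_ready() is the offload connection's rx callback: it
 * drains the receive queue, hands each skb to libiscsi_tcp, returns rx
 * credits for the bytes consumed, and fails the connection on error.
 */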
void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
{
        struct sk_buff *skb;
        unsigned int read = 0;
        struct iscsi_conn *conn = c3cn->user_data;
        int err = 0;

        cxgb3i_rx_debug("cn 0x%p.\n", c3cn);

        read_lock(&c3cn->callback_lock);
        if (unlikely(!conn || conn->suspend_rx)) {
                cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
                                conn, conn ? conn->id : 0xFF,
                                conn ? conn->suspend_rx : 0xFF);
                read_unlock(&c3cn->callback_lock);
                return;
        }
        skb = skb_peek(&c3cn->receive_queue);
        while (!err && skb) {
                __skb_unlink(skb, &c3cn->receive_queue);
                read += skb_rx_pdulen(skb);
                cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
                                conn, c3cn, skb, skb_rx_pdulen(skb));
                err = cxgb3i_conn_read_pdu_skb(conn, skb);
                __kfree_skb(skb);
                skb = skb_peek(&c3cn->receive_queue);
        }
        read_unlock(&c3cn->callback_lock);
        c3cn->copied_seq += read;
        cxgb3i_c3cn_rx_credits(c3cn, read);
        conn->rxdata_octets += read;

        if (err) {
                cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
        }
}

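/*
 * cxgb3i_conn_tx_open() is called when tx space opens up on the offload
 * connection; it kicks the iscsi connection's xmit work queue.
 */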
void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
{
        struct iscsi_conn *conn = c3cn->user_data;

        cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
        if (conn) {
                cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
                iscsi_conn_queue_work(conn);
        }
}

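/*
 * cxgb3i_conn_closing() is called while the offload connection is being
 * torn down; if the c3cn has already left the ESTABLISHED state, the
 * close is reported to libiscsi as a connection failure.
 */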
void cxgb3i_conn_closing(struct s3_conn *c3cn)
{
        struct iscsi_conn *conn;

        read_lock(&c3cn->callback_lock);
        conn = c3cn->user_data;
        if (conn && c3cn->state != C3CN_STATE_ESTABLISHED)
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
        read_unlock(&c3cn->callback_lock);
}