/*
 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/if_vlan.h>
#include <linux/inet.h>
#include <net/dst.h>
#include <net/route.h>
#include <linux/inetdevice.h>	/* ip_dev_find */

#include "libcxgbi.h"

static unsigned int dbg_level;
#define DRV_MODULE_NAME		"libcxgbi"
#define DRV_MODULE_DESC		"Chelsio iSCSI driver library"
#define DRV_MODULE_VERSION	"0.9.0"
#define DRV_MODULE_RELDATE	"Jun. 2010"

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");
/*
 * cxgbi device management
 * maintains a list of the cxgbi devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);
int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
				unsigned int max_conn)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
					     sizeof(struct cxgbi_sock *),
					     GFP_KERNEL);
	if (!pmap->port_csk) {
		pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
		return -ENOMEM;
	}

	pmap->max_connect = max_conn;
	pmap->sport_base = base;
	spin_lock_init(&pmap->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);
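/*
 * Illustrative usage sketch (not part of the driver; the numbers and the
 * error label are made up for the example).  A lower-level driver that wants
 * up to 2048 offloaded connections, with source ports handed out starting at
 * 1024, would set up the map right after registering the device:
 *
 *	err = cxgbi_device_portmap_create(cdev, 1024, 2048);
 *	if (err < 0)
 *		goto release_cdev;
 *
 * sock_get_port()/sock_put_port() below then allocate and release entries
 * from this map.
 */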
void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	struct cxgbi_sock *csk;
	int i;

	for (i = 0; i < pmap->max_connect; i++) {
		if (pmap->port_csk[i]) {
			csk = pmap->port_csk[i];
			pmap->port_csk[i] = NULL;
			log_debug(1 << CXGBI_DBG_SOCK,
				"csk 0x%p, cdev 0x%p, offload down.\n",
				csk, cdev);
			spin_lock_bh(&csk->lock);
			cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
			cxgbi_sock_closed(csk);
			spin_unlock_bh(&csk->lock);
			cxgbi_sock_put(csk);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);
static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
	cxgbi_hbas_remove(cdev);
	cxgbi_device_portmap_cleanup(cdev);
	if (cdev->dev_ddp_cleanup)
		cdev->dev_ddp_cleanup(cdev);
	else
		cxgbi_ddp_cleanup(cdev);
	if (cdev->ddp)
		cxgbi_ddp_cleanup(cdev);
	if (cdev->pmap.max_connect)
		cxgbi_free_big_mem(cdev->pmap.port_csk);
	kfree(cdev);
}
struct cxgbi_device *cxgbi_device_register(unsigned int extra,
					   unsigned int nports)
{
	struct cxgbi_device *cdev;

	cdev = kzalloc(sizeof(*cdev) + extra + nports *
			(sizeof(struct cxgbi_hba *) +
			 sizeof(struct net_device *)),
			GFP_KERNEL);
	if (!cdev) {
		pr_warn("nport %d, OOM.\n", nports);
		return NULL;
	}
	cdev->ports = (struct net_device **)(cdev + 1);
	cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports *
				sizeof(struct net_device *));
	if (extra)
		cdev->dd_data = ((char *)cdev->hbas) +
				nports * sizeof(struct cxgbi_hba *);
	spin_lock_init(&cdev->pmap.lock);

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list_head, &cdev_list);
	mutex_unlock(&cdev_mutex);

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, nports);
	return cdev;
}
EXPORT_SYMBOL_GPL(cxgbi_device_register);
void cxgbi_device_unregister(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u,%s.\n",
		cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");
	mutex_lock(&cdev_mutex);
	list_del(&cdev->list_head);
	mutex_unlock(&cdev_mutex);
	cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);
void cxgbi_device_unregister_all(unsigned int flag)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if ((cdev->flags & flag) == flag) {
			log_debug(1 << CXGBI_DBG_DEV,
				"cdev 0x%p, p# %u,%s.\n",
				cdev, cdev->nports, cdev->nports ?
					cdev->ports[0]->name : "");
			list_del(&cdev->list_head);
			cxgbi_device_destroy(cdev);
		}
	}
	mutex_unlock(&cdev_mutex);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);
struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if (cdev->lldev == lldev) {
			mutex_unlock(&cdev_mutex);
			return cdev;
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"lldev 0x%p, NO match found.\n", lldev);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);
static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
							int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		log_debug(1 << CXGBI_DBG_DEV,
			"vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
	int i;
	struct cxgbi_hba *chba;

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		chba = cdev->hbas[i];
		if (chba) {
			cdev->hbas[i] = NULL;
			iscsi_host_remove(chba->shost);
			pci_dev_put(cdev->pdev);
			iscsi_host_free(chba->shost);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
int cxgbi_hbas_add(struct cxgbi_device *cdev, unsigned int max_lun,
		unsigned int max_id, struct scsi_host_template *sht,
		struct scsi_transport_template *stt)
{
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	int i, err;

	log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
		if (!shost) {
			pr_info("0x%p, p%d, %s, host alloc failed.\n",
				cdev, i, cdev->ports[i]->name);
			err = -ENOMEM;
			goto err_out;
		}

		shost->transportt = stt;
		shost->max_lun = max_lun;
		shost->max_id = max_id;
		shost->max_channel = 0;
		shost->max_cmd_len = 16;

		chba = iscsi_host_priv(shost);
		chba->cdev = cdev;
		chba->ndev = cdev->ports[i];
		chba->shost = shost;

		log_debug(1 << CXGBI_DBG_DEV,
			"cdev 0x%p, p#%d %s: chba 0x%p.\n",
			cdev, i, cdev->ports[i]->name, chba);

		pci_dev_get(cdev->pdev);
		err = iscsi_host_add(shost, &cdev->pdev->dev);
		if (err) {
			pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
				cdev, i, cdev->ports[i]->name);
			pci_dev_put(cdev->pdev);
			scsi_host_put(shost);
			goto err_out;
		}

		cdev->hbas[i] = chba;
	}

	return 0;

err_out:
	cxgbi_hbas_remove(cdev);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_add);
/*
 * - source port management
 *   To find a free source port in the port allocation map we use a very simple
 *   rotor scheme to look for the next free port.
 *
 *   If a source port has been specified make sure that it doesn't collide with
 *   our normal source port allocation map.  If it's outside the range of our
 *   allocation/deallocation scheme just let them use it.
 *
 *   If the source port is outside our allocation range, the caller is
 *   responsible for keeping track of their port usage.
 */
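/*
 * Worked example of the rotor (illustrative numbers only): with
 * sport_base = 1024, max_connect = 4 and next = 1, sock_get_port() below
 * probes indices 2, 3, 0, 1 in that order and takes the first free slot.
 * If index 3 is free, the connection gets source port 1024 + 3 = 1027 and
 * pmap->next is left at 3, so the following allocation starts its scan there.
 */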
static int sock_get_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int start;
	int idx;

	if (!pmap->max_connect) {
		pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	if (csk->saddr.sin_port) {
		pr_err("source port NON-ZERO %u.\n",
			ntohs(csk->saddr.sin_port));
		return -EADDRINUSE;
	}

	spin_lock_bh(&pmap->lock);
	if (pmap->used >= pmap->max_connect) {
		spin_unlock_bh(&pmap->lock);
		pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	start = idx = pmap->next;
	do {
		if (++idx >= pmap->max_connect)
			idx = 0;
		if (!pmap->port_csk[idx]) {
			pmap->used++;
			csk->saddr.sin_port =
				htons(pmap->sport_base + idx);
			pmap->next = idx;
			pmap->port_csk[idx] = csk;
			spin_unlock_bh(&pmap->lock);
			cxgbi_sock_get(csk);
			log_debug(1 << CXGBI_DBG_SOCK,
				"cdev 0x%p, p#%u %s, p %u, %u.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				pmap->sport_base + idx, pmap->next);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&pmap->lock);

	/* should not happen */
	pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
		cdev, csk->port_id, cdev->ports[csk->port_id]->name,
		pmap->next);
	return -EADDRNOTAVAIL;
}
static void sock_put_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	if (csk->saddr.sin_port) {
		int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base;

		csk->saddr.sin_port = 0;
		if (idx < 0 || idx >= pmap->max_connect) {
			pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				ntohs(csk->saddr.sin_port));
			return;
		}

		spin_lock_bh(&pmap->lock);
		pmap->port_csk[idx] = NULL;
		pmap->used--;
		spin_unlock_bh(&pmap->lock);

		log_debug(1 << CXGBI_DBG_SOCK,
			"cdev 0x%p, p#%u %s, release %u.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name,
			pmap->sport_base + idx);

		cxgbi_sock_put(csk);
	}
}
/*
 * iscsi tcp connection
 */
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
{
	if (csk->cpl_close) {
		kfree_skb(csk->cpl_close);
		csk->cpl_close = NULL;
	}
	if (csk->cpl_abort_req) {
		kfree_skb(csk->cpl_abort_req);
		csk->cpl_abort_req = NULL;
	}
	if (csk->cpl_abort_rpl) {
		kfree_skb(csk->cpl_abort_rpl);
		csk->cpl_abort_rpl = NULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);
static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
{
	struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);

	if (!csk) {
		pr_info("alloc csk %zu failed.\n", sizeof(*csk));
		return NULL;
	}

	if (cdev->csk_alloc_cpls(csk) < 0) {
		pr_info("csk 0x%p, alloc cpls failed.\n", csk);
		kfree(csk);
		return NULL;
	}

	spin_lock_init(&csk->lock);
	kref_init(&csk->refcnt);
	skb_queue_head_init(&csk->receive_queue);
	skb_queue_head_init(&csk->write_queue);
	setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
	rwlock_init(&csk->callback_lock);
	csk->cdev = cdev;
	csk->flags = 0;
	cxgbi_sock_set_state(csk, CTP_CLOSED);

	log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);

	return csk;
}
static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr,
				      __be16 sport, __be16 dport, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			.ip4_u = {
				.daddr = daddr,
				.saddr = saddr,
				.tos = tos }
			},
		.proto = IPPROTO_TCP,
		.uli_u = {
			.ports = {
				.sport = sport,
				.dport = dport }
			}
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;

	return rt;
}
static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
{
	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rtable *rt = NULL;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	if (daddr->sin_family != AF_INET) {
		pr_info("address family 0x%x NOT supported.\n",
			daddr->sin_family);
		err = -EAFNOSUPPORT;
		goto err_out;
	}

	rt = find_route_ipv4(0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
	if (!rt) {
		pr_info("no route to ipv4 0x%x, port %u.\n",
			daddr->sin_addr.s_addr, daddr->sin_port);
		err = -ENETUNREACH;
		goto err_out;
	}
	dst = &rt->dst;
	ndev = dst->neighbour->dev;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}

	if (ndev->flags & IFF_LOOPBACK) {
		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
		mtu = ndev->mtu;
		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
			dst->neighbour->dev->name, ndev->name, mtu);
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
			&daddr->sin_addr.s_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
		port, ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_rt;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;
	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
	csk->daddr.sin_port = daddr->sin_port;
	csk->saddr.sin_addr.s_addr = rt->rt_src;

	return csk;

rel_rt:
	ip_rt_put(rt);
	if (csk)
		cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
			unsigned int opt)
{
	csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
	dst_confirm(csk->dst);
	smp_mb();
	cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_established);
static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
		csk, csk->state, csk->flags, csk->user_data);

	if (csk->state != CTP_ESTABLISHED) {
		read_lock_bh(&csk->callback_lock);
		if (csk->user_data)
			iscsi_conn_failure(csk->user_data,
					ISCSI_ERR_CONN_FAILED);
		read_unlock_bh(&csk->callback_lock);
	}
}
void cxgbi_sock_closed(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
		return;
	if (csk->saddr.sin_port)
		sock_put_port(csk);
	if (csk->dst)
		dst_release(csk->dst);
	csk->cdev->csk_release_offload_resources(csk);
	cxgbi_sock_set_state(csk, CTP_CLOSED);
	cxgbi_inform_iscsi_conn_closing(csk);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_closed);
static void need_active_close(struct cxgbi_sock *csk)
{
	int data_lost;
	int close_req = 0;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	spin_lock_bh(&csk->lock);
	dst_confirm(csk->dst);
	data_lost = skb_queue_len(&csk->receive_queue);
	__skb_queue_purge(&csk->receive_queue);

	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	else if (csk->state == CTP_ESTABLISHED) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
	} else if (csk->state == CTP_PASSIVE_CLOSE) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
	}

	if (close_req) {
		if (data_lost)
			csk->cdev->csk_send_abort_req(csk);
		else
			csk->cdev->csk_send_close_req(csk);
	}

	spin_unlock_bh(&csk->lock);
}
void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
{
	pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
		&csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
		errno);

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	csk->err = errno;
	cxgbi_sock_closed(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD))
			cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
		else {
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD);
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
			if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
				pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
					csk, csk->state, csk->flags, csk->tid);
			cxgbi_sock_closed(csk);
		}
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);
void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);

	spin_lock_bh(&csk->lock);

	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ESTABLISHED:
		cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
		break;
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
		break;
	case CTP_CLOSE_WAIT_1:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
	cxgbi_inform_iscsi_conn_closing(csk);
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);
void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);

	spin_lock_bh(&csk->lock);

	csk->snd_una = snd_nxt - 1;
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
		break;
	case CTP_CLOSE_WAIT_1:
	case CTP_CLOSE_WAIT_2:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);
void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
			   unsigned int snd_una, int seq_chk)
{
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
		csk, csk->state, csk->flags, csk->tid, credits,
		csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);

	spin_lock_bh(&csk->lock);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbi_sock_peek_wr(csk);

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
				csk, csk->state, csk->flags, csk->tid, credits,
				csk->wr_cred, csk->wr_una_cred);
			break;
		}

		if (unlikely(credits < p->csum)) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->state, csk->flags, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				p->csum);
			p->csum -= credits;
			break;
		} else {
			cxgbi_sock_dequeue_wr(csk);
			credits -= p->csum;
			kfree_skb(p);
		}
	}

	cxgbi_sock_check_wr_invariants(csk);

	if (seq_chk) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
				csk, csk->state, csk->flags, csk->tid, snd_una,
				csk->snd_una);
			goto done;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
		}
	}

	if (skb_queue_len(&csk->write_queue)) {
		if (csk->cdev->csk_push_tx_frames(csk, 0))
			cxgbi_conn_tx_open(csk);
	} else
		cxgbi_conn_tx_open(csk);
done:
	spin_unlock_bh(&csk->lock);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);
static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
					     unsigned short mtu)
{
	int i = 0;

	while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
		++i;

	return i;
}

unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = csk->dst;

	csk->advmss = dst_metric_advmss(dst);

	if (csk->advmss > pmtu - 40)
		csk->advmss = pmtu - 40;
	if (csk->advmss < csk->cdev->mtus[0] - 40)
		csk->advmss = csk->cdev->mtus[0] - 40;
	idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);

	return idx;
}
EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
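/*
 * Illustrative example (values invented for the sketch): with a path MTU of
 * 1500 and a device MTU table of {1500, 9000}, the advertised MSS is first
 * clamped to 1500 - 40 = 1460 (the 40 bytes being the usual allowance for
 * the IPv4 + TCP headers), and cxgbi_sock_find_best_mtu(csk, 1460 + 40)
 * returns index 0, since mtus[1] = 9000 already exceeds 1500.
 */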
void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
	__skb_queue_tail(&csk->write_queue, skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);

void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);

void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
{
	int pending = cxgbi_sock_count_pending_wrs(csk);

	if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
			csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&csk->lock);

	if (csk->state != CTP_ESTABLISHED) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
			csk, csk->state, csk->flags, csk->tid);
		err = -EAGAIN;
		goto out_err;
	}

	if (csk->err) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
			csk, csk->state, csk->flags, csk->tid, csk->err);
		err = -EPIPE;
		goto out_err;
	}

	if (csk->write_seq - csk->snd_una >= cdev->snd_win) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
			csk, csk->state, csk->flags, csk->tid, csk->write_seq,
			csk->snd_una, cdev->snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	while (skb) {
		int frags = skb_shinfo(skb)->nr_frags +
				(skb->len != skb->data_len);

		if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
			pr_err("csk 0x%p, skb head %u < %u.\n",
				csk, skb_headroom(skb), cdev->skb_tx_rsvd);
			err = -EINVAL;
			goto out_err;
		}

		if (frags >= SKB_WR_LIST_SIZE) {
			pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
				csk, skb_shinfo(skb)->nr_frags, skb->len,
				skb->data_len, (uint)(SKB_WR_LIST_SIZE));
			err = -EINVAL;
			goto out_err;
		}

		next = skb->next;
		skb->next = NULL;
		cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
		cxgbi_sock_skb_entail(csk, skb);
		copied += skb->len;
		csk->write_seq += skb->len +
			cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
		skb = next;
	}
done:
	if (likely(skb_queue_len(&csk->write_queue)))
		cdev->csk_push_tx_frames(csk, 1);
	spin_unlock_bh(&csk->lock);
	return copied;

out_err:
	if (copied == 0 && err == -EPIPE)
		copied = csk->err ? csk->err : -EPIPE;
	else
		copied = err;
	goto done;
}
/*
 * Direct Data Placement -
 * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted
 * final destination host-memory buffers based on the Initiator Task Tag (ITT)
 * in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
 * The host memory address is programmed into h/w in the format of pagepod
 * entries.
 * The location of the pagepod entry is encoded into ddp tag which is used as
 * the base for ITT/TTT.
 */
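/*
 * Sketch of how a ddp tag is put together (see ddp_tag_reserve() and
 * cxgbi_ddp_init() further down for the real bit budget, which depends on
 * the adapter's pagepod memory):
 *
 *	tag  = cxgbi_ddp_tag_base(tformat, sw_tag);
 *	tag |= idx << PPOD_IDX_SHIFT;
 *
 * i.e. the reserved low bits locate the pagepod entry for the hardware,
 * while the remaining bits carry the software tag built from the iSCSI
 * task index and the session age.
 */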
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
static unsigned char page_idx = DDP_PGIDX_MAX;

static unsigned char sw_tag_idx_bits;
static unsigned char sw_tag_age_bits;
960 * Direct-Data Placement page size adjustment
962 static int ddp_adjust_page_table(void)
965 unsigned int base_order
, order
;
967 if (PAGE_SIZE
< (1UL << ddp_page_shift
[0])) {
968 pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n",
969 PAGE_SIZE
, 1UL << ddp_page_shift
[0]);
973 base_order
= get_order(1UL << ddp_page_shift
[0]);
974 order
= get_order(1UL << PAGE_SHIFT
);
976 for (i
= 0; i
< DDP_PGIDX_MAX
; i
++) {
977 /* first is the kernel page size, then just doubling */
978 ddp_page_order
[i
] = order
- base_order
+ i
;
979 ddp_page_shift
[i
] = PAGE_SHIFT
+ i
;
984 static int ddp_find_page_index(unsigned long pgsz
)
988 for (i
= 0; i
< DDP_PGIDX_MAX
; i
++) {
989 if (pgsz
== (1UL << ddp_page_shift
[i
]))
992 pr_info("ddp page size %lu not supported.\n", pgsz
);
993 return DDP_PGIDX_MAX
;
996 static void ddp_setup_host_page_size(void)
998 if (page_idx
== DDP_PGIDX_MAX
) {
999 page_idx
= ddp_find_page_index(PAGE_SIZE
);
1001 if (page_idx
== DDP_PGIDX_MAX
) {
1002 pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE
);
1003 if (ddp_adjust_page_table() < 0) {
1004 pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE
);
1007 page_idx
= ddp_find_page_index(PAGE_SIZE
);
1009 pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE
, page_idx
);
1013 void cxgbi_ddp_page_size_factor(int *pgsz_factor
)
1017 for (i
= 0; i
< DDP_PGIDX_MAX
; i
++)
1018 pgsz_factor
[i
] = ddp_page_order
[i
];
1020 EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor
);
1023 * DDP setup & teardown
1026 void cxgbi_ddp_ppod_set(struct cxgbi_pagepod
*ppod
,
1027 struct cxgbi_pagepod_hdr
*hdr
,
1028 struct cxgbi_gather_list
*gl
, unsigned int gidx
)
1032 memcpy(ppod
, hdr
, sizeof(*hdr
));
1033 for (i
= 0; i
< (PPOD_PAGES_MAX
+ 1); i
++, gidx
++) {
1034 ppod
->addr
[i
] = gidx
< gl
->nelem
?
1035 cpu_to_be64(gl
->phys_addr
[gidx
]) : 0ULL;
1038 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set
);
1040 void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod
*ppod
)
1042 memset(ppod
, 0, sizeof(*ppod
));
1044 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear
);
1046 static inline int ddp_find_unused_entries(struct cxgbi_ddp_info
*ddp
,
1047 unsigned int start
, unsigned int max
,
1049 struct cxgbi_gather_list
*gl
)
1051 unsigned int i
, j
, k
;
1053 /* not enough entries */
1054 if ((max
- start
) < count
) {
1055 log_debug(1 << CXGBI_DBG_DDP
,
1056 "NOT enough entries %u+%u < %u.\n", start
, count
, max
);
1061 spin_lock(&ddp
->map_lock
);
1062 for (i
= start
; i
< max
;) {
1063 for (j
= 0, k
= i
; j
< count
; j
++, k
++) {
1068 for (j
= 0, k
= i
; j
< count
; j
++, k
++)
1069 ddp
->gl_map
[k
] = gl
;
1070 spin_unlock(&ddp
->map_lock
);
1075 spin_unlock(&ddp
->map_lock
);
1076 log_debug(1 << CXGBI_DBG_DDP
,
1077 "NO suitable entries %u available.\n", count
);
1081 static inline void ddp_unmark_entries(struct cxgbi_ddp_info
*ddp
,
1082 int start
, int count
)
1084 spin_lock(&ddp
->map_lock
);
1085 memset(&ddp
->gl_map
[start
], 0,
1086 count
* sizeof(struct cxgbi_gather_list
*));
1087 spin_unlock(&ddp
->map_lock
);
1090 static inline void ddp_gl_unmap(struct pci_dev
*pdev
,
1091 struct cxgbi_gather_list
*gl
)
1095 for (i
= 0; i
< gl
->nelem
; i
++)
1096 dma_unmap_page(&pdev
->dev
, gl
->phys_addr
[i
], PAGE_SIZE
,
1097 PCI_DMA_FROMDEVICE
);
1100 static inline int ddp_gl_map(struct pci_dev
*pdev
,
1101 struct cxgbi_gather_list
*gl
)
1105 for (i
= 0; i
< gl
->nelem
; i
++) {
1106 gl
->phys_addr
[i
] = dma_map_page(&pdev
->dev
, gl
->pages
[i
], 0,
1108 PCI_DMA_FROMDEVICE
);
1109 if (unlikely(dma_mapping_error(&pdev
->dev
, gl
->phys_addr
[i
]))) {
1110 log_debug(1 << CXGBI_DBG_DDP
,
1111 "page %d 0x%p, 0x%p dma mapping err.\n",
1112 i
, gl
->pages
[i
], pdev
);
1119 unsigned int nelem
= gl
->nelem
;
1122 ddp_gl_unmap(pdev
, gl
);
1128 static void ddp_release_gl(struct cxgbi_gather_list
*gl
,
1129 struct pci_dev
*pdev
)
1131 ddp_gl_unmap(pdev
, gl
);
1135 static struct cxgbi_gather_list
*ddp_make_gl(unsigned int xferlen
,
1136 struct scatterlist
*sgl
,
1138 struct pci_dev
*pdev
,
1141 struct cxgbi_gather_list
*gl
;
1142 struct scatterlist
*sg
= sgl
;
1143 struct page
*sgpage
= sg_page(sg
);
1144 unsigned int sglen
= sg
->length
;
1145 unsigned int sgoffset
= sg
->offset
;
1146 unsigned int npages
= (xferlen
+ sgoffset
+ PAGE_SIZE
- 1) >>
1150 if (xferlen
< DDP_THRESHOLD
) {
1151 log_debug(1 << CXGBI_DBG_DDP
,
1152 "xfer %u < threshold %u, no ddp.\n",
1153 xferlen
, DDP_THRESHOLD
);
1157 gl
= kzalloc(sizeof(struct cxgbi_gather_list
) +
1158 npages
* (sizeof(dma_addr_t
) +
1159 sizeof(struct page
*)), gfp
);
1161 log_debug(1 << CXGBI_DBG_DDP
,
1162 "xfer %u, %u pages, OOM.\n", xferlen
, npages
);
1166 log_debug(1 << CXGBI_DBG_DDP
,
1167 "xfer %u, sgl %u, gl max %u.\n", xferlen
, sgcnt
, npages
);
1169 gl
->pages
= (struct page
**)&gl
->phys_addr
[npages
];
1171 gl
->length
= xferlen
;
1172 gl
->offset
= sgoffset
;
1173 gl
->pages
[0] = sgpage
;
1175 for (i
= 1, sg
= sg_next(sgl
), j
= 0; i
< sgcnt
;
1176 i
++, sg
= sg_next(sg
)) {
1177 struct page
*page
= sg_page(sg
);
1179 if (sgpage
== page
&& sg
->offset
== sgoffset
+ sglen
)
1180 sglen
+= sg
->length
;
1182 /* make sure the sgl is fit for ddp:
1183 * each has the same page size, and
1184 * all of the middle pages are used completely
1186 if ((j
&& sgoffset
) || ((i
!= sgcnt
- 1) &&
1187 ((sglen
+ sgoffset
) & ~PAGE_MASK
))) {
1188 log_debug(1 << CXGBI_DBG_DDP
,
1189 "page %d/%u, %u + %u.\n",
1190 i
, sgcnt
, sgoffset
, sglen
);
1195 if (j
== gl
->nelem
|| sg
->offset
) {
1196 log_debug(1 << CXGBI_DBG_DDP
,
1197 "page %d/%u, offset %u.\n",
1198 j
, gl
->nelem
, sg
->offset
);
1201 gl
->pages
[j
] = page
;
1203 sgoffset
= sg
->offset
;
1209 if (ddp_gl_map(pdev
, gl
) < 0)
1219 static void ddp_tag_release(struct cxgbi_hba
*chba
, u32 tag
)
1221 struct cxgbi_device
*cdev
= chba
->cdev
;
1222 struct cxgbi_ddp_info
*ddp
= cdev
->ddp
;
1225 idx
= (tag
>> PPOD_IDX_SHIFT
) & ddp
->idx_mask
;
1226 if (idx
< ddp
->nppods
) {
1227 struct cxgbi_gather_list
*gl
= ddp
->gl_map
[idx
];
1230 if (!gl
|| !gl
->nelem
) {
1231 pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n",
1232 tag
, idx
, gl
, gl
? gl
->nelem
: 0);
1235 npods
= (gl
->nelem
+ PPOD_PAGES_MAX
- 1) >> PPOD_PAGES_SHIFT
;
1236 log_debug(1 << CXGBI_DBG_DDP
,
1237 "tag 0x%x, release idx %u, npods %u.\n",
1239 cdev
->csk_ddp_clear(chba
, tag
, idx
, npods
);
1240 ddp_unmark_entries(ddp
, idx
, npods
);
1241 ddp_release_gl(gl
, ddp
->pdev
);
1243 pr_warn("tag 0x%x, idx %u > max %u.\n", tag
, idx
, ddp
->nppods
);
1246 static int ddp_tag_reserve(struct cxgbi_sock
*csk
, unsigned int tid
,
1247 u32 sw_tag
, u32
*tagp
, struct cxgbi_gather_list
*gl
,
1250 struct cxgbi_device
*cdev
= csk
->cdev
;
1251 struct cxgbi_ddp_info
*ddp
= cdev
->ddp
;
1252 struct cxgbi_tag_format
*tformat
= &cdev
->tag_format
;
1253 struct cxgbi_pagepod_hdr hdr
;
1259 npods
= (gl
->nelem
+ PPOD_PAGES_MAX
- 1) >> PPOD_PAGES_SHIFT
;
1260 if (ddp
->idx_last
== ddp
->nppods
)
1261 idx
= ddp_find_unused_entries(ddp
, 0, ddp
->nppods
,
1264 idx
= ddp_find_unused_entries(ddp
, ddp
->idx_last
+ 1,
1267 if (idx
< 0 && ddp
->idx_last
>= npods
) {
1268 idx
= ddp_find_unused_entries(ddp
, 0,
1269 min(ddp
->idx_last
+ npods
, ddp
->nppods
),
1274 log_debug(1 << CXGBI_DBG_DDP
,
1275 "xferlen %u, gl %u, npods %u NO DDP.\n",
1276 gl
->length
, gl
->nelem
, npods
);
1280 if (cdev
->csk_ddp_alloc_gl_skb
) {
1281 err
= cdev
->csk_ddp_alloc_gl_skb(ddp
, idx
, npods
, gfp
);
1283 goto unmark_entries
;
1286 tag
= cxgbi_ddp_tag_base(tformat
, sw_tag
);
1287 tag
|= idx
<< PPOD_IDX_SHIFT
;
1290 hdr
.vld_tid
= htonl(PPOD_VALID_FLAG
| PPOD_TID(tid
));
1291 hdr
.pgsz_tag_clr
= htonl(tag
& ddp
->rsvd_tag_mask
);
1292 hdr
.max_offset
= htonl(gl
->length
);
1293 hdr
.page_offset
= htonl(gl
->offset
);
1295 err
= cdev
->csk_ddp_set(csk
, &hdr
, idx
, npods
, gl
);
1297 if (cdev
->csk_ddp_free_gl_skb
)
1298 cdev
->csk_ddp_free_gl_skb(ddp
, idx
, npods
);
1299 goto unmark_entries
;
1302 ddp
->idx_last
= idx
;
1303 log_debug(1 << CXGBI_DBG_DDP
,
1304 "xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n",
1305 gl
->length
, gl
->nelem
, gl
->offset
, tid
, sw_tag
, tag
, idx
,
1311 ddp_unmark_entries(ddp
, idx
, npods
);
1315 int cxgbi_ddp_reserve(struct cxgbi_sock
*csk
, unsigned int *tagp
,
1316 unsigned int sw_tag
, unsigned int xferlen
,
1317 struct scatterlist
*sgl
, unsigned int sgcnt
, gfp_t gfp
)
1319 struct cxgbi_device
*cdev
= csk
->cdev
;
1320 struct cxgbi_tag_format
*tformat
= &cdev
->tag_format
;
1321 struct cxgbi_gather_list
*gl
;
1324 if (page_idx
>= DDP_PGIDX_MAX
|| !cdev
->ddp
||
1325 xferlen
< DDP_THRESHOLD
) {
1326 log_debug(1 << CXGBI_DBG_DDP
,
1327 "pgidx %u, xfer %u, NO ddp.\n", page_idx
, xferlen
);
1331 if (!cxgbi_sw_tag_usable(tformat
, sw_tag
)) {
1332 log_debug(1 << CXGBI_DBG_DDP
,
1333 "sw_tag 0x%x NOT usable.\n", sw_tag
);
1337 gl
= ddp_make_gl(xferlen
, sgl
, sgcnt
, cdev
->pdev
, gfp
);
1341 err
= ddp_tag_reserve(csk
, csk
->tid
, sw_tag
, tagp
, gl
, gfp
);
1343 ddp_release_gl(gl
, cdev
->pdev
);
1348 static void ddp_destroy(struct kref
*kref
)
1350 struct cxgbi_ddp_info
*ddp
= container_of(kref
,
1351 struct cxgbi_ddp_info
,
1353 struct cxgbi_device
*cdev
= ddp
->cdev
;
1356 pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp
, cdev
);
1358 while (i
< ddp
->nppods
) {
1359 struct cxgbi_gather_list
*gl
= ddp
->gl_map
[i
];
1362 int npods
= (gl
->nelem
+ PPOD_PAGES_MAX
- 1)
1363 >> PPOD_PAGES_SHIFT
;
1364 pr_info("cdev 0x%p, ddp %d + %d.\n", cdev
, i
, npods
);
1366 if (cdev
->csk_ddp_free_gl_skb
)
1367 cdev
->csk_ddp_free_gl_skb(ddp
, i
, npods
);
1372 cxgbi_free_big_mem(ddp
);
1375 int cxgbi_ddp_cleanup(struct cxgbi_device
*cdev
)
1377 struct cxgbi_ddp_info
*ddp
= cdev
->ddp
;
1379 log_debug(1 << CXGBI_DBG_DDP
,
1380 "cdev 0x%p, release ddp 0x%p.\n", cdev
, ddp
);
1383 return kref_put(&ddp
->refcnt
, ddp_destroy
);
1386 EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup
);
1388 int cxgbi_ddp_init(struct cxgbi_device
*cdev
,
1389 unsigned int llimit
, unsigned int ulimit
,
1390 unsigned int max_txsz
, unsigned int max_rxsz
)
1392 struct cxgbi_ddp_info
*ddp
;
1393 unsigned int ppmax
, bits
;
1395 ppmax
= (ulimit
- llimit
+ 1) >> PPOD_SIZE_SHIFT
;
1396 bits
= __ilog2_u32(ppmax
) + 1;
1397 if (bits
> PPOD_IDX_MAX_SIZE
)
1398 bits
= PPOD_IDX_MAX_SIZE
;
1399 ppmax
= (1 << (bits
- 1)) - 1;
1401 ddp
= cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info
) +
1402 ppmax
* (sizeof(struct cxgbi_gather_list
*) +
1403 sizeof(struct sk_buff
*)),
1406 pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev
, ppmax
);
1409 ddp
->gl_map
= (struct cxgbi_gather_list
**)(ddp
+ 1);
1410 ddp
->gl_skb
= (struct sk_buff
**)(((char *)ddp
->gl_map
) +
1411 ppmax
* sizeof(struct cxgbi_gather_list
*));
1414 spin_lock_init(&ddp
->map_lock
);
1415 kref_init(&ddp
->refcnt
);
1418 ddp
->pdev
= cdev
->pdev
;
1419 ddp
->llimit
= llimit
;
1420 ddp
->ulimit
= ulimit
;
1421 ddp
->max_txsz
= min_t(unsigned int, max_txsz
, ULP2_MAX_PKT_SIZE
);
1422 ddp
->max_rxsz
= min_t(unsigned int, max_rxsz
, ULP2_MAX_PKT_SIZE
);
1423 ddp
->nppods
= ppmax
;
1424 ddp
->idx_last
= ppmax
;
1425 ddp
->idx_bits
= bits
;
1426 ddp
->idx_mask
= (1 << bits
) - 1;
1427 ddp
->rsvd_tag_mask
= (1 << (bits
+ PPOD_IDX_SHIFT
)) - 1;
1429 cdev
->tag_format
.sw_bits
= sw_tag_idx_bits
+ sw_tag_age_bits
;
1430 cdev
->tag_format
.rsvd_bits
= ddp
->idx_bits
;
1431 cdev
->tag_format
.rsvd_shift
= PPOD_IDX_SHIFT
;
1432 cdev
->tag_format
.rsvd_mask
= (1 << cdev
->tag_format
.rsvd_bits
) - 1;
1434 pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n",
1435 cdev
->ports
[0]->name
, cdev
->tag_format
.sw_bits
,
1436 cdev
->tag_format
.rsvd_bits
, cdev
->tag_format
.rsvd_shift
,
1437 cdev
->tag_format
.rsvd_mask
);
1439 cdev
->tx_max_size
= min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD
,
1440 ddp
->max_txsz
- ISCSI_PDU_NONPAYLOAD_LEN
);
1441 cdev
->rx_max_size
= min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD
,
1442 ddp
->max_rxsz
- ISCSI_PDU_NONPAYLOAD_LEN
);
1444 log_debug(1 << CXGBI_DBG_DDP
,
1445 "%s max payload size: %u/%u, %u/%u.\n",
1446 cdev
->ports
[0]->name
, cdev
->tx_max_size
, ddp
->max_txsz
,
1447 cdev
->rx_max_size
, ddp
->max_rxsz
);
1450 EXPORT_SYMBOL_GPL(cxgbi_ddp_init
);
1453 * APIs interacting with open-iscsi libraries
1456 static unsigned char padding
[4];
1458 static void task_release_itt(struct iscsi_task
*task
, itt_t hdr_itt
)
1460 struct scsi_cmnd
*sc
= task
->sc
;
1461 struct iscsi_tcp_conn
*tcp_conn
= task
->conn
->dd_data
;
1462 struct cxgbi_conn
*cconn
= tcp_conn
->dd_data
;
1463 struct cxgbi_hba
*chba
= cconn
->chba
;
1464 struct cxgbi_tag_format
*tformat
= &chba
->cdev
->tag_format
;
1465 u32 tag
= ntohl((__force u32
)hdr_itt
);
1467 log_debug(1 << CXGBI_DBG_DDP
,
1468 "cdev 0x%p, release tag 0x%x.\n", chba
->cdev
, tag
);
1470 (scsi_bidi_cmnd(sc
) || sc
->sc_data_direction
== DMA_FROM_DEVICE
) &&
1471 cxgbi_is_ddp_tag(tformat
, tag
))
1472 ddp_tag_release(chba
, tag
);
1475 static int task_reserve_itt(struct iscsi_task
*task
, itt_t
*hdr_itt
)
1477 struct scsi_cmnd
*sc
= task
->sc
;
1478 struct iscsi_conn
*conn
= task
->conn
;
1479 struct iscsi_session
*sess
= conn
->session
;
1480 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1481 struct cxgbi_conn
*cconn
= tcp_conn
->dd_data
;
1482 struct cxgbi_hba
*chba
= cconn
->chba
;
1483 struct cxgbi_tag_format
*tformat
= &chba
->cdev
->tag_format
;
1484 u32 sw_tag
= (sess
->age
<< cconn
->task_idx_bits
) | task
->itt
;
1489 (scsi_bidi_cmnd(sc
) || sc
->sc_data_direction
== DMA_FROM_DEVICE
)) {
1490 err
= cxgbi_ddp_reserve(cconn
->cep
->csk
, &tag
, sw_tag
,
1491 scsi_in(sc
)->length
,
1492 scsi_in(sc
)->table
.sgl
,
1493 scsi_in(sc
)->table
.nents
,
1496 log_debug(1 << CXGBI_DBG_DDP
,
1497 "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
1498 cconn
->cep
->csk
, task
, scsi_in(sc
)->length
,
1499 scsi_in(sc
)->table
.nents
);
1503 tag
= cxgbi_set_non_ddp_tag(tformat
, sw_tag
);
1504 /* the itt need to sent in big-endian order */
1505 *hdr_itt
= (__force itt_t
)htonl(tag
);
1507 log_debug(1 << CXGBI_DBG_DDP
,
1508 "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
1509 chba
->cdev
, task
, sw_tag
, task
->itt
, sess
->age
, tag
, *hdr_itt
);
1513 void cxgbi_parse_pdu_itt(struct iscsi_conn
*conn
, itt_t itt
, int *idx
, int *age
)
1515 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1516 struct cxgbi_conn
*cconn
= tcp_conn
->dd_data
;
1517 struct cxgbi_device
*cdev
= cconn
->chba
->cdev
;
1518 u32 tag
= ntohl((__force u32
) itt
);
1521 sw_bits
= cxgbi_tag_nonrsvd_bits(&cdev
->tag_format
, tag
);
1523 *idx
= sw_bits
& ((1 << cconn
->task_idx_bits
) - 1);
1525 *age
= (sw_bits
>> cconn
->task_idx_bits
) & ISCSI_AGE_MASK
;
1527 log_debug(1 << CXGBI_DBG_DDP
,
1528 "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
1529 cdev
, tag
, itt
, sw_bits
, idx
? *idx
: 0xFFFFF,
1532 EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt
);
1534 void cxgbi_conn_tx_open(struct cxgbi_sock
*csk
)
1536 struct iscsi_conn
*conn
= csk
->user_data
;
1539 log_debug(1 << CXGBI_DBG_SOCK
,
1540 "csk 0x%p, cid %d.\n", csk
, conn
->id
);
1541 iscsi_conn_queue_work(conn
);
1544 EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open
);
1547 * pdu receive, interact with libiscsi_tcp
1549 static inline int read_pdu_skb(struct iscsi_conn
*conn
,
1550 struct sk_buff
*skb
,
1551 unsigned int offset
,
1557 bytes_read
= iscsi_tcp_recv_skb(conn
, skb
, offset
, offloaded
, &status
);
1559 case ISCSI_TCP_CONN_ERR
:
1560 pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
1561 skb
, offset
, offloaded
);
1563 case ISCSI_TCP_SUSPENDED
:
1564 log_debug(1 << CXGBI_DBG_PDU_RX
,
1565 "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
1566 skb
, offset
, offloaded
, bytes_read
);
1567 /* no transfer - just have caller flush queue */
1569 case ISCSI_TCP_SKB_DONE
:
1570 pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
1571 skb
, offset
, offloaded
);
1573 * pdus should always fit in the skb and we should get
1574 * segment done notifcation.
1576 iscsi_conn_printk(KERN_ERR
, conn
, "Invalid pdu or skb.");
1578 case ISCSI_TCP_SEGMENT_DONE
:
1579 log_debug(1 << CXGBI_DBG_PDU_RX
,
1580 "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
1581 skb
, offset
, offloaded
, bytes_read
);
1584 pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
1585 skb
, offset
, offloaded
, status
);
1590 static int skb_read_pdu_bhs(struct iscsi_conn
*conn
, struct sk_buff
*skb
)
1592 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1594 log_debug(1 << CXGBI_DBG_PDU_RX
,
1595 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1596 conn
, skb
, skb
->len
, cxgbi_skcb_flags(skb
));
1598 if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn
)) {
1599 pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn
, skb
);
1600 iscsi_conn_failure(conn
, ISCSI_ERR_PROTO
);
1604 if (conn
->hdrdgst_en
&&
1605 cxgbi_skcb_test_flag(skb
, SKCBF_RX_HCRC_ERR
)) {
1606 pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn
, skb
);
1607 iscsi_conn_failure(conn
, ISCSI_ERR_HDR_DGST
);
1611 return read_pdu_skb(conn
, skb
, 0, 0);
1614 static int skb_read_pdu_data(struct iscsi_conn
*conn
, struct sk_buff
*lskb
,
1615 struct sk_buff
*skb
, unsigned int offset
)
1617 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1619 int opcode
= tcp_conn
->in
.hdr
->opcode
& ISCSI_OPCODE_MASK
;
1621 log_debug(1 << CXGBI_DBG_PDU_RX
,
1622 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1623 conn
, skb
, skb
->len
, cxgbi_skcb_flags(skb
));
1625 if (conn
->datadgst_en
&&
1626 cxgbi_skcb_test_flag(lskb
, SKCBF_RX_DCRC_ERR
)) {
1627 pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
1628 conn
, lskb
, cxgbi_skcb_flags(lskb
));
1629 iscsi_conn_failure(conn
, ISCSI_ERR_DATA_DGST
);
1633 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn
))
1636 /* coalesced, add header digest length */
1637 if (lskb
== skb
&& conn
->hdrdgst_en
)
1638 offset
+= ISCSI_DIGEST_SIZE
;
1640 if (cxgbi_skcb_test_flag(lskb
, SKCBF_RX_DATA_DDPD
))
1643 if (opcode
== ISCSI_OP_SCSI_DATA_IN
)
1644 log_debug(1 << CXGBI_DBG_PDU_RX
,
1645 "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
1646 skb
, opcode
, ntohl(tcp_conn
->in
.hdr
->itt
),
1647 tcp_conn
->in
.datalen
, offloaded
? "is" : "not");
1649 return read_pdu_skb(conn
, skb
, offset
, offloaded
);
1652 static void csk_return_rx_credits(struct cxgbi_sock
*csk
, int copied
)
1654 struct cxgbi_device
*cdev
= csk
->cdev
;
1658 log_debug(1 << CXGBI_DBG_PDU_RX
,
1659 "csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n",
1660 csk
, csk
->state
, csk
->flags
, csk
->tid
, csk
->copied_seq
,
1661 csk
->rcv_wup
, cdev
->rx_credit_thres
,
1664 if (csk
->state
!= CTP_ESTABLISHED
)
1667 credits
= csk
->copied_seq
- csk
->rcv_wup
;
1668 if (unlikely(!credits
))
1670 if (unlikely(cdev
->rx_credit_thres
== 0))
1673 must_send
= credits
+ 16384 >= cdev
->rcv_win
;
1674 if (must_send
|| credits
>= cdev
->rx_credit_thres
)
1675 csk
->rcv_wup
+= cdev
->csk_send_rx_credits(csk
, credits
);
1678 void cxgbi_conn_pdu_ready(struct cxgbi_sock
*csk
)
1680 struct cxgbi_device
*cdev
= csk
->cdev
;
1681 struct iscsi_conn
*conn
= csk
->user_data
;
1682 struct sk_buff
*skb
;
1683 unsigned int read
= 0;
1686 log_debug(1 << CXGBI_DBG_PDU_RX
,
1687 "csk 0x%p, conn 0x%p.\n", csk
, conn
);
1689 if (unlikely(!conn
|| conn
->suspend_rx
)) {
1690 log_debug(1 << CXGBI_DBG_PDU_RX
,
1691 "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
1692 csk
, conn
, conn
? conn
->id
: 0xFF,
1693 conn
? conn
->suspend_rx
: 0xFF);
1698 skb
= skb_peek(&csk
->receive_queue
);
1700 !(cxgbi_skcb_test_flag(skb
, SKCBF_RX_STATUS
))) {
1702 log_debug(1 << CXGBI_DBG_PDU_RX
,
1703 "skb 0x%p, NOT ready 0x%lx.\n",
1704 skb
, cxgbi_skcb_flags(skb
));
1707 __skb_unlink(skb
, &csk
->receive_queue
);
1709 read
+= cxgbi_skcb_rx_pdulen(skb
);
1710 log_debug(1 << CXGBI_DBG_PDU_RX
,
1711 "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
1712 csk
, skb
, skb
->len
, cxgbi_skcb_flags(skb
),
1713 cxgbi_skcb_rx_pdulen(skb
));
1715 if (cxgbi_skcb_test_flag(skb
, SKCBF_RX_COALESCED
)) {
1716 err
= skb_read_pdu_bhs(conn
, skb
);
1718 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
1719 "f 0x%lx, plen %u.\n",
1721 cxgbi_skcb_flags(skb
),
1722 cxgbi_skcb_rx_pdulen(skb
));
1725 err
= skb_read_pdu_data(conn
, skb
, skb
,
1726 err
+ cdev
->skb_rx_extra
);
1728 pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
1729 "f 0x%lx, plen %u.\n",
1731 cxgbi_skcb_flags(skb
),
1732 cxgbi_skcb_rx_pdulen(skb
));
1734 err
= skb_read_pdu_bhs(conn
, skb
);
1736 pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
1737 "f 0x%lx, plen %u.\n",
1739 cxgbi_skcb_flags(skb
),
1740 cxgbi_skcb_rx_pdulen(skb
));
1744 if (cxgbi_skcb_test_flag(skb
, SKCBF_RX_DATA
)) {
1745 struct sk_buff
*dskb
;
1747 dskb
= skb_peek(&csk
->receive_queue
);
1749 pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
1750 " plen %u, NO data.\n",
1752 cxgbi_skcb_flags(skb
),
1753 cxgbi_skcb_rx_pdulen(skb
));
1757 __skb_unlink(dskb
, &csk
->receive_queue
);
1759 err
= skb_read_pdu_data(conn
, skb
, dskb
, 0);
1761 pr_err("data, csk 0x%p, skb 0x%p,%u, "
1762 "f 0x%lx, plen %u, dskb 0x%p,"
1765 cxgbi_skcb_flags(skb
),
1766 cxgbi_skcb_rx_pdulen(skb
),
1770 err
= skb_read_pdu_data(conn
, skb
, skb
, 0);
1779 log_debug(1 << CXGBI_DBG_PDU_RX
, "csk 0x%p, read %u.\n", csk
, read
);
1781 csk
->copied_seq
+= read
;
1782 csk_return_rx_credits(csk
, read
);
1783 conn
->rxdata_octets
+= read
;
1787 pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
1788 csk
, conn
, err
, read
);
1789 iscsi_conn_failure(conn
, ISCSI_ERR_CONN_FAILED
);
1792 EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready
);
1794 static int sgl_seek_offset(struct scatterlist
*sgl
, unsigned int sgcnt
,
1795 unsigned int offset
, unsigned int *off
,
1796 struct scatterlist
**sgp
)
1799 struct scatterlist
*sg
;
1801 for_each_sg(sgl
, sg
, sgcnt
, i
) {
1802 if (offset
< sg
->length
) {
1807 offset
-= sg
->length
;
1812 static int sgl_read_to_frags(struct scatterlist
*sg
, unsigned int sgoffset
,
1813 unsigned int dlen
, skb_frag_t
*frags
,
1816 unsigned int datalen
= dlen
;
1817 unsigned int sglen
= sg
->length
- sgoffset
;
1818 struct page
*page
= sg_page(sg
);
1828 pr_warn("sg %d NULL, len %u/%u.\n",
1837 copy
= min(datalen
, sglen
);
1838 if (i
&& page
== frags
[i
- 1].page
&&
1839 sgoffset
+ sg
->offset
==
1840 frags
[i
- 1].page_offset
+ frags
[i
- 1].size
) {
1841 frags
[i
- 1].size
+= copy
;
1843 if (i
>= frag_max
) {
1844 pr_warn("too many pages %u, dlen %u.\n",
1849 frags
[i
].page
= page
;
1850 frags
[i
].page_offset
= sg
->offset
+ sgoffset
;
1851 frags
[i
].size
= copy
;
1862 int cxgbi_conn_alloc_pdu(struct iscsi_task
*task
, u8 opcode
)
1864 struct iscsi_tcp_conn
*tcp_conn
= task
->conn
->dd_data
;
1865 struct cxgbi_conn
*cconn
= tcp_conn
->dd_data
;
1866 struct cxgbi_device
*cdev
= cconn
->chba
->cdev
;
1867 struct iscsi_conn
*conn
= task
->conn
;
1868 struct iscsi_tcp_task
*tcp_task
= task
->dd_data
;
1869 struct cxgbi_task_data
*tdata
= iscsi_task_cxgbi_data(task
);
1870 struct scsi_cmnd
*sc
= task
->sc
;
1871 int headroom
= SKB_TX_ISCSI_PDU_HEADER_MAX
;
1873 tcp_task
->dd_data
= tdata
;
1876 if (SKB_MAX_HEAD(cdev
->skb_tx_rsvd
) > (512 * MAX_SKB_FRAGS
) &&
1877 (opcode
== ISCSI_OP_SCSI_DATA_OUT
||
1878 (opcode
== ISCSI_OP_SCSI_CMD
&&
1879 (scsi_bidi_cmnd(sc
) || sc
->sc_data_direction
== DMA_TO_DEVICE
))))
1880 /* data could goes into skb head */
1881 headroom
+= min_t(unsigned int,
1882 SKB_MAX_HEAD(cdev
->skb_tx_rsvd
),
1883 conn
->max_xmit_dlength
);
1885 tdata
->skb
= alloc_skb(cdev
->skb_tx_rsvd
+ headroom
, GFP_ATOMIC
);
1887 pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
1888 cdev
->skb_tx_rsvd
, headroom
, opcode
);
1892 skb_reserve(tdata
->skb
, cdev
->skb_tx_rsvd
);
1893 task
->hdr
= (struct iscsi_hdr
*)tdata
->skb
->data
;
1894 task
->hdr_max
= SKB_TX_ISCSI_PDU_HEADER_MAX
; /* BHS + AHS */
1896 /* data_out uses scsi_cmd's itt */
1897 if (opcode
!= ISCSI_OP_SCSI_DATA_OUT
)
1898 task_reserve_itt(task
, &task
->hdr
->itt
);
1900 log_debug(1 << CXGBI_DBG_ISCSI
| 1 << CXGBI_DBG_PDU_TX
,
1901 "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
1902 task
, opcode
, tdata
->skb
, cdev
->skb_tx_rsvd
, headroom
,
1903 conn
->max_xmit_dlength
, ntohl(task
->hdr
->itt
));
1907 EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu
);
1909 static inline void tx_skb_setmode(struct sk_buff
*skb
, int hcrc
, int dcrc
)
1917 cxgbi_skcb_ulp_mode(skb
) = (ULP2_MODE_ISCSI
<< 4) | submode
;
1920 int cxgbi_conn_init_pdu(struct iscsi_task
*task
, unsigned int offset
,
1923 struct iscsi_conn
*conn
= task
->conn
;
1924 struct cxgbi_task_data
*tdata
= iscsi_task_cxgbi_data(task
);
1925 struct sk_buff
*skb
= tdata
->skb
;
1926 unsigned int datalen
= count
;
1927 int i
, padlen
= iscsi_padding(count
);
1930 log_debug(1 << CXGBI_DBG_ISCSI
| 1 << CXGBI_DBG_PDU_TX
,
1931 "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
1932 task
, task
->sc
, skb
, (*skb
->data
) & ISCSI_OPCODE_MASK
,
1933 ntohl(task
->cmdsn
), ntohl(task
->hdr
->itt
), offset
, count
);
1935 skb_put(skb
, task
->hdr_len
);
1936 tx_skb_setmode(skb
, conn
->hdrdgst_en
, datalen
? conn
->datadgst_en
: 0);
1941 struct scsi_data_buffer
*sdb
= scsi_out(task
->sc
);
1942 struct scatterlist
*sg
= NULL
;
1945 tdata
->offset
= offset
;
1946 tdata
->count
= count
;
1947 err
= sgl_seek_offset(
1948 sdb
->table
.sgl
, sdb
->table
.nents
,
1949 tdata
->offset
, &tdata
->sgoffset
, &sg
);
1951 pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
1952 sdb
->table
.nents
, tdata
->offset
, sdb
->length
);
1955 err
= sgl_read_to_frags(sg
, tdata
->sgoffset
, tdata
->count
,
1956 tdata
->frags
, MAX_PDU_FRAGS
);
1958 pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
1959 sdb
->table
.nents
, tdata
->offset
, tdata
->count
);
1962 tdata
->nr_frags
= err
;
1964 if (tdata
->nr_frags
> MAX_SKB_FRAGS
||
1965 (padlen
&& tdata
->nr_frags
== MAX_SKB_FRAGS
)) {
1966 char *dst
= skb
->data
+ task
->hdr_len
;
1967 skb_frag_t
*frag
= tdata
->frags
;
1969 /* data fits in the skb's headroom */
1970 for (i
= 0; i
< tdata
->nr_frags
; i
++, frag
++) {
1971 char *src
= kmap_atomic(frag
->page
,
1974 memcpy(dst
, src
+frag
->page_offset
, frag
->size
);
1976 kunmap_atomic(src
, KM_SOFTIRQ0
);
1979 memset(dst
, 0, padlen
);
1982 skb_put(skb
, count
+ padlen
);
1984 /* data fit into frag_list */
1985 for (i
= 0; i
< tdata
->nr_frags
; i
++)
1986 get_page(tdata
->frags
[i
].page
);
1988 memcpy(skb_shinfo(skb
)->frags
, tdata
->frags
,
1989 sizeof(skb_frag_t
) * tdata
->nr_frags
);
1990 skb_shinfo(skb
)->nr_frags
= tdata
->nr_frags
;
1992 skb
->data_len
+= count
;
1993 skb
->truesize
+= count
;
1997 pg
= virt_to_page(task
->data
);
2000 skb_fill_page_desc(skb
, 0, pg
, offset_in_page(task
->data
),
2003 skb
->data_len
+= count
;
2004 skb
->truesize
+= count
;
2008 i
= skb_shinfo(skb
)->nr_frags
;
2009 skb_fill_page_desc(skb
, skb_shinfo(skb
)->nr_frags
,
2010 virt_to_page(padding
), offset_in_page(padding
),
2013 skb
->data_len
+= padlen
;
2014 skb
->truesize
+= padlen
;
2020 EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu
);
2022 int cxgbi_conn_xmit_pdu(struct iscsi_task
*task
)
2024 struct iscsi_tcp_conn
*tcp_conn
= task
->conn
->dd_data
;
2025 struct cxgbi_conn
*cconn
= tcp_conn
->dd_data
;
2026 struct cxgbi_task_data
*tdata
= iscsi_task_cxgbi_data(task
);
2027 struct sk_buff
*skb
= tdata
->skb
;
2028 unsigned int datalen
;
2032 log_debug(1 << CXGBI_DBG_ISCSI
| 1 << CXGBI_DBG_PDU_TX
,
2033 "task 0x%p, skb NULL.\n", task
);
2037 datalen
= skb
->data_len
;
2039 err
= cxgbi_sock_send_pdus(cconn
->cep
->csk
, skb
);
2043 log_debug(1 << CXGBI_DBG_PDU_TX
,
2044 "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
2045 task
, task
->sc
, skb
, skb
->len
, skb
->data_len
, err
);
2047 if (task
->conn
->hdrdgst_en
)
2048 pdulen
+= ISCSI_DIGEST_SIZE
;
2050 if (datalen
&& task
->conn
->datadgst_en
)
2051 pdulen
+= ISCSI_DIGEST_SIZE
;
2053 task
->conn
->txdata_octets
+= pdulen
;
2057 if (err
== -EAGAIN
|| err
== -ENOBUFS
) {
2058 log_debug(1 << CXGBI_DBG_PDU_TX
,
2059 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
2060 task
, skb
, skb
->len
, skb
->data_len
, err
);
2061 /* reset skb to send when we are called again */
2067 log_debug(1 << CXGBI_DBG_ISCSI
| 1 << CXGBI_DBG_PDU_TX
,
2068 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
2069 task
->itt
, skb
, skb
->len
, skb
->data_len
, err
);
2070 iscsi_conn_printk(KERN_ERR
, task
->conn
, "xmit err %d.\n", err
);
2071 iscsi_conn_failure(task
->conn
, ISCSI_ERR_XMIT_FAILED
);
2074 EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu
);
2076 void cxgbi_cleanup_task(struct iscsi_task
*task
)
2078 struct cxgbi_task_data
*tdata
= iscsi_task_cxgbi_data(task
);
2080 log_debug(1 << CXGBI_DBG_ISCSI
,
2081 "task 0x%p, skb 0x%p, itt 0x%x.\n",
2082 task
, tdata
->skb
, task
->hdr_itt
);
2084 /* never reached the xmit task callout */
2086 __kfree_skb(tdata
->skb
);
2087 memset(tdata
, 0, sizeof(*tdata
));
2089 task_release_itt(task
, task
->hdr_itt
);
2090 iscsi_tcp_cleanup_task(task
);
2092 EXPORT_SYMBOL_GPL(cxgbi_cleanup_task
);
2094 void cxgbi_get_conn_stats(struct iscsi_cls_conn
*cls_conn
,
2095 struct iscsi_stats
*stats
)
2097 struct iscsi_conn
*conn
= cls_conn
->dd_data
;
2099 stats
->txdata_octets
= conn
->txdata_octets
;
2100 stats
->rxdata_octets
= conn
->rxdata_octets
;
2101 stats
->scsicmd_pdus
= conn
->scsicmd_pdus_cnt
;
2102 stats
->dataout_pdus
= conn
->dataout_pdus_cnt
;
2103 stats
->scsirsp_pdus
= conn
->scsirsp_pdus_cnt
;
2104 stats
->datain_pdus
= conn
->datain_pdus_cnt
;
2105 stats
->r2t_pdus
= conn
->r2t_pdus_cnt
;
2106 stats
->tmfcmd_pdus
= conn
->tmfcmd_pdus_cnt
;
2107 stats
->tmfrsp_pdus
= conn
->tmfrsp_pdus_cnt
;
2108 stats
->digest_err
= 0;
2109 stats
->timeout_err
= 0;
2110 stats
->custom_length
= 1;
2111 strcpy(stats
->custom
[0].desc
, "eh_abort_cnt");
2112 stats
->custom
[0].value
= conn
->eh_abort_cnt
;
2114 EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats
);
2116 static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn
*conn
)
2118 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
2119 struct cxgbi_conn
*cconn
= tcp_conn
->dd_data
;
2120 struct cxgbi_device
*cdev
= cconn
->chba
->cdev
;
2121 unsigned int headroom
= SKB_MAX_HEAD(cdev
->skb_tx_rsvd
);
2122 unsigned int max_def
= 512 * MAX_SKB_FRAGS
;
2123 unsigned int max
= max(max_def
, headroom
);
2125 max
= min(cconn
->chba
->cdev
->tx_max_size
, max
);
2126 if (conn
->max_xmit_dlength
)
2127 conn
->max_xmit_dlength
= min(conn
->max_xmit_dlength
, max
);
2129 conn
->max_xmit_dlength
= max
;
2130 cxgbi_align_pdu_size(conn
->max_xmit_dlength
);
2135 static int cxgbi_conn_max_recv_dlength(struct iscsi_conn
*conn
)
2137 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
2138 struct cxgbi_conn
*cconn
= tcp_conn
->dd_data
;
2139 unsigned int max
= cconn
->chba
->cdev
->rx_max_size
;
2141 cxgbi_align_pdu_size(max
);
2143 if (conn
->max_recv_dlength
) {
2144 if (conn
->max_recv_dlength
> max
) {
2145 pr_err("MaxRecvDataSegmentLength %u > %u.\n",
2146 conn
->max_recv_dlength
, max
);
2149 conn
->max_recv_dlength
= min(conn
->max_recv_dlength
, max
);
2150 cxgbi_align_pdu_size(conn
->max_recv_dlength
);
2152 conn
->max_recv_dlength
= max
;
2157 int cxgbi_set_conn_param(struct iscsi_cls_conn
*cls_conn
,
2158 enum iscsi_param param
, char *buf
, int buflen
)
2160 struct iscsi_conn
*conn
= cls_conn
->dd_data
;
2161 struct iscsi_session
*session
= conn
->session
;
2162 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
2163 struct cxgbi_conn
*cconn
= tcp_conn
->dd_data
;
2164 struct cxgbi_sock
*csk
= cconn
->cep
->csk
;
2167 log_debug(1 << CXGBI_DBG_ISCSI
,
2168 "cls_conn 0x%p, param %d, buf(%d) %s.\n",
2169 cls_conn
, param
, buflen
, buf
);
2172 case ISCSI_PARAM_HDRDGST_EN
:
2173 err
= iscsi_set_param(cls_conn
, param
, buf
, buflen
);
2174 if (!err
&& conn
->hdrdgst_en
)
2175 err
= csk
->cdev
->csk_ddp_setup_digest(csk
, csk
->tid
,
2177 conn
->datadgst_en
, 0);
2179 case ISCSI_PARAM_DATADGST_EN
:
2180 err
= iscsi_set_param(cls_conn
, param
, buf
, buflen
);
2181 if (!err
&& conn
->datadgst_en
)
2182 err
= csk
->cdev
->csk_ddp_setup_digest(csk
, csk
->tid
,
2184 conn
->datadgst_en
, 0);
2186 case ISCSI_PARAM_MAX_R2T
:
2187 sscanf(buf
, "%d", &value
);
2188 if (value
<= 0 || !is_power_of_2(value
))
2190 if (session
->max_r2t
== value
)
2192 iscsi_tcp_r2tpool_free(session
);
2193 err
= iscsi_set_param(cls_conn
, param
, buf
, buflen
);
2194 if (!err
&& iscsi_tcp_r2tpool_alloc(session
))
2196 case ISCSI_PARAM_MAX_RECV_DLENGTH
:
2197 err
= iscsi_set_param(cls_conn
, param
, buf
, buflen
);
2199 err
= cxgbi_conn_max_recv_dlength(conn
);
2201 case ISCSI_PARAM_MAX_XMIT_DLENGTH
:
2202 err
= iscsi_set_param(cls_conn
, param
, buf
, buflen
);
2204 err
= cxgbi_conn_max_xmit_dlength(conn
);
2207 return iscsi_set_param(cls_conn
, param
, buf
, buflen
);
2211 EXPORT_SYMBOL_GPL(cxgbi_set_conn_param
);
2213 int cxgbi_get_conn_param(struct iscsi_cls_conn
*cls_conn
,
2214 enum iscsi_param param
, char *buf
)
2216 struct iscsi_conn
*iconn
= cls_conn
->dd_data
;
2219 log_debug(1 << CXGBI_DBG_ISCSI
,
2220 "cls_conn 0x%p, param %d.\n", cls_conn
, param
);
2223 case ISCSI_PARAM_CONN_PORT
:
2224 spin_lock_bh(&iconn
->session
->lock
);
2225 len
= sprintf(buf
, "%hu\n", iconn
->portal_port
);
2226 spin_unlock_bh(&iconn
->session
->lock
);
2228 case ISCSI_PARAM_CONN_ADDRESS
:
2229 spin_lock_bh(&iconn
->session
->lock
);
2230 len
= sprintf(buf
, "%s\n", iconn
->portal_address
);
2231 spin_unlock_bh(&iconn
->session
->lock
);
2234 return iscsi_conn_get_param(cls_conn
, param
, buf
);
2238 EXPORT_SYMBOL_GPL(cxgbi_get_conn_param
);
2240 struct iscsi_cls_conn
*
2241 cxgbi_create_conn(struct iscsi_cls_session
*cls_session
, u32 cid
)
2243 struct iscsi_cls_conn
*cls_conn
;
2244 struct iscsi_conn
*conn
;
2245 struct iscsi_tcp_conn
*tcp_conn
;
2246 struct cxgbi_conn
*cconn
;
2248 cls_conn
= iscsi_tcp_conn_setup(cls_session
, sizeof(*cconn
), cid
);
2252 conn
= cls_conn
->dd_data
;
2253 tcp_conn
= conn
->dd_data
;
2254 cconn
= tcp_conn
->dd_data
;
2255 cconn
->iconn
= conn
;
2257 log_debug(1 << CXGBI_DBG_ISCSI
,
2258 "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
2259 cid
, cid
, cls_session
, cls_conn
, conn
, tcp_conn
, cconn
);
2263 EXPORT_SYMBOL_GPL(cxgbi_create_conn
);
int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
			struct iscsi_cls_conn *cls_conn,
			u64 transport_eph, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_sock *csk;
	int err;

	ep = iscsi_lookup_endpoint(transport_eph);
	if (!ep)
		return -EINVAL;

	/* setup ddp pagesize */
	cep = ep->dd_data;
	csk = cep->csk;
	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0);
	if (err < 0)
		return err;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		return -EINVAL;

	/* calculate the tag idx bits needed for this conn based on cmds_max */
	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;

	write_lock_bh(&csk->callback_lock);
	csk->user_data = conn;
	cconn->chba = cep->chba;
	cconn->cep = cep;
	cep->cconn = cconn;
	write_unlock_bh(&csk->callback_lock);

	cxgbi_conn_max_xmit_dlength(conn);
	cxgbi_conn_max_recv_dlength(conn);

	spin_lock_bh(&conn->session->lock);
	sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr);
	conn->portal_port = ntohs(csk->daddr.sin_port);
	spin_unlock_bh(&conn->session->lock);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
		cls_session, cls_conn, ep, cconn, csk);
	/* init recv engine */
	iscsi_tcp_hdr_recv_prep(tcp_conn);

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);

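/*
 * Session setup sizes the per-task data as sizeof(struct iscsi_tcp_task) +
 * sizeof(struct cxgbi_task_data), so each task's dd_data carries the
 * libiscsi_tcp state followed by cxgbi's per-task DDP/PDU bookkeeping.
 */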
struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
						u16 cmds_max, u16 qdepth,
						u32 initial_cmdsn)
{
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;

	if (!ep) {
		pr_err("missing endpoint.\n");
		return NULL;
	}

	cep = ep->dd_data;
	chba = cep->chba;
	shost = chba->shost;

	BUG_ON(chba != iscsi_host_priv(shost));

	cls_session = iscsi_session_setup(chba->cdev->itp, shost,
					cmds_max, 0,
					sizeof(struct iscsi_tcp_task) +
					sizeof(struct cxgbi_task_data),
					initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	session = cls_session->dd_data;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_create_session);

void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
{
	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls sess 0x%p.\n", cls_session);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);
}
EXPORT_SYMBOL_GPL(cxgbi_destroy_session);

int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			char *buf, int buflen)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not set host param. "
				"netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
		shost, chba, chba->ndev->name, param, buflen, buf);

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		__be32 addr = in_aton(buf);
		log_debug(1 << CXGBI_DBG_ISCSI,
			"hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
		cxgbi_set_iscsi_ipv4(chba, addr);
		return 0;
	}
	case ISCSI_HOST_PARAM_HWADDRESS:
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		return 0;
	default:
		return iscsi_host_set_param(shost, param, buf, buflen);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_set_host_param);

int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			char *buf)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);
	int len = 0;

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not get host param. "
				"netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"shost 0x%p, hba 0x%p,%s, param %d.\n",
		shost, chba, chba->ndev->name, param);

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
		break;
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sprintf(buf, "%s\n", chba->ndev->name);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		__be32 addr;

		addr = cxgbi_get_iscsi_ipv4(chba);
		len = sprintf(buf, "%pI4", &addr);
		log_debug(1 << CXGBI_DBG_ISCSI,
			"hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr);
		break;
	}
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return len;
}
EXPORT_SYMBOL_GPL(cxgbi_get_host_param);

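/*
 * ep_connect flow: resolve the route to the target (cxgbi_check_route()
 * returns an offloaded socket tied to the egress port of a registered
 * cdev), reserve a source port from the device's port map
 * (sock_get_port()), then kick off the active open on the adapter
 * (csk_init_act_open()). Only after that is the iscsi_endpoint allocated
 * and bound to the csk/hba pair.
 */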
struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
					struct sockaddr *dst_addr,
					int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *hba = NULL;
	struct cxgbi_sock *csk;
	int err = -EINVAL;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
		shost, non_blocking, dst_addr);

	if (shost) {
		hba = iscsi_host_priv(shost);
		if (!hba) {
			pr_info("shost 0x%p, priv NULL.\n", shost);
			goto err_out;
		}
	}

	csk = cxgbi_check_route(dst_addr);
	if (IS_ERR(csk))
		return (struct iscsi_endpoint *)csk;
	cxgbi_sock_get(csk);

	if (!hba)
		hba = csk->cdev->hbas[csk->port_id];
	else if (hba != csk->cdev->hbas[csk->port_id]) {
		pr_info("Could not connect through requested host %u, "
			"hba 0x%p != 0x%p (%u).\n",
			shost->host_no, hba,
			csk->cdev->hbas[csk->port_id], csk->port_id);
		err = -ENOSPC;
		goto release_conn;
	}

	err = sock_get_port(csk);
	if (err)
		goto release_conn;

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	err = csk->cdev->csk_init_act_open(csk);
	if (err)
		goto release_conn;

	if (cxgbi_sock_is_closing(csk)) {
		err = -ENOSPC;
		pr_info("csk 0x%p is closing.\n", csk);
		goto release_conn;
	}

	ep = iscsi_create_endpoint(sizeof(*cep));
	if (!ep) {
		err = -ENOMEM;
		pr_info("iscsi alloc ep, OOM.\n");
		goto release_conn;
	}

	cep = ep->dd_data;
	cep->csk = csk;
	cep->chba = hba;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
		ep, cep, csk, hba, hba->ndev->name);
	return ep;

release_conn:
	cxgbi_sock_put(csk);
	cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_connect);

int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk = cep->csk;

	if (!cxgbi_sock_is_established(csk))
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(cxgbi_ep_poll);

void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_conn *cconn = cep->cconn;
	struct cxgbi_sock *csk = cep->csk;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
		ep, cep, cconn, csk, csk->state, csk->flags);

	if (cconn && cconn->iconn) {
		iscsi_suspend_tx(cconn->iconn);
		write_lock_bh(&csk->callback_lock);
		cep->csk->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&csk->callback_lock);
	}
	iscsi_destroy_endpoint(ep);

	if (likely(csk->state >= CTP_ESTABLISHED))
		need_active_close(csk);
	else
		cxgbi_sock_closed(csk);

	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);

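/*
 * cxgbi_iscsi_init()/cxgbi_iscsi_cleanup() only (de)register the
 * transport; the per-HW drivers own the iscsi_transport template that
 * points back at the helpers exported above. A minimal sketch, field
 * names per struct iscsi_transport of this kernel generation (the real
 * tables live in cxgb3i.c/cxgb4i.c and "cxgbXi" is a placeholder):
 *
 *	static struct iscsi_transport cxgbXi_iscsi_transport = {
 *		.owner		 = THIS_MODULE,
 *		.create_session	 = cxgbi_create_session,
 *		.destroy_session = cxgbi_destroy_session,
 *		.create_conn	 = cxgbi_create_conn,
 *		.bind_conn	 = cxgbi_bind_conn,
 *		.set_param	 = cxgbi_set_conn_param,
 *		.get_conn_param	 = cxgbi_get_conn_param,
 *		.set_host_param	 = cxgbi_set_host_param,
 *		.get_host_param	 = cxgbi_get_host_param,
 *		.ep_connect	 = cxgbi_ep_connect,
 *		.ep_poll	 = cxgbi_ep_poll,
 *		.ep_disconnect	 = cxgbi_ep_disconnect,
 *		...
 *	};
 *	cxgbi_iscsi_init(&cxgbXi_iscsi_transport, &cxgbXi_stt);
 */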
int cxgbi_iscsi_init(struct iscsi_transport *itp,
			struct scsi_transport_template **stt)
{
	*stt = iscsi_register_transport(itp);
	if (*stt == NULL) {
		pr_err("unable to register %s transport 0x%p.\n",
			itp->name, itp);
		return -ENODEV;
	}
	log_debug(1 << CXGBI_DBG_ISCSI,
		"%s, registered iscsi transport 0x%p.\n",
		itp->name, itp);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);

void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
			struct scsi_transport_template **stt)
{
	if (*stt) {
		log_debug(1 << CXGBI_DBG_ISCSI,
			"de-register transport 0x%p, %s, stt 0x%p.\n",
			itp, itp->name, *stt);
		*stt = NULL;
		iscsi_unregister_transport(itp);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);

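/*
 * Module init precomputes how many bits of a software tag are needed for
 * the initiator task tag index and for the tag age, straight from the
 * libiscsi masks. As an illustration only: a mask of 0x1fff would yield
 * __ilog2_u32(0x1fff) + 1 = 13 index bits. ddp_setup_host_page_size()
 * then picks the DDP page-size index matching the host PAGE_SIZE.
 */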
static int __init libcxgbi_init_module(void)
{
	sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
	sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;

	pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
		ISCSI_ITT_MASK, sw_tag_idx_bits,
		ISCSI_AGE_MASK, sw_tag_age_bits);

	ddp_setup_host_page_size();
	return 0;
}

static void __exit libcxgbi_exit_module(void)
{
	cxgbi_device_unregister_all(0xFF);
	return;
}

module_init(libcxgbi_init_module);
module_exit(libcxgbi_exit_module);