/*
 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/if_vlan.h>
#include <linux/inet.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <linux/inetdevice.h>	/* ip_dev_find */
#include <linux/module.h>
#include <net/tcp.h>

static unsigned int dbg_level;

#include "libcxgbi.h"
#define	DRV_MODULE_NAME		"libcxgbi"
#define DRV_MODULE_DESC		"Chelsio iSCSI driver library"
#define	DRV_MODULE_VERSION	"0.9.1-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");
/*
 * cxgbi device management
 * maintains a list of the cxgbi devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static LIST_HEAD(cdev_rcu_list);
static DEFINE_SPINLOCK(cdev_rcu_lock);
static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age)
{
	if (age)
		*age = sw_tag & 0x7FFF;
	if (idx)
		*idx = (sw_tag >> 16) & 0x7FFF;
}
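/*
 * For illustration: the sw tag packs the iscsi task index into bits 30..16
 * and the session age into bits 14..0 (see cxgbi_build_sw_tag() below),
 * e.g. idx 0x12 and age 0x3 build sw_tag 0x00120003, which decodes back
 * to idx 0x12, age 0x3.
 */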
int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
				unsigned int max_conn)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
					     sizeof(struct cxgbi_sock *),
					     GFP_KERNEL);
	if (!pmap->port_csk) {
		pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
		return -ENOMEM;
	}

	pmap->max_connect = max_conn;
	pmap->sport_base = base;
	spin_lock_init(&pmap->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);
void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	struct cxgbi_sock *csk;
	int i;

	for (i = 0; i < pmap->max_connect; i++) {
		if (pmap->port_csk[i]) {
			csk = pmap->port_csk[i];
			pmap->port_csk[i] = NULL;
			log_debug(1 << CXGBI_DBG_SOCK,
				"csk 0x%p, cdev 0x%p, offload down.\n",
				csk, cdev);
			spin_lock_bh(&csk->lock);
			cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
			cxgbi_sock_closed(csk);
			spin_unlock_bh(&csk->lock);
			cxgbi_sock_put(csk);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);
static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
	cxgbi_hbas_remove(cdev);
	cxgbi_device_portmap_cleanup(cdev);
	if (cdev->cdev2ppm)
		cxgbi_ppm_release(cdev->cdev2ppm(cdev));
	if (cdev->pmap.max_connect)
		cxgbi_free_big_mem(cdev->pmap.port_csk);
	kfree(cdev);
}
struct cxgbi_device *cxgbi_device_register(unsigned int extra,
					   unsigned int nports)
{
	struct cxgbi_device *cdev;

	cdev = kzalloc(sizeof(*cdev) + extra + nports *
			(sizeof(struct cxgbi_hba *) +
			 sizeof(struct net_device *)),
			GFP_KERNEL);
	if (!cdev) {
		pr_warn("nport %d, OOM.\n", nports);
		return NULL;
	}
	cdev->ports = (struct net_device **)(cdev + 1);
	cdev->hbas = (struct cxgbi_hba **)(((char *)cdev->ports) + nports *
						sizeof(struct net_device *));
	if (extra)
		cdev->dd_data = ((char *)cdev->hbas) +
				nports * sizeof(struct cxgbi_hba *);
	spin_lock_init(&cdev->pmap.lock);

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list_head, &cdev_list);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
	spin_unlock(&cdev_rcu_lock);

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, nports);
	return cdev;
}
EXPORT_SYMBOL_GPL(cxgbi_device_register);
void cxgbi_device_unregister(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u,%s.\n",
		cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");

	mutex_lock(&cdev_mutex);
	list_del(&cdev->list_head);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_del_rcu(&cdev->rcu_node);
	spin_unlock(&cdev_rcu_lock);
	synchronize_rcu();

	cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);
void cxgbi_device_unregister_all(unsigned int flag)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if ((cdev->flags & flag) == flag) {
			mutex_unlock(&cdev_mutex);
			cxgbi_device_unregister(cdev);
			mutex_lock(&cdev_mutex);
		}
	}
	mutex_unlock(&cdev_mutex);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);
struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if (cdev->lldev == lldev) {
			mutex_unlock(&cdev_mutex);
			return cdev;
		}
	}
	mutex_unlock(&cdev_mutex);

	log_debug(1 << CXGBI_DBG_DEV,
		"lldev 0x%p, NO match found.\n", lldev);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);
struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
						 int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		log_debug(1 << CXGBI_DBG_DEV,
			"vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);
struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				rcu_read_unlock();
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	rcu_read_unlock();

	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
#if IS_ENABLED(CONFIG_IPV6)
static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr,
				    MAX_ADDR_LEN)) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match mac found.\n",
		ndev, ndev->name);
	return NULL;
}
#endif
void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
	int i;
	struct cxgbi_hba *chba;

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		chba = cdev->hbas[i];
		if (chba) {
			cdev->hbas[i] = NULL;
			iscsi_host_remove(chba->shost);
			pci_dev_put(cdev->pdev);
			iscsi_host_free(chba->shost);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
		   unsigned int max_id, struct scsi_host_template *sht,
		   struct scsi_transport_template *stt)
{
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	int i, err;

	log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
		if (!shost) {
			pr_info("0x%p, p%d, %s, host alloc failed.\n",
				cdev, i, cdev->ports[i]->name);
			err = -ENOMEM;
			goto err_out;
		}

		shost->transportt = stt;
		shost->max_lun = max_lun;
		shost->max_id = max_id;
		shost->max_channel = 0;
		shost->max_cmd_len = 16;

		chba = iscsi_host_priv(shost);
		chba->cdev = cdev;
		chba->ndev = cdev->ports[i];
		chba->shost = shost;

		log_debug(1 << CXGBI_DBG_DEV,
			"cdev 0x%p, p#%d %s: chba 0x%p.\n",
			cdev, i, cdev->ports[i]->name, chba);

		pci_dev_get(cdev->pdev);
		err = iscsi_host_add(shost, &cdev->pdev->dev);
		if (err) {
			pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
				cdev, i, cdev->ports[i]->name);
			pci_dev_put(cdev->pdev);
			scsi_host_put(shost);
			goto err_out;
		}

		cdev->hbas[i] = chba;
	}

	return 0;

err_out:
	cxgbi_hbas_remove(cdev);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_add);
/*
 * iSCSI offload
 *
 * - source port management
 *   To find a free source port in the port allocation map we use a very simple
 *   rotor scheme to look for the next free port.
 *
 *   If a source port has been specified make sure that it doesn't collide with
 *   our normal source port allocation map.  If it's outside the range of our
 *   allocation/deallocation scheme just let them use it.
 *
 *   If the source port is outside our allocation range, the caller is
 *   responsible for keeping track of their port usage.
 */
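/*
 * Rotor example: with sport_base 1024, max_connect 4 and pmap->next 2,
 * sock_get_port() below probes idx 3, 0, 1, 2 in that order; if idx 3 is
 * free it assigns source port 1024 + 3 = 1027 and records pmap->next = 3,
 * so the next allocation resumes the scan after that slot.
 */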
static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev,
					    unsigned char port_id)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int i;
	unsigned int used;

	if (!pmap->max_connect || !pmap->used)
		return NULL;

	spin_lock_bh(&pmap->lock);
	used = pmap->used;
	for (i = 0; used && i < pmap->max_connect; i++) {
		struct cxgbi_sock *csk = pmap->port_csk[i];

		if (csk) {
			if (csk->port_id == port_id) {
				spin_unlock_bh(&pmap->lock);
				return csk;
			}
			used--;
		}
	}
	spin_unlock_bh(&pmap->lock);

	return NULL;
}
static int sock_get_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int start;
	int idx;
	__be16 *port;

	if (!pmap->max_connect) {
		pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		pr_err("source port NON-ZERO %u.\n",
			ntohs(*port));
		return -EADDRINUSE;
	}

	spin_lock_bh(&pmap->lock);
	if (pmap->used >= pmap->max_connect) {
		spin_unlock_bh(&pmap->lock);
		pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	start = idx = pmap->next;
	do {
		if (++idx >= pmap->max_connect)
			idx = 0;
		if (!pmap->port_csk[idx]) {
			pmap->used++;
			*port = htons(pmap->sport_base + idx);
			pmap->next = idx;
			pmap->port_csk[idx] = csk;
			spin_unlock_bh(&pmap->lock);
			cxgbi_sock_get(csk);
			log_debug(1 << CXGBI_DBG_SOCK,
				"cdev 0x%p, p#%u %s, p %u, %u.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				pmap->sport_base + idx, pmap->next);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&pmap->lock);

	/* should not happen */
	pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
		cdev, csk->port_id, cdev->ports[csk->port_id]->name,
		pmap->next);
	return -EADDRNOTAVAIL;
}
static void sock_put_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	__be16 *port;

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		int idx = ntohs(*port) - pmap->sport_base;

		*port = 0;
		if (idx < 0 || idx >= pmap->max_connect) {
			pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				ntohs(*port));
			return;
		}

		spin_lock_bh(&pmap->lock);
		pmap->port_csk[idx] = NULL;
		pmap->used--;
		spin_unlock_bh(&pmap->lock);

		log_debug(1 << CXGBI_DBG_SOCK,
			"cdev 0x%p, p#%u %s, release %u.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name,
			pmap->sport_base + idx);

		cxgbi_sock_put(csk);
	}
}
/*
 * iscsi tcp connection
 */
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
{
	if (csk->cpl_close) {
		kfree_skb(csk->cpl_close);
		csk->cpl_close = NULL;
	}
	if (csk->cpl_abort_req) {
		kfree_skb(csk->cpl_abort_req);
		csk->cpl_abort_req = NULL;
	}
	if (csk->cpl_abort_rpl) {
		kfree_skb(csk->cpl_abort_rpl);
		csk->cpl_abort_rpl = NULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);
static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
{
	struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);

	if (!csk) {
		pr_info("alloc csk %zu failed.\n", sizeof(*csk));
		return NULL;
	}

	if (cdev->csk_alloc_cpls(csk) < 0) {
		pr_info("csk 0x%p, alloc cpls failed.\n", csk);
		kfree(csk);
		return NULL;
	}

	spin_lock_init(&csk->lock);
	kref_init(&csk->refcnt);
	skb_queue_head_init(&csk->receive_queue);
	skb_queue_head_init(&csk->write_queue);
	timer_setup(&csk->retry_timer, NULL, 0);
	init_completion(&csk->cmpl);
	rwlock_init(&csk->callback_lock);
	csk->cdev = cdev;
	csk->flags = 0;
	cxgbi_sock_set_state(csk, CTP_CLOSED);

	log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);

	return csk;
}
static struct rtable *find_route_ipv4(struct flowi4 *fl4,
				      __be32 saddr, __be32 daddr,
				      __be16 sport, __be16 dport, u8 tos,
				      int ifindex)
{
	struct rtable *rt;

	rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
				   dport, sport, IPPROTO_TCP, tos, ifindex);
	if (IS_ERR(rt))
		return NULL;

	return rt;
}
static struct cxgbi_sock *
cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
{
	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rtable *rt = NULL;
	struct neighbour *n;
	struct flowi4 fl4;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0,
			     daddr->sin_port, 0, ifindex);
	if (!rt) {
		pr_info("no route to ipv4 0x%x, port %u.\n",
			be32_to_cpu(daddr->sin_addr.s_addr),
			be16_to_cpu(daddr->sin_port));
		err = -ENETUNREACH;
		goto err_out;
	}
	dst = &rt->dst;
	n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
	if (!n) {
		err = -ENODEV;
		goto rel_rt;
	}
	ndev = n->dev;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}

	if (ndev->flags & IFF_LOOPBACK) {
		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
		if (!ndev) {
			err = -ENETUNREACH;
			goto rel_neigh;
		}
		mtu = ndev->mtu;
		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
			n->dev->name, ndev->name, mtu);
	}

	if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
		pr_info("%s interface not up.\n", ndev->name);
		err = -ENETDOWN;
		goto rel_neigh;
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
			&daddr->sin_addr.s_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
		port, ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_neigh;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;

	csk->csk_family = AF_INET;
	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
	csk->daddr.sin_port = daddr->sin_port;
	csk->daddr.sin_family = daddr->sin_family;
	csk->saddr.sin_family = daddr->sin_family;
	csk->saddr.sin_addr.s_addr = fl4.saddr;

	neigh_release(n);

	return csk;

rel_neigh:
	neigh_release(n);

rel_rt:
	ip_rt_put(rt);
err_out:
	return ERR_PTR(err);
}
#if IS_ENABLED(CONFIG_IPV6)
static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
					const struct in6_addr *daddr,
					int ifindex)
{
	struct flowi6 fl;

	memset(&fl, 0, sizeof(fl));
	fl.flowi6_oif = ifindex;
	if (saddr)
		memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&fl.daddr, daddr, sizeof(struct in6_addr));
	return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
}
static struct cxgbi_sock *
cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex)
{
	struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rt6_info *rt = NULL;
	struct neighbour *n;
	struct in6_addr pref_saddr;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	rt = find_route_ipv6(NULL, &daddr6->sin6_addr, ifindex);

	if (!rt) {
		pr_info("no route to ipv6 %pI6 port %u\n",
			daddr6->sin6_addr.s6_addr,
			be16_to_cpu(daddr6->sin6_port));
		err = -ENETUNREACH;
		goto err_out;
	}

	dst = &rt->dst;

	n = dst_neigh_lookup(dst, &daddr6->sin6_addr);

	if (!n) {
		pr_info("%pI6, port %u, dst no neighbour.\n",
			daddr6->sin6_addr.s6_addr,
			be16_to_cpu(daddr6->sin6_port));
		err = -ENETUNREACH;
		goto rel_rt;
	}
	ndev = n->dev;

	if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
		pr_info("%s interface not up.\n", ndev->name);
		err = -ENETDOWN;
		goto rel_rt;
	}

	if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
		pr_info("multi-cast route %pI6 port %u, dev %s.\n",
			daddr6->sin6_addr.s6_addr,
			ntohs(daddr6->sin6_port), ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev)
		cdev = cxgbi_device_find_by_mac(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI6 %s, NOT cxgbi device.\n",
			daddr6->sin6_addr.s6_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port,
		ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_rt;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;

	if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) {
		struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);

		err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL,
					 &daddr6->sin6_addr, 0, &pref_saddr);
		if (err) {
			pr_info("failed to get source address to reach %pI6\n",
				&daddr6->sin6_addr);
			goto rel_rt;
		}
	} else {
		pref_saddr = rt->rt6i_prefsrc.addr;
	}

	csk->csk_family = AF_INET6;
	csk->daddr6.sin6_addr = daddr6->sin6_addr;
	csk->daddr6.sin6_port = daddr6->sin6_port;
	csk->daddr6.sin6_family = daddr6->sin6_family;
	csk->saddr6.sin6_family = daddr6->sin6_family;
	csk->saddr6.sin6_addr = pref_saddr;

	neigh_release(n);
	return csk;

rel_rt:
	if (n)
		neigh_release(n);

	ip6_rt_put(rt);
	if (csk)
		cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
			    unsigned int opt)
{
	csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
	dst_confirm(csk->dst);
	smp_mb();
	cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_established);
static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
		csk, csk->state, csk->flags, csk->user_data);

	if (csk->state != CTP_ESTABLISHED) {
		read_lock_bh(&csk->callback_lock);
		if (csk->user_data)
			iscsi_conn_failure(csk->user_data,
					ISCSI_ERR_TCP_CONN_CLOSE);
		read_unlock_bh(&csk->callback_lock);
	}
}
void cxgbi_sock_closed(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
		return;
	if (csk->saddr.sin_port)
		sock_put_port(csk);
	if (csk->dst)
		dst_release(csk->dst);
	csk->cdev->csk_release_offload_resources(csk);
	cxgbi_sock_set_state(csk, CTP_CLOSED);
	cxgbi_inform_iscsi_conn_closing(csk);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_closed);
static void need_active_close(struct cxgbi_sock *csk)
{
	int data_lost;
	int close_req = 0;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	spin_lock_bh(&csk->lock);
	if (csk->dst)
		dst_confirm(csk->dst);
	data_lost = skb_queue_len(&csk->receive_queue);
	__skb_queue_purge(&csk->receive_queue);

	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	else if (csk->state == CTP_ESTABLISHED) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
	} else if (csk->state == CTP_PASSIVE_CLOSE) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
	}

	if (close_req) {
		if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) ||
		    data_lost)
			csk->cdev->csk_send_abort_req(csk);
		else
			csk->cdev->csk_send_close_req(csk);
	}

	spin_unlock_bh(&csk->lock);
}
void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
{
	pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
		&csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
		errno);

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	csk->err = errno;
	cxgbi_sock_closed(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
	struct module *owner = csk->cdev->owner;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);

	module_put(owner);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
		if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
			pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
				csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);
void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ESTABLISHED:
		cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
		break;
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
		break;
	case CTP_CLOSE_WAIT_1:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
	cxgbi_inform_iscsi_conn_closing(csk);
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);
void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	csk->snd_una = snd_nxt - 1;
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
		break;
	case CTP_CLOSE_WAIT_1:
	case CTP_CLOSE_WAIT_2:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);
void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
			   unsigned int snd_una, int seq_chk)
{
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
			csk, csk->state, csk->flags, csk->tid, credits,
			csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);

	spin_lock_bh(&csk->lock);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbi_sock_peek_wr(csk);

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
				csk, csk->state, csk->flags, csk->tid, credits,
				csk->wr_cred, csk->wr_una_cred);
			break;
		}

		if (unlikely(credits < p->csum)) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->state, csk->flags, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				p->csum);
			p->csum -= credits;
			break;
		} else {
			cxgbi_sock_dequeue_wr(csk);
			credits -= p->csum;
			kfree_skb(p);
		}
	}

	cxgbi_sock_check_wr_invariants(csk);

	if (seq_chk) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
				csk, csk->state, csk->flags, csk->tid, snd_una,
				csk->snd_una);
			goto done;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
		}
	}

	if (skb_queue_len(&csk->write_queue)) {
		if (csk->cdev->csk_push_tx_frames(csk, 0))
			cxgbi_conn_tx_open(csk);
	} else
		cxgbi_conn_tx_open(csk);
done:
	spin_unlock_bh(&csk->lock);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);
static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
					     unsigned short mtu)
{
	int i = 0;

	while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
		++i;

	return i;
}
unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = csk->dst;

	csk->advmss = dst_metric_advmss(dst);

	if (csk->advmss > pmtu - 40)
		csk->advmss = pmtu - 40;
	if (csk->advmss < csk->cdev->mtus[0] - 40)
		csk->advmss = csk->cdev->mtus[0] - 40;
	idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);

	return idx;
}
EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
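/*
 * Example: with pmtu 1500 and mtus[] = {1500, 9000}, advmss is capped at
 * 1500 - 40 = 1460 bytes (allowing for 40 bytes of IPv4 + TCP headers),
 * and cxgbi_sock_find_best_mtu() picks index 0, the largest table entry
 * whose MTU does not exceed advmss + 40 = 1500.
 */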
void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
	__skb_queue_tail(&csk->write_queue, skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);
void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);
void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
{
	int pending = cxgbi_sock_count_pending_wrs(csk);

	if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
			csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&csk->lock);

	if (csk->state != CTP_ESTABLISHED) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
			csk, csk->state, csk->flags, csk->tid);
		err = -EAGAIN;
		goto out_err;
	}

	if (csk->err) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
			csk, csk->state, csk->flags, csk->tid, csk->err);
		err = -EPIPE;
		goto out_err;
	}

	if (csk->write_seq - csk->snd_una >= csk->snd_win) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
			csk, csk->state, csk->flags, csk->tid, csk->write_seq,
			csk->snd_una, csk->snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	while (skb) {
		int frags = skb_shinfo(skb)->nr_frags +
				(skb->len != skb->data_len);

		if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
			pr_err("csk 0x%p, skb head %u < %u.\n",
				csk, skb_headroom(skb), cdev->skb_tx_rsvd);
			err = -EINVAL;
			goto out_err;
		}

		if (frags >= SKB_WR_LIST_SIZE) {
			pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
				csk, skb_shinfo(skb)->nr_frags, skb->len,
				skb->data_len, (uint)(SKB_WR_LIST_SIZE));
			err = -EINVAL;
			goto out_err;
		}

		next = skb->next;
		skb->next = NULL;
		cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
		cxgbi_sock_skb_entail(csk, skb);
		copied += skb->len;
		csk->write_seq += skb->len +
				cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
		skb = next;
	}

	if (likely(skb_queue_len(&csk->write_queue)))
		cdev->csk_push_tx_frames(csk, 1);
done:
	spin_unlock_bh(&csk->lock);
	return copied;

out_err:
	if (copied == 0 && err == -EPIPE)
		copied = csk->err ? csk->err : -EPIPE;
	else
		copied = err;
	goto done;
}
static inline void
scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl,
		unsigned int *sgcnt, unsigned int *dlen,
		unsigned int prot)
{
	struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : scsi_out(sc);

	*sgl = sdb->table.sgl;
	*sgcnt = sdb->table.nents;
	*dlen = sdb->length;
	/* Caution: for protection sdb, sdb->length is invalid */
}
void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod,
			    struct cxgbi_task_tag_info *ttinfo,
			    struct scatterlist **sg_pp, unsigned int *sg_off)
{
	struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
	unsigned int offset = sg_off ? *sg_off : 0;
	dma_addr_t addr = 0UL;
	unsigned int len = 0;
	int i;

	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

	if (sg) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	for (i = 0; i < PPOD_PAGES_MAX; i++) {
		if (sg) {
			ppod->addr[i] = cpu_to_be64(addr + offset);
			offset += PAGE_SIZE;
			if (offset == (len + sg->offset)) {
				offset = 0;
				sg = sg_next(sg);
				if (sg) {
					addr = sg_dma_address(sg);
					len = sg_dma_len(sg);
				}
			}
		} else {
			ppod->addr[i] = 0ULL;
		}
	}

	/*
	 * the fifth address needs to be repeated in the next ppod, so do
	 * not move sg
	 */
	if (sg_pp) {
		*sg_pp = sg;
		*sg_off = offset;
	}

	if (offset == len) {
		offset = 0;
		sg = sg_next(sg);
		if (sg) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
		}
	}
	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod);
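/*
 * Example ppod layout: each pagepod carries PPOD_PAGES_MAX (4) page
 * addresses plus a fifth entry, addr[4], that duplicates the first page of
 * the following ppod. A buffer spanning pages P0..P7 is therefore written
 * as ppod0 = {P0, P1, P2, P3, P4} and ppod1 = {P4, P5, P6, P7, 0}.
 */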
/*
 * APIs interacting with open-iscsi libraries
 */

static unsigned char padding[4];
void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
			 struct cxgbi_tag_format *tformat, unsigned int ppmax,
			 unsigned int llimit, unsigned int start,
			 unsigned int rsvd_factor)
{
	int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev,
				cdev->lldev, tformat, ppmax, llimit, start,
				rsvd_factor);

	if (err >= 0) {
		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);

		if (ppm->ppmax < 1024 ||
		    ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX)
			cdev->flags |= CXGBI_FLAG_DDP_OFF;
		err = 0;
	} else {
		cdev->flags |= CXGBI_FLAG_DDP_OFF;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup);
static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents)
{
	int i;
	int last_sgidx = nents - 1;
	struct scatterlist *sg = sgl;

	for (i = 0; i < nents; i++, sg = sg_next(sg)) {
		unsigned int len = sg->length + sg->offset;

		if ((sg->offset & 0x3) || (i && sg->offset) ||
		    ((i != last_sgidx) && len != PAGE_SIZE)) {
			log_debug(1 << CXGBI_DBG_DDP,
				  "sg %u/%u, %u,%u, not aligned.\n",
				  i, nents, sg->offset, sg->length);
			return -EINVAL;
		}
	}
	return 0;
}
static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn,
			     struct cxgbi_task_data *tdata, u32 sw_tag,
			     unsigned int xferlen)
{
	struct cxgbi_sock *csk = cconn->cep->csk;
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
	struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
	struct scatterlist *sgl = ttinfo->sgl;
	unsigned int sgcnt = ttinfo->nents;
	unsigned int sg_offset = sgl->offset;
	int err;

	if (cdev->flags & CXGBI_FLAG_DDP_OFF) {
		log_debug(1 << CXGBI_DBG_DDP,
			  "cdev 0x%p DDP off.\n", cdev);
		return -EINVAL;
	}

	if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt ||
	    ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) {
		log_debug(1 << CXGBI_DBG_DDP,
			  "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
			  ppm, ppm ? ppm->tformat.pgsz_idx_dflt : DDP_PGIDX_MAX,
			  xferlen, ttinfo->nents);
		return -EINVAL;
	}

	/* make sure the buffer is suitable for ddp */
	if (cxgbi_ddp_sgl_check(sgl, sgcnt) < 0)
		return -EINVAL;

	ttinfo->nr_pages = (xferlen + sgl->offset + (1 << PAGE_SHIFT) - 1) >>
			    PAGE_SHIFT;

	/*
	 * the ddp tag will be used for the itt in the outgoing pdu,
	 * the itt generated by libiscsi is saved in the ppm and can be
	 * retrieved via the ddp tag
	 */
	err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
				      &ttinfo->tag, (unsigned long)sw_tag);
	if (err < 0)
		return err;
	ttinfo->npods = err;

	/* setup dma from scsi command sgl */
	sgl->offset = 0;
	err = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	sgl->offset = sg_offset;
	if (err == 0) {
		pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
			__func__, sw_tag, xferlen, sgcnt);
		goto rel_ppods;
	}
	if (err != ttinfo->nr_pages) {
		log_debug(1 << CXGBI_DBG_DDP,
			  "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n",
			  __func__, sw_tag, xferlen, sgcnt, err);
	}

	ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED;
	ttinfo->cid = csk->port_id;

	cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
				xferlen, &ttinfo->hdr);

	if (cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ) {
		/* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */
		ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID;
	} else {
		/* write ppod from control queue now */
		err = cdev->csk_ddp_set_map(ppm, csk, ttinfo);
		if (err < 0)
			goto rel_ppods;
	}

	return 0;

rel_ppods:
	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);

	if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) {
		ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED;
		dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	}
	return -EINVAL;
}
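/*
 * nr_pages example, assuming 4KB pages: for xferlen 16384 with the first
 * sg entry at offset 512, (16384 + 512 + 4095) >> 12 = 5 pagepod page
 * slots are reserved, since the unaligned start spills the I/O into a
 * fifth page.
 */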
static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
	u32 tag = ntohl((__force u32)hdr_itt);

	log_debug(1 << CXGBI_DBG_DDP,
		  "cdev 0x%p, task 0x%p, release tag 0x%x.\n",
		  cdev, task, tag);
	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
	    cxgbi_ppm_is_ddp_tag(ppm, tag)) {
		struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
		struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;

		if (!(cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ))
			cdev->csk_ddp_clear_map(cdev, ppm, ttinfo);
		cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
		dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
			     DMA_FROM_DEVICE);
	}
}
cxgbi_build_sw_tag(u32 idx
, u32 age
)
1453 /* assume idx and age both are < 0x7FFF (32767) */
1454 return (idx
<< 16) | age
;
static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *sess = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
	u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age);
	u32 tag = 0;
	int err = -EINVAL;

	if (sc &&
	    (scsi_bidi_cmnd(sc) ||
	     sc->sc_data_direction == DMA_FROM_DEVICE)) {
		struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
		struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;

		scmd_get_params(sc, &ttinfo->sgl, &ttinfo->nents,
				&tdata->dlen, 0);
		err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, tdata->dlen);
		if (!err)
			tag = ttinfo->tag;
		else
			log_debug(1 << CXGBI_DBG_DDP,
				  "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
				  cconn->cep->csk, task, tdata->dlen,
				  ttinfo->nents);
	}

	if (err < 0) {
		err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag);
		if (err < 0)
			return err;
	}
	/* the itt need to sent in big-endian order */
	*hdr_itt = (__force itt_t)htonl(tag);

	log_debug(1 << CXGBI_DBG_DDP,
		  "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
		  cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
	return 0;
}
void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
	u32 tag = ntohl((__force u32)itt);
	u32 sw_bits;

	if (ppm) {
		if (cxgbi_ppm_is_ddp_tag(ppm, tag))
			sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag);
		else
			sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag);
	} else {
		sw_bits = tag;
	}

	cxgbi_decode_sw_tag(sw_bits, idx, age);
	log_debug(1 << CXGBI_DBG_DDP,
		  "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
		  cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
		  age ? *age : 0xFF);
}
EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
{
	struct iscsi_conn *conn = csk->user_data;

	if (conn) {
		log_debug(1 << CXGBI_DBG_SOCK,
			"csk 0x%p, cid %d.\n", csk, conn->id);
		iscsi_conn_queue_work(conn);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
/*
 * pdu receive, interact with libiscsi_tcp
 */
static inline int read_pdu_skb(struct iscsi_conn *conn,
			       struct sk_buff *skb,
			       unsigned int offset,
			       int offloaded)
{
	int status = 0;
	int bytes_read;

	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
	switch (status) {
	case ISCSI_TCP_CONN_ERR:
		pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
			  skb, offset, offloaded);
		return -EIO;
	case ISCSI_TCP_SUSPENDED:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		/* no transfer - just have caller flush queue */
		return bytes_read;
	case ISCSI_TCP_SKB_DONE:
		pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
			skb, offset, offloaded);
		/*
		 * pdus should always fit in the skb and we should get
		 * segment done notification.
		 */
		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
		return -EFAULT;
	case ISCSI_TCP_SEGMENT_DONE:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		return bytes_read;
	default:
		pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
			skb, offset, offloaded, status);
		return -EINVAL;
	}
}
*csk
, struct iscsi_conn
*conn
,
1586 struct sk_buff
*skb
)
1588 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1591 log_debug(1 << CXGBI_DBG_PDU_RX
,
1592 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1593 conn
, skb
, skb
->len
, cxgbi_skcb_flags(skb
));
1595 if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn
)) {
1596 pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn
, skb
);
1597 iscsi_conn_failure(conn
, ISCSI_ERR_PROTO
);
1601 if (conn
->hdrdgst_en
&&
1602 cxgbi_skcb_test_flag(skb
, SKCBF_RX_HCRC_ERR
)) {
1603 pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn
, skb
);
1604 iscsi_conn_failure(conn
, ISCSI_ERR_HDR_DGST
);
1608 if (cxgbi_skcb_test_flag(skb
, SKCBF_RX_ISCSI_COMPL
) &&
1609 cxgbi_skcb_test_flag(skb
, SKCBF_RX_DATA_DDPD
)) {
1610 /* If completion flag is set and data is directly
1611 * placed in to the host memory then update
1612 * task->exp_datasn to the datasn in completion
1613 * iSCSI hdr as T6 adapter generates completion only
1614 * for the last pdu of a sequence.
1616 itt_t itt
= ((struct iscsi_data
*)skb
->data
)->itt
;
1617 struct iscsi_task
*task
= iscsi_itt_to_ctask(conn
, itt
);
1618 u32 data_sn
= be32_to_cpu(((struct iscsi_data
*)
1619 skb
->data
)->datasn
);
1620 if (task
&& task
->sc
) {
1621 struct iscsi_tcp_task
*tcp_task
= task
->dd_data
;
1623 tcp_task
->exp_datasn
= data_sn
;
1627 err
= read_pdu_skb(conn
, skb
, 0, 0);
1628 if (likely(err
>= 0)) {
1629 struct iscsi_hdr
*hdr
= (struct iscsi_hdr
*)skb
->data
;
1630 u8 opcode
= hdr
->opcode
& ISCSI_OPCODE_MASK
;
1632 if (unlikely(opcode
== ISCSI_OP_LOGOUT_RSP
))
1633 cxgbi_sock_set_flag(csk
, CTPF_LOGOUT_RSP_RCVD
);
static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
			     struct sk_buff *skb, unsigned int offset)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	bool offloaded = 0;
	int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
		conn, skb, skb->len, cxgbi_skcb_flags(skb));

	if (conn->datadgst_en &&
	    cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) {
		pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
			conn, lskb, cxgbi_skcb_flags(lskb));
		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
		return -EIO;
	}

	if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
		return 0;

	/* coalesced, add header digest length */
	if (lskb == skb && conn->hdrdgst_en)
		offset += ISCSI_DIGEST_SIZE;

	if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD))
		offloaded = 1;

	if (opcode == ISCSI_OP_SCSI_DATA_IN)
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
			skb, opcode, ntohl(tcp_conn->in.hdr->itt),
			tcp_conn->in.datalen, offloaded ? "is" : "not");

	return read_pdu_skb(conn, skb, offset, offloaded);
}
static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
{
	struct cxgbi_device *cdev = csk->cdev;
	int must_send;
	u32 credits;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
		csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
		csk->rcv_wup, cdev->rx_credit_thres,
		csk->rcv_win);

	if (!cdev->rx_credit_thres)
		return;

	if (csk->state != CTP_ESTABLISHED)
		return;

	credits = csk->copied_seq - csk->rcv_wup;
	if (unlikely(!credits))
		return;
	must_send = credits + 16384 >= csk->rcv_win;
	if (must_send || credits >= cdev->rx_credit_thres)
		csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
}
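/*
 * Credit-return example: with rcv_win 262144 and rx_credit_thres 10240,
 * credits accumulate as copied_seq advances past rcv_wup and are returned
 * once they reach 10240, or immediately when credits + 16384 >= 262144,
 * i.e. when less than 16KB of the receive window remains unadvertised.
 */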
void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct iscsi_conn *conn = csk->user_data;
	struct sk_buff *skb;
	unsigned int read = 0;
	int err = 0;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, conn 0x%p.\n", csk, conn);

	if (unlikely(!conn || conn->suspend_rx)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
			csk, conn, conn ? conn->id : 0xFF,
			conn ? conn->suspend_rx : 0xFF);
		return;
	}

	while (!err) {
		skb = skb_peek(&csk->receive_queue);
		if (!skb ||
		    !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
			if (skb)
				log_debug(1 << CXGBI_DBG_PDU_RX,
					"skb 0x%p, NOT ready 0x%lx.\n",
					skb, cxgbi_skcb_flags(skb));
			break;
		}
		__skb_unlink(skb, &csk->receive_queue);

		read += cxgbi_skcb_rx_pdulen(skb);
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
			csk, skb, skb->len, cxgbi_skcb_flags(skb),
			cxgbi_skcb_rx_pdulen(skb));

		if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
			err = skb_read_pdu_bhs(csk, conn, skb);
			if (err < 0) {
				pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}
			err = skb_read_pdu_data(conn, skb, skb,
						err + cdev->skb_rx_extra);
			if (err < 0)
				pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
		} else {
			err = skb_read_pdu_bhs(csk, conn, skb);
			if (err < 0) {
				pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}

			if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
				struct sk_buff *dskb;

				dskb = skb_peek(&csk->receive_queue);
				if (!dskb) {
					pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
						" plen %u, NO data.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb));
					err = -EIO;
					goto skb_done;
				}
				__skb_unlink(dskb, &csk->receive_queue);

				err = skb_read_pdu_data(conn, skb, dskb, 0);
				if (err < 0)
					pr_err("data, csk 0x%p, skb 0x%p,%u, "
						"f 0x%lx, plen %u, dskb 0x%p,"
						"%u.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb),
						dskb, dskb->len);
				__kfree_skb(dskb);
			} else
				err = skb_read_pdu_data(conn, skb, skb, 0);
		}
skb_done:
		__kfree_skb(skb);
	}

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
	if (read) {
		csk->copied_seq += read;
		csk_return_rx_credits(csk, read);
		conn->rxdata_octets += read;
	}

	if (err < 0) {
		pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
			csk, conn, err, read);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);
static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
				unsigned int offset, unsigned int *off,
				struct scatterlist **sgp)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, sgcnt, i) {
		if (offset < sg->length) {
			*off = offset;
			*sgp = sg;
			return 0;
		}
		offset -= sg->length;
	}
	return -EFAULT;
}
static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
				unsigned int dlen, struct page_frag *frags,
				int frag_max)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		if (!sglen) {
			sg = sg_next(sg);
			if (!sg) {
				pr_warn("sg %d NULL, len %u/%u.\n",
					i, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);
		}
		copy = min(datalen, sglen);
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				pr_warn("too many pages %u, dlen %u.\n",
					frag_max, dlen);
				return -EINVAL;
			}

			frags[i].page = page;
			frags[i].offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);

	return i;
}
int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct scsi_cmnd *sc = task->sc;
	struct cxgbi_sock *csk = cconn->cep->csk;
	struct net_device *ndev = cdev->ports[csk->port_id];
	int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
	    (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	     (opcode == ISCSI_OP_SCSI_CMD &&
	      (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		/* data could go into the skb head */
		headroom += min_t(unsigned int,
				SKB_MAX_HEAD(cdev->skb_tx_rsvd),
				conn->max_xmit_dlength);

	tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
	if (!tdata->skb) {
		ndev->stats.tx_dropped++;
		return -ENOMEM;
	}

	skb_reserve(tdata->skb, cdev->skb_tx_rsvd);

	if (task->sc) {
		task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	} else {
		task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC);
		if (!task->hdr) {
			__kfree_skb(tdata->skb);
			tdata->skb = NULL;
			ndev->stats.tx_dropped++;
			return -ENOMEM;
		}
	}

	task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		task_reserve_itt(task, &task->hdr->itt);

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
		task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
		conn->max_xmit_dlength, ntohl(task->hdr->itt));

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
	if (hcrc || dcrc) {
		u8 submode = 0;

		if (hcrc)
			submode |= 1;
		if (dcrc)
			submode |= 2;
		cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
	} else
		cxgbi_skcb_ulp_mode(skb) = 0;
}
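/*
 * ulp_mode example: header digest alone gives submode 1, data digest alone
 * gives submode 2, both give submode 3; the iSCSI ULP type sits in the
 * upper nibble, so hcrc + dcrc yields (ULP2_MODE_ISCSI << 4) | 3.
 */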
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			      unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
		task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
		ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);

	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		err = sgl_seek_offset(
					sdb->table.sgl, sdb->table.nents,
					tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
				sdb->table.nents, tdata->offset, sdb->length);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
				sdb->table.nents, tdata->offset, tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			struct page_frag *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page);

				memcpy(dst, src + frag->offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src);
			}
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fit into frag_list */
			for (i = 0; i < tdata->nr_frags; i++) {
				__skb_fill_page_desc(skb, i,
						tdata->frags[i].page,
						tdata->frags[i].offset,
						tdata->frags[i].size);
				skb_frag_ref(skb, i);
			}
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
					count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				virt_to_page(padding), offset_in_page(padding),
				padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
	struct sk_buff *skb = tdata->skb;
	struct cxgbi_sock *csk = NULL;
	unsigned int datalen;
	int err;

	if (!skb) {
		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
			"task 0x%p\n", task);
		return 0;
	}

	if (cconn && cconn->cep)
		csk = cconn->cep->csk;
	if (!csk) {
		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
			"task 0x%p, csk gone.\n", task);
		return -EPIPE;
	}

	tdata->skb = NULL;
	datalen = skb->data_len;

	/* write ppod first if using ofldq to write ppod */
	if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) {
		struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev);

		ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID;
		if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0)
			pr_err("task 0x%p, ppod writing using ofldq failed.\n",
			       task);
			/* continue. Let fl get the data */
	}

	if (!task->sc)
		memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX);

	err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
	if (err > 0) {
		int pdulen = err;

		log_debug(1 << CXGBI_DBG_PDU_TX,
			"task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
			task, task->sc, skb, skb->len, skb->data_len, err);

		if (task->conn->hdrdgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		if (datalen && task->conn->datadgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		task->conn->txdata_octets += pdulen;
		return 0;
	}

	if (err == -EAGAIN || err == -ENOBUFS) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
			task, skb, skb->len, skb->data_len, err);
		/* reset skb to send when we are called again */
		tdata->skb = skb;
		return err;
	}

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
		task->itt, skb, skb->len, skb->data_len, err);

	__kfree_skb(skb);

	iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
	iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);
void cxgbi_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);

	if (!tcp_task || !tdata || (tcp_task->dd_data != tdata)) {
		pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
			task, task->sc, tcp_task,
			tcp_task ? tcp_task->dd_data : NULL, tdata);
		return;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"task 0x%p, skb 0x%p, itt 0x%x.\n",
		task, tdata->skb, task->hdr_itt);

	tcp_task->dd_data = NULL;

	if (!task->sc)
		kfree(task->hdr);
	task->hdr = NULL;

	/* never reached the xmit task callout */
	if (tdata->skb) {
		__kfree_skb(tdata->skb);
		tdata->skb = NULL;
	}

	task_release_itt(task, task->hdr_itt);
	memset(tdata, 0, sizeof(*tdata));

	iscsi_tcp_cleanup_task(task);
}
EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
				struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->digest_err = 0;
	stats->timeout_err = 0;
	stats->custom_length = 1;
	strcpy(stats->custom[0].desc, "eh_abort_cnt");
	stats->custom[0].value = conn->eh_abort_cnt;
}
EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);
static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
	unsigned int max_def = 512 * MAX_SKB_FRAGS;
	unsigned int max = max(max_def, headroom);

	max = min(cconn->chba->cdev->tx_max_size, max);
	if (conn->max_xmit_dlength)
		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
	else
		conn->max_xmit_dlength = max;
	cxgbi_align_pdu_size(conn->max_xmit_dlength);

	return 0;
}
static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	unsigned int max = cconn->chba->cdev->rx_max_size;

	cxgbi_align_pdu_size(max);

	if (conn->max_recv_dlength) {
		if (conn->max_recv_dlength > max) {
			pr_err("MaxRecvDataSegmentLength %u > %u.\n",
				conn->max_recv_dlength, max);
			return -EINVAL;
		}
		conn->max_recv_dlength = min(conn->max_recv_dlength, max);
		cxgbi_align_pdu_size(conn->max_recv_dlength);
	} else
		conn->max_recv_dlength = max;

	return 0;
}
int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
			enum iscsi_param param, char *buf, int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_sock *csk = cconn->cep->csk;
	int err;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls_conn 0x%p, param %d, buf(%d) %s.\n",
		cls_conn, param, buflen, buf);

	switch (param) {
	case ISCSI_PARAM_HDRDGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->hdrdgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en, 0);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->datadgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en, 0);
		break;
	case ISCSI_PARAM_MAX_R2T:
		return iscsi_tcp_set_max_r2t(conn, buf);
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_recv_dlength(conn);
		break;
	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_xmit_dlength(conn);
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
{
	int len;

	cxgbi_sock_get(csk);
	len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
	cxgbi_sock_put(csk);

	return len;
}

static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
{
	int len;

	cxgbi_sock_get(csk);
	if (csk->csk_family == AF_INET)
		len = sprintf(buf, "%pI4",
			      &csk->daddr.sin_addr.s_addr);
	else
		len = sprintf(buf, "%pI6",
			      &csk->daddr6.sin6_addr);
	cxgbi_sock_put(csk);

	return len;
}
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
		       char *buf)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls_conn 0x%p, param %d.\n", ep, param);

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		if (!cep)
			return -ENOTCONN;

		csk = cep->csk;
		if (!csk)
			return -ENOTCONN;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &csk->daddr, param, buf);
	default:
		return -ENOSYS;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
struct iscsi_cls_conn *
cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct cxgbi_conn *cconn;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
	if (!cls_conn)
		return NULL;

	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	cconn = tcp_conn->dd_data;
	cconn->iconn = conn;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
		cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);

	return cls_conn;
}
EXPORT_SYMBOL_GPL(cxgbi_create_conn);
int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
		    struct iscsi_cls_conn *cls_conn,
		    u64 transport_eph, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_ppm *ppm;
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_sock *csk;
	int err;

	ep = iscsi_lookup_endpoint(transport_eph);
	if (!ep)
		return -EINVAL;

	/* setup ddp pagesize */
	cep = ep->dd_data;
	csk = cep->csk;

	ppm = csk->cdev->cdev2ppm(csk->cdev);
	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
					     ppm->tformat.pgsz_idx_dflt);
	if (err < 0)
		return err;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		return -EINVAL;

	/* calculate the tag idx bits needed for this conn based on cmds_max */
	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;

	write_lock_bh(&csk->callback_lock);
	csk->user_data = conn;
	cconn->chba = cep->chba;
	cconn->cep = cep;
	cep->cconn = cconn;
	write_unlock_bh(&csk->callback_lock);

	cxgbi_conn_max_xmit_dlength(conn);
	cxgbi_conn_max_recv_dlength(conn);

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
		  cls_session, cls_conn, ep, cconn, csk);

	/* init recv engine */
	iscsi_tcp_hdr_recv_prep(tcp_conn);

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
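
/*
 * Session setup rides on libiscsi's TCP session code; each task gets
 * a cxgbi_task_data appended to its iscsi_tcp_task.  The R2T pool
 * allocated here is freed again in cxgbi_destroy_session().
 */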
struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
					       u16 cmds_max, u16 qdepth,
					       u32 initial_cmdsn)
{
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;

	if (!ep) {
		pr_err("missing endpoint.\n");
		return NULL;
	}

	cep = ep->dd_data;
	chba = cep->chba;
	shost = chba->shost;

	BUG_ON(chba != iscsi_host_priv(shost));

	cls_session = iscsi_session_setup(chba->cdev->itp, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct cxgbi_task_data),
					  initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	session = cls_session->dd_data;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_create_session);
void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
{
	log_debug(1 << CXGBI_DBG_ISCSI,
		  "cls sess 0x%p.\n", cls_session);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);
}
EXPORT_SYMBOL_GPL(cxgbi_destroy_session);
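
/*
 * Host-level parameters map onto the underlying net_device: the
 * initiator IPv4 address is pushed down to the adapter, while the
 * hardware address and netdev name are read straight from the ndev.
 * Both entry points bail out if no net_device is bound to the host.
 */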
int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			 char *buf, int buflen)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not set host param. "
			     "netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
		  shost, chba, chba->ndev->name, param, buflen, buf);

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		__be32 addr = in_aton(buf);
		log_debug(1 << CXGBI_DBG_ISCSI,
			  "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
		cxgbi_set_iscsi_ipv4(chba, addr);
		return 0;
	}
	case ISCSI_HOST_PARAM_HWADDRESS:
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		return 0;
	default:
		return iscsi_host_set_param(shost, param, buf, buflen);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_set_host_param);
int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			 char *buf)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);
	int len = 0;

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not get host param. "
			     "netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "shost 0x%p, hba 0x%p,%s, param %d.\n",
		  shost, chba, chba->ndev->name, param);

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
		break;
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sprintf(buf, "%s\n", chba->ndev->name);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		struct cxgbi_sock *csk = find_sock_on_port(chba->cdev,
							    chba->port_id);
		if (csk) {
			len = sprintf(buf, "%pIS",
				      (struct sockaddr *)&csk->saddr);
		}
		log_debug(1 << CXGBI_DBG_ISCSI,
			  "hba %s, addr %s.\n", chba->ndev->name, buf);
		break;
	}
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return len;
}
EXPORT_SYMBOL_GPL(cxgbi_get_host_param);
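
/*
 * Active-open path: resolve a route to the target, claim a source
 * port from the device's port map, start the hardware active open,
 * and wrap the offloaded socket in an iscsi_endpoint.  When a shost
 * is given, the route must resolve onto that host's port, since the
 * connection can only be offloaded by the adapter that owns it.
 */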
struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
					struct sockaddr *dst_addr,
					int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *hba = NULL;
	struct cxgbi_sock *csk;
	int ifindex = 0;
	int err = -EINVAL;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		  "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
		  shost, non_blocking, dst_addr);

	if (shost) {
		hba = iscsi_host_priv(shost);
		if (!hba) {
			pr_info("shost 0x%p, priv NULL.\n", shost);
			goto err_out;
		}

		rtnl_lock();
		if (!vlan_uses_dev(hba->ndev))
			ifindex = hba->ndev->ifindex;
		rtnl_unlock();
	}

	if (dst_addr->sa_family == AF_INET) {
		csk = cxgbi_check_route(dst_addr, ifindex);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (dst_addr->sa_family == AF_INET6) {
		csk = cxgbi_check_route6(dst_addr, ifindex);
#endif
	} else {
		pr_info("address family 0x%x NOT supported.\n",
			dst_addr->sa_family);
		err = -EAFNOSUPPORT;
		return (struct iscsi_endpoint *)ERR_PTR(err);
	}

	if (IS_ERR(csk))
		return (struct iscsi_endpoint *)csk;
	cxgbi_sock_get(csk);

	if (!hba)
		hba = csk->cdev->hbas[csk->port_id];
	else if (hba != csk->cdev->hbas[csk->port_id]) {
		pr_info("Could not connect through requested host %u, "
			"hba 0x%p != 0x%p (%u).\n",
			shost->host_no, hba,
			csk->cdev->hbas[csk->port_id], csk->port_id);
		err = -ENOSPC;
		goto release_conn;
	}

	err = sock_get_port(csk);
	if (err)
		goto release_conn;

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	err = csk->cdev->csk_init_act_open(csk);
	if (err)
		goto release_conn;

	if (cxgbi_sock_is_closing(csk)) {
		err = -ENOSPC;
		pr_info("csk 0x%p is closing.\n", csk);
		goto release_conn;
	}

	ep = iscsi_create_endpoint(sizeof(*cep));
	if (!ep) {
		err = -ENOMEM;
		pr_info("iscsi alloc ep, OOM.\n");
		goto release_conn;
	}

	cep = ep->dd_data;
	cep->csk = csk;
	cep->chba = hba;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		  "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
		  ep, cep, csk, hba, hba->ndev->name);
	return ep;

release_conn:
	cxgbi_sock_put(csk);
	cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_connect);
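
/*
 * ep_poll reports whether the hardware active open has completed.
 * ep_disconnect detaches the socket from libiscsi under
 * callback_lock, so no rx/tx callback can race with the teardown,
 * and then closes it, actively if it ever reached ESTABLISHED.
 */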
int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk = cep->csk;

	if (!cxgbi_sock_is_established(csk))
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(cxgbi_ep_poll);
void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_conn *cconn = cep->cconn;
	struct cxgbi_sock *csk = cep->csk;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		  "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
		  ep, cep, cconn, csk, csk->state, csk->flags);

	if (cconn && cconn->iconn) {
		iscsi_suspend_tx(cconn->iconn);
		write_lock_bh(&csk->callback_lock);
		cep->csk->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&csk->callback_lock);
	}
	iscsi_destroy_endpoint(ep);

	if (likely(csk->state >= CTP_ESTABLISHED))
		need_active_close(csk);
	else
		cxgbi_sock_closed(csk);

	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);
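
/*
 * Transport registration wrappers used by the cxgb3i/cxgb4i modules.
 * A minimal sketch of the expected usage from a driver's module init
 * (the names "foo_transport" and "foo_stt" are hypothetical):
 *
 *	err = cxgbi_iscsi_init(&foo_transport, &foo_stt);
 *	if (err < 0)
 *		return err;
 *	...
 *	cxgbi_iscsi_cleanup(&foo_transport, &foo_stt);
 */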
int cxgbi_iscsi_init(struct iscsi_transport *itp,
		     struct scsi_transport_template **stt)
{
	*stt = iscsi_register_transport(itp);
	if (*stt == NULL) {
		pr_err("unable to register %s transport 0x%p.\n",
		       itp->name, itp);
		return -ENODEV;
	}
	log_debug(1 << CXGBI_DBG_ISCSI,
		  "%s, registered iscsi transport 0x%p.\n",
		  itp->name, itp);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);
void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
			 struct scsi_transport_template **stt)
{
	if (*stt) {
		log_debug(1 << CXGBI_DBG_ISCSI,
			  "de-register transport 0x%p, %s, stt 0x%p.\n",
			  itp, itp->name, *stt);
		*stt = NULL;
		iscsi_unregister_transport(itp);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
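
/*
 * Tell the iSCSI sysfs layer which host and connection/session
 * attributes this transport implements; anything not listed here
 * stays invisible to userspace.
 */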
umode_t cxgbi_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
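
/*
 * Module init/exit.  The BUILD_BUG_ON() ensures at compile time that
 * the per-skb control block cxgbi keeps in skb->cb fits within the
 * 48 bytes the network stack reserves there.
 */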
static int __init libcxgbi_init_module(void)
{
	pr_info("%s", version);

	BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
		     sizeof(struct cxgbi_skb_cb));
	return 0;
}

static void __exit libcxgbi_exit_module(void)
{
	cxgbi_device_unregister_all(0xFF);
}

module_init(libcxgbi_init_module);
module_exit(libcxgbi_exit_module);