/*
 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/if_vlan.h>
#include <linux/inet.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <linux/inetdevice.h>	/* ip_dev_find */
#include <linux/module.h>
#include <net/tcp.h>

static unsigned int dbg_level;

#include "libcxgbi.h"
#define DRV_MODULE_NAME		"libcxgbi"
#define DRV_MODULE_DESC		"Chelsio iSCSI driver library"
#define DRV_MODULE_VERSION	"0.9.1-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");
/*
 * cxgbi device management
 * maintains a list of the cxgbi devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static LIST_HEAD(cdev_rcu_list);
static DEFINE_SPINLOCK(cdev_rcu_lock);
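/*
 * The driver's "sw tag" packs the libiscsi task index and session age
 * into one 32-bit word: idx lives in bits 30..16 and age in bits 14..0,
 * both assumed to fit in 15 bits (see cxgbi_build_sw_tag() further
 * below for the encode side).
 */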
static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age)
{
	if (age)
		*age = sw_tag & 0x7FFF;
	if (idx)
		*idx = (sw_tag >> 16) & 0x7FFF;
}
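/*
 * Worked example of the sw-tag layout: idx 5 and age 2 encode to
 * (5 << 16) | 2 == 0x00050002, and cxgbi_decode_sw_tag() above
 * recovers idx = 5 and age = 2 from that word.
 */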
int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
				unsigned int max_conn)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
					     sizeof(struct cxgbi_sock *),
					     GFP_KERNEL);
	if (!pmap->port_csk) {
		pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
		return -ENOMEM;
	}

	pmap->max_connect = max_conn;
	pmap->sport_base = base;
	spin_lock_init(&pmap->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);
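/*
 * The port map is a flat array of csk pointers indexed by
 * (source port - sport_base); the cleanup below walks it and
 * force-closes any connection still holding an entry.
 */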
void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	struct cxgbi_sock *csk;
	int i;

	for (i = 0; i < pmap->max_connect; i++) {
		if (pmap->port_csk[i]) {
			csk = pmap->port_csk[i];
			pmap->port_csk[i] = NULL;
			log_debug(1 << CXGBI_DBG_SOCK,
				"csk 0x%p, cdev 0x%p, offload down.\n",
				csk, cdev);
			spin_lock_bh(&csk->lock);
			cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
			cxgbi_sock_closed(csk);
			spin_unlock_bh(&csk->lock);
			cxgbi_sock_put(csk);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);
static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
	cxgbi_hbas_remove(cdev);
	cxgbi_device_portmap_cleanup(cdev);
	cxgbi_ppm_release(cdev->cdev2ppm(cdev));
	if (cdev->pmap.max_connect)
		cxgbi_free_big_mem(cdev->pmap.port_csk);
	kfree(cdev);
}
struct cxgbi_device *cxgbi_device_register(unsigned int extra,
					   unsigned int nports)
{
	struct cxgbi_device *cdev;

	cdev = kzalloc(sizeof(*cdev) + extra + nports *
			(sizeof(struct cxgbi_hba *) +
			 sizeof(struct net_device *)),
			GFP_KERNEL);
	if (!cdev) {
		pr_warn("nport %d, OOM.\n", nports);
		return NULL;
	}
	cdev->ports = (struct net_device **)(cdev + 1);
	cdev->hbas = (struct cxgbi_hba **)(((char *)cdev->ports) + nports *
						sizeof(struct net_device *));
	if (extra)
		cdev->dd_data = ((char *)cdev->hbas) +
				nports * sizeof(struct cxgbi_hba *);
	spin_lock_init(&cdev->pmap.lock);

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list_head, &cdev_list);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
	spin_unlock(&cdev_rcu_lock);

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, nports);
	return cdev;
}
EXPORT_SYMBOL_GPL(cxgbi_device_register);
void cxgbi_device_unregister(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u,%s.\n",
		cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");

	mutex_lock(&cdev_mutex);
	list_del(&cdev->list_head);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_del_rcu(&cdev->rcu_node);
	spin_unlock(&cdev_rcu_lock);
	synchronize_rcu();

	cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);
void cxgbi_device_unregister_all(unsigned int flag)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if ((cdev->flags & flag) == flag) {
			mutex_unlock(&cdev_mutex);
			cxgbi_device_unregister(cdev);
			mutex_lock(&cdev_mutex);
		}
	}
	mutex_unlock(&cdev_mutex);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);
struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if (cdev->lldev == lldev) {
			mutex_unlock(&cdev_mutex);
			return cdev;
		}
	}
	mutex_unlock(&cdev_mutex);

	log_debug(1 << CXGBI_DBG_DEV,
		"lldev 0x%p, NO match found.\n", lldev);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);
struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
						 int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		log_debug(1 << CXGBI_DBG_DEV,
			"vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);
struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				rcu_read_unlock();
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	rcu_read_unlock();

	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
#if IS_ENABLED(CONFIG_IPV6)
static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr,
				    MAX_ADDR_LEN)) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match mac found.\n",
		ndev, ndev->name);
	return NULL;
}
#endif
void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
	int i;
	struct cxgbi_hba *chba;

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		chba = cdev->hbas[i];
		if (chba) {
			cdev->hbas[i] = NULL;
			iscsi_host_remove(chba->shost);
			pci_dev_put(cdev->pdev);
			iscsi_host_free(chba->shost);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
		unsigned int max_id, struct scsi_host_template *sht,
		struct scsi_transport_template *stt)
{
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	int i, err;

	log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
		if (!shost) {
			pr_info("0x%p, p%d, %s, host alloc failed.\n",
				cdev, i, cdev->ports[i]->name);
			err = -ENOMEM;
			goto err_out;
		}

		shost->transportt = stt;
		shost->max_lun = max_lun;
		shost->max_id = max_id;
		shost->max_channel = 0;
		shost->max_cmd_len = 16;

		chba = iscsi_host_priv(shost);

		chba->cdev = cdev;
		chba->ndev = cdev->ports[i];
		chba->shost = shost;

		log_debug(1 << CXGBI_DBG_DEV,
			"cdev 0x%p, p#%d %s: chba 0x%p.\n",
			cdev, i, cdev->ports[i]->name, chba);

		pci_dev_get(cdev->pdev);
		err = iscsi_host_add(shost, &cdev->pdev->dev);
		if (err) {
			pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
				cdev, i, cdev->ports[i]->name);
			pci_dev_put(cdev->pdev);
			scsi_host_put(shost);
			goto err_out;
		}

		cdev->hbas[i] = chba;
	}

	return 0;

err_out:
	cxgbi_hbas_remove(cdev);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_add);
/*
 * iSCSI offload
 *
 * - source port management
 *   To find a free source port in the port allocation map we use a very simple
 *   rotor scheme to look for the next free port.
 *
 *   If a source port has been specified make sure that it doesn't collide with
 *   our normal source port allocation map.  If it's outside the range of our
 *   allocation/deallocation scheme just let them use it.
 *
 *   If the source port is outside our allocation range, the caller is
 *   responsible for keeping track of their port usage.
 */
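/*
 * Rotor walk-through (see sock_get_port() below): with sport_base 1024
 * and pmap->next == 5, the search probes index 6, 7, ... wrapping at
 * max_connect, and the first free slot i is claimed with source port
 * htons(1024 + i).
 */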
static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev,
					    unsigned char port_id)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int i;
	unsigned int used;

	if (!pmap->max_connect || !pmap->used)
		return NULL;

	spin_lock_bh(&pmap->lock);
	used = pmap->used;
	for (i = 0; used && i < pmap->max_connect; i++) {
		struct cxgbi_sock *csk = pmap->port_csk[i];

		if (csk) {
			if (csk->port_id == port_id) {
				spin_unlock_bh(&pmap->lock);
				return csk;
			}
			used--;
		}
	}
	spin_unlock_bh(&pmap->lock);

	return NULL;
}
static int sock_get_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int start;
	int idx;
	__be16 *port;

	if (!pmap->max_connect) {
		pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		pr_err("source port NON-ZERO %u.\n",
			ntohs(*port));
		return -EADDRINUSE;
	}

	spin_lock_bh(&pmap->lock);
	if (pmap->used >= pmap->max_connect) {
		spin_unlock_bh(&pmap->lock);
		pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	start = idx = pmap->next;
	do {
		if (++idx >= pmap->max_connect)
			idx = 0;
		if (!pmap->port_csk[idx]) {
			pmap->used++;
			*port = htons(pmap->sport_base + idx);
			pmap->next = idx;
			pmap->port_csk[idx] = csk;
			spin_unlock_bh(&pmap->lock);
			cxgbi_sock_get(csk);
			log_debug(1 << CXGBI_DBG_SOCK,
				"cdev 0x%p, p#%u %s, p %u, %u.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				pmap->sport_base + idx, pmap->next);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&pmap->lock);

	/* should not happen */
	pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
		cdev, csk->port_id, cdev->ports[csk->port_id]->name,
		pmap->next);
	return -EADDRNOTAVAIL;
}
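/*
 * Release path: translate the bound source port back to its map index
 * and clear the slot so the rotor above can hand it out again.
 */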
static void sock_put_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	__be16 *port;

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		int idx = ntohs(*port) - pmap->sport_base;

		*port = 0;
		if (idx < 0 || idx >= pmap->max_connect) {
			pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				ntohs(*port));
			return;
		}

		spin_lock_bh(&pmap->lock);
		pmap->port_csk[idx] = NULL;
		pmap->used--;
		spin_unlock_bh(&pmap->lock);

		log_debug(1 << CXGBI_DBG_SOCK,
			"cdev 0x%p, p#%u %s, release %u.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name,
			pmap->sport_base + idx);

		cxgbi_sock_put(csk);
	}
}
/*
 * iscsi tcp connection
 */
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
{
	if (csk->cpl_close) {
		kfree_skb(csk->cpl_close);
		csk->cpl_close = NULL;
	}
	if (csk->cpl_abort_req) {
		kfree_skb(csk->cpl_abort_req);
		csk->cpl_abort_req = NULL;
	}
	if (csk->cpl_abort_rpl) {
		kfree_skb(csk->cpl_abort_rpl);
		csk->cpl_abort_rpl = NULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);
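/*
 * Socket construction: csk_alloc_cpls() is the per-device hook that
 * pre-allocates the CPL close/abort skbs freed above, so connection
 * teardown never has to allocate memory.
 */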
static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
{
	struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);

	if (!csk) {
		pr_info("alloc csk %zu failed.\n", sizeof(*csk));
		return NULL;
	}

	if (cdev->csk_alloc_cpls(csk) < 0) {
		pr_info("csk 0x%p, alloc cpls failed.\n", csk);
		kfree(csk);
		return NULL;
	}

	spin_lock_init(&csk->lock);
	kref_init(&csk->refcnt);
	skb_queue_head_init(&csk->receive_queue);
	skb_queue_head_init(&csk->write_queue);
	timer_setup(&csk->retry_timer, NULL, 0);
	rwlock_init(&csk->callback_lock);
	csk->cdev = cdev;
	csk->flags = 0;
	cxgbi_sock_set_state(csk, CTP_CLOSED);

	log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);

	return csk;
}
static struct rtable *find_route_ipv4(struct flowi4 *fl4,
				      __be32 saddr, __be32 daddr,
				      __be16 sport, __be16 dport, u8 tos,
				      int ifindex)
{
	struct rtable *rt;

	rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
				   dport, sport, IPPROTO_TCP, tos, ifindex);
	if (IS_ERR(rt))
		return NULL;

	return rt;
}
static struct cxgbi_sock *
cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
{
	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rtable *rt = NULL;
	struct neighbour *n;
	struct flowi4 fl4;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0,
			     daddr->sin_port, 0, ifindex);
	if (!rt) {
		pr_info("no route to ipv4 0x%x, port %u.\n",
			be32_to_cpu(daddr->sin_addr.s_addr),
			be16_to_cpu(daddr->sin_port));
		err = -ENETUNREACH;
		goto err_out;
	}
	dst = &rt->dst;
	n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
	if (!n) {
		err = -ENODEV;
		goto rel_rt;
	}
	ndev = n->dev;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}

	if (ndev->flags & IFF_LOOPBACK) {
		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
		if (!ndev) {
			err = -ENETUNREACH;
			goto rel_neigh;
		}
		mtu = ndev->mtu;
		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
			n->dev->name, ndev->name, mtu);
	}

	if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
		pr_info("%s interface not up.\n", ndev->name);
		err = -ENETDOWN;
		goto rel_neigh;
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
			&daddr->sin_addr.s_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
		port, ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_neigh;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;

	csk->csk_family = AF_INET;
	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
	csk->daddr.sin_port = daddr->sin_port;
	csk->daddr.sin_family = daddr->sin_family;
	csk->saddr.sin_family = daddr->sin_family;
	csk->saddr.sin_addr.s_addr = fl4.saddr;
	neigh_release(n);

	return csk;

rel_neigh:
	neigh_release(n);
rel_rt:
	ip_rt_put(rt);
err_out:
	return ERR_PTR(err);
}
#if IS_ENABLED(CONFIG_IPV6)
static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
					const struct in6_addr *daddr,
					int ifindex)
{
	struct flowi6 fl;

	memset(&fl, 0, sizeof(fl));
	fl.flowi6_oif = ifindex;
	if (saddr)
		memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&fl.daddr, daddr, sizeof(struct in6_addr));
	return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
}
*
712 cxgbi_check_route6(struct sockaddr
*dst_addr
, int ifindex
)
714 struct sockaddr_in6
*daddr6
= (struct sockaddr_in6
*)dst_addr
;
715 struct dst_entry
*dst
;
716 struct net_device
*ndev
;
717 struct cxgbi_device
*cdev
;
718 struct rt6_info
*rt
= NULL
;
720 struct in6_addr pref_saddr
;
721 struct cxgbi_sock
*csk
= NULL
;
722 unsigned int mtu
= 0;
726 rt
= find_route_ipv6(NULL
, &daddr6
->sin6_addr
, ifindex
);
729 pr_info("no route to ipv6 %pI6 port %u\n",
730 daddr6
->sin6_addr
.s6_addr
,
731 be16_to_cpu(daddr6
->sin6_port
));
738 n
= dst_neigh_lookup(dst
, &daddr6
->sin6_addr
);
741 pr_info("%pI6, port %u, dst no neighbour.\n",
742 daddr6
->sin6_addr
.s6_addr
,
743 be16_to_cpu(daddr6
->sin6_port
));
749 if (!(ndev
->flags
& IFF_UP
) || !netif_carrier_ok(ndev
)) {
750 pr_info("%s interface not up.\n", ndev
->name
);
755 if (ipv6_addr_is_multicast(&daddr6
->sin6_addr
)) {
756 pr_info("multi-cast route %pI6 port %u, dev %s.\n",
757 daddr6
->sin6_addr
.s6_addr
,
758 ntohs(daddr6
->sin6_port
), ndev
->name
);
763 cdev
= cxgbi_device_find_by_netdev(ndev
, &port
);
765 cdev
= cxgbi_device_find_by_mac(ndev
, &port
);
767 pr_info("dst %pI6 %s, NOT cxgbi device.\n",
768 daddr6
->sin6_addr
.s6_addr
, ndev
->name
);
772 log_debug(1 << CXGBI_DBG_SOCK
,
773 "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n",
774 daddr6
->sin6_addr
.s6_addr
, ntohs(daddr6
->sin6_port
), port
,
777 csk
= cxgbi_sock_create(cdev
);
787 if (ipv6_addr_any(&rt
->rt6i_prefsrc
.addr
)) {
788 struct inet6_dev
*idev
= ip6_dst_idev((struct dst_entry
*)rt
);
790 err
= ipv6_dev_get_saddr(&init_net
, idev
? idev
->dev
: NULL
,
791 &daddr6
->sin6_addr
, 0, &pref_saddr
);
793 pr_info("failed to get source address to reach %pI6\n",
798 pref_saddr
= rt
->rt6i_prefsrc
.addr
;
801 csk
->csk_family
= AF_INET6
;
802 csk
->daddr6
.sin6_addr
= daddr6
->sin6_addr
;
803 csk
->daddr6
.sin6_port
= daddr6
->sin6_port
;
804 csk
->daddr6
.sin6_family
= daddr6
->sin6_family
;
805 csk
->saddr6
.sin6_family
= daddr6
->sin6_family
;
806 csk
->saddr6
.sin6_addr
= pref_saddr
;
817 cxgbi_sock_closed(csk
);
821 #endif /* IS_ENABLED(CONFIG_IPV6) */
void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
			unsigned int opt)
{
	csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
	dst_confirm(csk->dst);
	smp_mb();
	cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_established);
static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
		csk, csk->state, csk->flags, csk->user_data);

	if (csk->state != CTP_ESTABLISHED) {
		read_lock_bh(&csk->callback_lock);
		if (csk->user_data)
			iscsi_conn_failure(csk->user_data,
					ISCSI_ERR_TCP_CONN_CLOSE);
		read_unlock_bh(&csk->callback_lock);
	}
}
void cxgbi_sock_closed(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
		return;
	if (csk->saddr.sin_port)
		sock_put_port(csk);
	if (csk->dst)
		dst_release(csk->dst);
	csk->cdev->csk_release_offload_resources(csk);
	cxgbi_sock_set_state(csk, CTP_CLOSED);
	cxgbi_inform_iscsi_conn_closing(csk);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_closed);
static void need_active_close(struct cxgbi_sock *csk)
{
	int data_lost;
	int close_req = 0;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	spin_lock_bh(&csk->lock);
	if (csk->dst)
		dst_confirm(csk->dst);
	data_lost = skb_queue_len(&csk->receive_queue);
	__skb_queue_purge(&csk->receive_queue);

	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	else if (csk->state == CTP_ESTABLISHED) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
	} else if (csk->state == CTP_PASSIVE_CLOSE) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
	}

	if (close_req) {
		if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) ||
		    data_lost)
			csk->cdev->csk_send_abort_req(csk);
		else
			csk->cdev->csk_send_close_req(csk);
	}

	spin_unlock_bh(&csk->lock);
}
void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
{
	pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
		&csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
		errno);

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	csk->err = errno;
	cxgbi_sock_closed(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
	struct module *owner = csk->cdev->owner;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);

	module_put(owner);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
		if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
			pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
			       csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);
void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ESTABLISHED:
		cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
		break;
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
		break;
	case CTP_CLOSE_WAIT_1:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
	cxgbi_inform_iscsi_conn_closing(csk);
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);
void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	csk->snd_una = snd_nxt - 1;
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
		break;
	case CTP_CLOSE_WAIT_1:
	case CTP_CLOSE_WAIT_2:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);
void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
			   unsigned int snd_una, int seq_chk)
{
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
		csk, csk->state, csk->flags, csk->tid, credits,
		csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);

	spin_lock_bh(&csk->lock);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbi_sock_peek_wr(csk);

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
				csk, csk->state, csk->flags, csk->tid, credits,
				csk->wr_cred, csk->wr_una_cred);
			break;
		}

		if (unlikely(credits < p->csum)) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->state, csk->flags, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				p->csum);
			p->csum -= credits;
			break;
		} else {
			cxgbi_sock_dequeue_wr(csk);
			credits -= p->csum;
			kfree_skb(p);
		}
	}

	cxgbi_sock_check_wr_invariants(csk);

	if (seq_chk) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
				csk, csk->state, csk->flags, csk->tid, snd_una,
				csk->snd_una);
			goto done;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
		}
	}

	if (skb_queue_len(&csk->write_queue)) {
		if (csk->cdev->csk_push_tx_frames(csk, 0))
			cxgbi_conn_tx_open(csk);
	} else
		cxgbi_conn_tx_open(csk);
done:
	spin_unlock_bh(&csk->lock);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);
static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
					     unsigned short mtu)
{
	int i = 0;

	while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
		++i;

	return i;
}
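/*
 * The advertised MSS is clamped against the path MTU; the 40 bytes
 * subtracted below are the classic IPv4 + TCP header overhead
 * (20 + 20 bytes).
 */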
unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = csk->dst;

	csk->advmss = dst_metric_advmss(dst);

	if (csk->advmss > pmtu - 40)
		csk->advmss = pmtu - 40;
	if (csk->advmss < csk->cdev->mtus[0] - 40)
		csk->advmss = csk->cdev->mtus[0] - 40;
	idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);

	return idx;
}
EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
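/*
 * Entailing a tx skb stamps the current write_seq into the skb's
 * control block so the per-device push_tx_frames handler can compute
 * wire offsets later.
 */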
void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
	__skb_queue_tail(&csk->write_queue, skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);
void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);
void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
{
	int pending = cxgbi_sock_count_pending_wrs(csk);

	if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
			csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
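/*
 * PDU transmit: each skb is queued with SKCBF_TX_NEED_HDR set and
 * write_seq is advanced by skb->len plus the ULP digest overhead
 * (cxgbi_ulp_extra_len()) before the hardware queue is kicked.
 */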
static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&csk->lock);

	if (csk->state != CTP_ESTABLISHED) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
			csk, csk->state, csk->flags, csk->tid);
		err = -EAGAIN;
		goto out_err;
	}

	if (csk->err) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
			csk, csk->state, csk->flags, csk->tid, csk->err);
		err = -EPIPE;
		goto out_err;
	}

	if (csk->write_seq - csk->snd_una >= csk->snd_win) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
			csk, csk->state, csk->flags, csk->tid, csk->write_seq,
			csk->snd_una, csk->snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	while (skb) {
		int frags = skb_shinfo(skb)->nr_frags +
				(skb->len != skb->data_len);

		if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
			pr_err("csk 0x%p, skb head %u < %u.\n",
				csk, skb_headroom(skb), cdev->skb_tx_rsvd);
			err = -EINVAL;
			goto out_err;
		}

		if (frags >= SKB_WR_LIST_SIZE) {
			pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
				csk, skb_shinfo(skb)->nr_frags, skb->len,
				skb->data_len, (uint)(SKB_WR_LIST_SIZE));
			err = -EINVAL;
			goto out_err;
		}

		next = skb->next;
		skb->next = NULL;
		cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
		cxgbi_sock_skb_entail(csk, skb);
		copied += skb->len;
		csk->write_seq += skb->len +
				cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
		skb = next;
	}

	if (likely(skb_queue_len(&csk->write_queue)))
		cdev->csk_push_tx_frames(csk, 1);
done:
	spin_unlock_bh(&csk->lock);
	return copied;

out_err:
	if (copied == 0 && err == -EPIPE)
		copied = csk->err ? csk->err : -EPIPE;
	else
		copied = err;
	goto done;
}
static inline void
scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl,
		unsigned int *sgcnt, unsigned int *dlen,
		unsigned int prot)
{
	struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : scsi_out(sc);

	*sgl = sdb->table.sgl;
	*sgcnt = sdb->table.nents;
	*dlen = sdb->length;
	/* Caution: for protection sdb, sdb->length is invalid */
}
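/*
 * One pagepod holds PPOD_PAGES_MAX page addresses plus one more whose
 * value must be repeated as the first entry of the next pagepod, which
 * is why the current sg/offset are handed back through sg_pp/sg_off.
 */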
void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod,
			    struct cxgbi_task_tag_info *ttinfo,
			    struct scatterlist **sg_pp, unsigned int *sg_off)
{
	struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
	unsigned int offset = sg_off ? *sg_off : 0;
	dma_addr_t addr = 0UL;
	unsigned int len = 0;
	int i;

	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

	if (sg) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	for (i = 0; i < PPOD_PAGES_MAX; i++) {
		if (sg) {
			ppod->addr[i] = cpu_to_be64(addr + offset);
			offset += PAGE_SIZE;
			if (offset == (len + sg->offset)) {
				offset = 0;
				sg = sg_next(sg);
				if (sg) {
					addr = sg_dma_address(sg);
					len = sg_dma_len(sg);
				}
			}
		} else {
			ppod->addr[i] = 0ULL;
		}
	}

	/*
	 * the fifth address needs to be repeated in the next ppod, so do
	 * not move sg
	 */
	if (sg_pp) {
		*sg_pp = sg;
		*sg_off = offset;
	}

	if (offset == len) {
		offset = 0;
		if (sg) {
			sg = sg_next(sg);
			if (sg) {
				addr = sg_dma_address(sg);
				len = sg_dma_len(sg);
			}
		}
	}
	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod);
/*
 * APIs interacting with open-iscsi libraries
 */

static unsigned char padding[4];
void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
			 struct cxgbi_tag_format *tformat, unsigned int ppmax,
			 unsigned int llimit, unsigned int start,
			 unsigned int rsvd_factor)
{
	int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev,
				 cdev->lldev, tformat, ppmax, llimit, start,
				 rsvd_factor);

	if (err >= 0) {
		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);

		if (ppm->ppmax < 1024 ||
		    ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX)
			cdev->flags |= CXGBI_FLAG_DDP_OFF;
		err = 0;
	} else {
		cdev->flags |= CXGBI_FLAG_DDP_OFF;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup);
static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents)
{
	int i;
	int last_sgidx = nents - 1;
	struct scatterlist *sg = sgl;

	for (i = 0; i < nents; i++, sg = sg_next(sg)) {
		unsigned int len = sg->length + sg->offset;

		if ((sg->offset & 0x3) || (i && sg->offset) ||
		    ((i != last_sgidx) && len != PAGE_SIZE)) {
			log_debug(1 << CXGBI_DBG_DDP,
				  "sg %u/%u, %u,%u, not aligned.\n",
				  i, nents, sg->offset, sg->length);
			return -EINVAL;
		}
	}
	return 0;
}
*cconn
,
1329 struct cxgbi_task_data
*tdata
, u32 sw_tag
,
1330 unsigned int xferlen
)
1332 struct cxgbi_sock
*csk
= cconn
->cep
->csk
;
1333 struct cxgbi_device
*cdev
= csk
->cdev
;
1334 struct cxgbi_ppm
*ppm
= cdev
->cdev2ppm(cdev
);
1335 struct cxgbi_task_tag_info
*ttinfo
= &tdata
->ttinfo
;
1336 struct scatterlist
*sgl
= ttinfo
->sgl
;
1337 unsigned int sgcnt
= ttinfo
->nents
;
1338 unsigned int sg_offset
= sgl
->offset
;
1341 if (cdev
->flags
& CXGBI_FLAG_DDP_OFF
) {
1342 log_debug(1 << CXGBI_DBG_DDP
,
1343 "cdev 0x%p DDP off.\n", cdev
);
1347 if (!ppm
|| xferlen
< DDP_THRESHOLD
|| !sgcnt
||
1348 ppm
->tformat
.pgsz_idx_dflt
>= DDP_PGIDX_MAX
) {
1349 log_debug(1 << CXGBI_DBG_DDP
,
1350 "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
1351 ppm
, ppm
? ppm
->tformat
.pgsz_idx_dflt
: DDP_PGIDX_MAX
,
1352 xferlen
, ttinfo
->nents
);
1356 /* make sure the buffer is suitable for ddp */
1357 if (cxgbi_ddp_sgl_check(sgl
, sgcnt
) < 0)
1360 ttinfo
->nr_pages
= (xferlen
+ sgl
->offset
+ (1 << PAGE_SHIFT
) - 1) >>
1364 * the ddp tag will be used for the itt in the outgoing pdu,
1365 * the itt genrated by libiscsi is saved in the ppm and can be
1366 * retrieved via the ddp tag
1368 err
= cxgbi_ppm_ppods_reserve(ppm
, ttinfo
->nr_pages
, 0, &ttinfo
->idx
,
1369 &ttinfo
->tag
, (unsigned long)sw_tag
);
1374 ttinfo
->npods
= err
;
1376 /* setup dma from scsi command sgl */
1378 err
= dma_map_sg(&ppm
->pdev
->dev
, sgl
, sgcnt
, DMA_FROM_DEVICE
);
1379 sgl
->offset
= sg_offset
;
1381 pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
1382 __func__
, sw_tag
, xferlen
, sgcnt
);
1385 if (err
!= ttinfo
->nr_pages
) {
1386 log_debug(1 << CXGBI_DBG_DDP
,
1387 "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n",
1388 __func__
, sw_tag
, xferlen
, sgcnt
, err
);
1391 ttinfo
->flags
|= CXGBI_PPOD_INFO_FLAG_MAPPED
;
1392 ttinfo
->cid
= csk
->port_id
;
1394 cxgbi_ppm_make_ppod_hdr(ppm
, ttinfo
->tag
, csk
->tid
, sgl
->offset
,
1395 xferlen
, &ttinfo
->hdr
);
1397 if (cdev
->flags
& CXGBI_FLAG_USE_PPOD_OFLDQ
) {
1398 /* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */
1399 ttinfo
->flags
|= CXGBI_PPOD_INFO_FLAG_VALID
;
1401 /* write ppod from control queue now */
1402 err
= cdev
->csk_ddp_set_map(ppm
, csk
, ttinfo
);
1410 cxgbi_ppm_ppod_release(ppm
, ttinfo
->idx
);
1412 if (ttinfo
->flags
& CXGBI_PPOD_INFO_FLAG_MAPPED
) {
1413 ttinfo
->flags
&= ~CXGBI_PPOD_INFO_FLAG_MAPPED
;
1414 dma_unmap_sg(&ppm
->pdev
->dev
, sgl
, sgcnt
, DMA_FROM_DEVICE
);
1419 static void task_release_itt(struct iscsi_task
*task
, itt_t hdr_itt
)
1421 struct scsi_cmnd
*sc
= task
->sc
;
1422 struct iscsi_tcp_conn
*tcp_conn
= task
->conn
->dd_data
;
1423 struct cxgbi_conn
*cconn
= tcp_conn
->dd_data
;
1424 struct cxgbi_device
*cdev
= cconn
->chba
->cdev
;
1425 struct cxgbi_ppm
*ppm
= cdev
->cdev2ppm(cdev
);
1426 u32 tag
= ntohl((__force u32
)hdr_itt
);
1428 log_debug(1 << CXGBI_DBG_DDP
,
1429 "cdev 0x%p, task 0x%p, release tag 0x%x.\n",
1432 (scsi_bidi_cmnd(sc
) || sc
->sc_data_direction
== DMA_FROM_DEVICE
) &&
1433 cxgbi_ppm_is_ddp_tag(ppm
, tag
)) {
1434 struct cxgbi_task_data
*tdata
= iscsi_task_cxgbi_data(task
);
1435 struct cxgbi_task_tag_info
*ttinfo
= &tdata
->ttinfo
;
1437 if (!(cdev
->flags
& CXGBI_FLAG_USE_PPOD_OFLDQ
))
1438 cdev
->csk_ddp_clear_map(cdev
, ppm
, ttinfo
);
1439 cxgbi_ppm_ppod_release(ppm
, ttinfo
->idx
);
1440 dma_unmap_sg(&ppm
->pdev
->dev
, ttinfo
->sgl
, ttinfo
->nents
,
1445 static inline u32
cxgbi_build_sw_tag(u32 idx
, u32 age
)
1447 /* assume idx and age both are < 0x7FFF (32767) */
1448 return (idx
<< 16) | age
;
static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *sess = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
	u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age);
	u32 tag = 0;
	int err = -EINVAL;

	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)
	   ) {
		struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
		struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;

		scmd_get_params(sc, &ttinfo->sgl, &ttinfo->nents,
				&tdata->dlen, 0);
		err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, tdata->dlen);
		if (!err)
			tag = ttinfo->tag;
		else
			log_debug(1 << CXGBI_DBG_DDP,
				  "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
				  cconn->cep->csk, task, tdata->dlen,
				  ttinfo->nents);
	}

	if (err < 0) {
		err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag);
		if (err < 0)
			return err;
	}
	/* the itt need to sent in big-endian order */
	*hdr_itt = (__force itt_t)htonl(tag);

	log_debug(1 << CXGBI_DBG_DDP,
		  "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
		  cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
	return 0;
}
void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
	u32 tag = ntohl((__force u32)itt);
	u32 sw_bits;

	if (ppm) {
		if (cxgbi_ppm_is_ddp_tag(ppm, tag))
			sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag);
		else
			sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag);
	} else {
		sw_bits = tag;
	}

	cxgbi_decode_sw_tag(sw_bits, idx, age);
	log_debug(1 << CXGBI_DBG_DDP,
		  "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
		  cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
		  age ? *age : 0xFF);
}
EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
{
	struct iscsi_conn *conn = csk->user_data;

	if (conn) {
		log_debug(1 << CXGBI_DBG_SOCK,
			"csk 0x%p, cid %d.\n", csk, conn->id);
		iscsi_conn_queue_work(conn);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
/*
 * pdu receive, interact with libiscsi_tcp
 */
static inline int read_pdu_skb(struct iscsi_conn *conn,
			       struct sk_buff *skb,
			       unsigned int offset,
			       int offloaded)
{
	int status = 0;
	int bytes_read;

	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
	switch (status) {
	case ISCSI_TCP_CONN_ERR:
		pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
			skb, offset, offloaded);
		return -EIO;
	case ISCSI_TCP_SUSPENDED:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		/* no transfer - just have caller flush queue */
		return bytes_read;
	case ISCSI_TCP_SKB_DONE:
		pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
			skb, offset, offloaded);
		/*
		 * pdus should always fit in the skb and we should get
		 * segment done notifcation.
		 */
		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
		return -EFAULT;
	case ISCSI_TCP_SEGMENT_DONE:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		return bytes_read;
	default:
		pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
			skb, offset, offloaded, status);
		return -EINVAL;
	}
}
static int
skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn,
		 struct sk_buff *skb)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int err;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
		conn, skb, skb->len, cxgbi_skcb_flags(skb));

	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
		pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
		return -EIO;
	}

	if (conn->hdrdgst_en &&
	    cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) {
		pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
		return -EIO;
	}

	if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) &&
	    cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) {
		/* If completion flag is set and data is directly
		 * placed in to the host memory then update
		 * task->exp_datasn to the datasn in completion
		 * iSCSI hdr as T6 adapter generates completion only
		 * for the last pdu of a sequence.
		 */
		itt_t itt = ((struct iscsi_data *)skb->data)->itt;
		struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
		u32 data_sn = be32_to_cpu(((struct iscsi_data *)
					   skb->data)->datasn);
		if (task && task->sc) {
			struct iscsi_tcp_task *tcp_task = task->dd_data;

			tcp_task->exp_datasn = data_sn;
		}
	}

	err = read_pdu_skb(conn, skb, 0, 0);
	if (likely(err >= 0)) {
		struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data;
		u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK;

		if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP))
			cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD);
	}

	return err;
}
static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
			     struct sk_buff *skb, unsigned int offset)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	bool offloaded = 0;
	int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
		conn, skb, skb->len, cxgbi_skcb_flags(skb));

	if (conn->datadgst_en &&
	    cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) {
		pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
			conn, lskb, cxgbi_skcb_flags(lskb));
		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
		return -EIO;
	}

	if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
		return 0;

	/* coalesced, add header digest length */
	if (lskb == skb && conn->hdrdgst_en)
		offset += ISCSI_DIGEST_SIZE;

	if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD))
		offloaded = 1;

	if (opcode == ISCSI_OP_SCSI_DATA_IN)
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
			skb, opcode, ntohl(tcp_conn->in.hdr->itt),
			tcp_conn->in.datalen, offloaded ? "is" : "not");

	return read_pdu_skb(conn, skb, offset, offloaded);
}
/*
 * Return RX credits to the hardware so it can reopen its receive
 * window: credits accumulate as (copied_seq - rcv_wup) and are flushed
 * once they reach rx_credit_thres, or sooner when the window is within
 * 16KB of closing.
 */
static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
{
	struct cxgbi_device *cdev = csk->cdev;
	int must_send;
	u32 credits;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
		csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
		csk->rcv_wup, cdev->rx_credit_thres,
		csk->rcv_win);

	if (!cdev->rx_credit_thres)
		return;

	if (csk->state != CTP_ESTABLISHED)
		return;

	credits = csk->copied_seq - csk->rcv_wup;
	if (unlikely(!credits))
		return;
	must_send = credits + 16384 >= csk->rcv_win;
	if (must_send || credits >= cdev->rx_credit_thres)
		csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
}
void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct iscsi_conn *conn = csk->user_data;
	struct sk_buff *skb;
	unsigned int read = 0;
	int err = 0;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, conn 0x%p.\n", csk, conn);

	if (unlikely(!conn || conn->suspend_rx)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
			csk, conn, conn ? conn->id : 0xFF,
			conn ? conn->suspend_rx : 0xFF);
		return;
	}

	while (!err) {
		skb = skb_peek(&csk->receive_queue);
		if (!skb ||
		    !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
			if (skb)
				log_debug(1 << CXGBI_DBG_PDU_RX,
					"skb 0x%p, NOT ready 0x%lx.\n",
					skb, cxgbi_skcb_flags(skb));
			break;
		}
		__skb_unlink(skb, &csk->receive_queue);

		read += cxgbi_skcb_rx_pdulen(skb);
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
			csk, skb, skb->len, cxgbi_skcb_flags(skb),
			cxgbi_skcb_rx_pdulen(skb));

		if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
			err = skb_read_pdu_bhs(csk, conn, skb);
			if (err < 0) {
				pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}
			err = skb_read_pdu_data(conn, skb, skb,
						err + cdev->skb_rx_extra);
			if (err < 0)
				pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
		} else {
			err = skb_read_pdu_bhs(csk, conn, skb);
			if (err < 0) {
				pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}

			if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
				struct sk_buff *dskb;

				dskb = skb_peek(&csk->receive_queue);
				if (!dskb) {
					pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
						" plen %u, NO data.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb));
					err = -EIO;
					goto skb_done;
				}
				__skb_unlink(dskb, &csk->receive_queue);

				err = skb_read_pdu_data(conn, skb, dskb, 0);
				if (err < 0)
					pr_err("data, csk 0x%p, skb 0x%p,%u, "
						"f 0x%lx, plen %u, dskb 0x%p,"
						"%u.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb),
						dskb, dskb->len);
				__kfree_skb(dskb);
			} else
				err = skb_read_pdu_data(conn, skb, skb, 0);
		}
skb_done:
		__kfree_skb(skb);
	}

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
	if (read) {
		csk->copied_seq += read;
		csk_return_rx_credits(csk, read);
		conn->rxdata_octets += read;
	}

	if (err < 0) {
		pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
			csk, conn, err, read);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);
static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
			   unsigned int offset, unsigned int *off,
			   struct scatterlist **sgp)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, sgcnt, i) {
		if (offset < sg->length) {
			*off = offset;
			*sgp = sg;
			return 0;
		}
		offset -= sg->length;
	}
	return -EFAULT;
}
static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
			     unsigned int dlen, struct page_frag *frags,
			     int frag_max)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		if (!sglen) {
			sg = sg_next(sg);
			if (!sg) {
				pr_warn("sg %d NULL, len %u/%u.\n",
					i, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);
		}
		copy = min(datalen, sglen);
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				pr_warn("too many pages %u, dlen %u.\n",
					frag_max, dlen);
				return -EINVAL;
			}

			frags[i].page = page;
			frags[i].offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);

	return i;
}
/*
 * PDU allocation for TX. When the skb headroom the hardware requires
 * still leaves more linear space than 512 * MAX_SKB_FRAGS, outgoing
 * write data can be copied into the skb head as well, so extra headroom
 * is reserved up front for it.
 */
int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct scsi_cmnd *sc = task->sc;
	struct cxgbi_sock *csk = cconn->cep->csk;
	struct net_device *ndev = cdev->ports[csk->port_id];
	int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
	    (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	     (opcode == ISCSI_OP_SCSI_CMD &&
	      (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		/* data could goes into skb head */
		headroom += min_t(unsigned int,
				SKB_MAX_HEAD(cdev->skb_tx_rsvd),
				conn->max_xmit_dlength);

	tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
	if (!tdata->skb) {
		ndev->stats.tx_dropped++;
		return -ENOMEM;
	}

	skb_reserve(tdata->skb, cdev->skb_tx_rsvd);

	if (task->sc) {
		task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	} else {
		task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC);
		if (!task->hdr) {
			__kfree_skb(tdata->skb);
			tdata->skb = NULL;
			ndev->stats.tx_dropped++;
			return -ENOMEM;
		}
	}
	task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		task_reserve_itt(task, &task->hdr->itt);

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
		task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
		conn->max_xmit_dlength, ntohl(task->hdr->itt));

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
/*
 * ULP2_MODE_ISCSI is stashed in the high nibble of the skb's ulp_mode
 * with a 2-bit CRC submode below it: bit 0 = header digest, bit 1 =
 * data digest.
 */
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
	if (hcrc || dcrc) {
		u8 submode = 0;

		if (hcrc)
			submode |= 1;
		if (dcrc)
			submode |= 2;
		cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
	} else
		cxgbi_skcb_ulp_mode(skb) = 0;
}
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
		task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
		ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);

	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		err = sgl_seek_offset(
					sdb->table.sgl, sdb->table.nents,
					tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
				sdb->table.nents, tdata->offset, sdb->length);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
				sdb->table.nents, tdata->offset, tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			struct page_frag *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page);

				memcpy(dst, src + frag->offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src);
			}
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fit into frag_list */
			for (i = 0; i < tdata->nr_frags; i++) {
				__skb_fill_page_desc(skb, i,
						tdata->frags[i].page,
						tdata->frags[i].offset,
						tdata->frags[i].size);
				skb_frag_ref(skb, i);
			}
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
				   count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				virt_to_page(padding), offset_in_page(padding),
				padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
/*
 * Transmit path: if the ppods were deferred to the offload queue
 * (CXGBI_PPOD_INFO_FLAG_VALID), write them first so DDP is armed before
 * the command PDU goes out; a failure there is non-fatal because the
 * data can still arrive through the free list as normal (non-DDP) rx.
 */
int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
	struct sk_buff *skb = tdata->skb;
	struct cxgbi_sock *csk = NULL;
	unsigned int datalen;
	int err;

	if (!skb) {
		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
			"task 0x%p\n", task);
		return 0;
	}

	if (cconn && cconn->cep)
		csk = cconn->cep->csk;
	if (!csk) {
		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
			"task 0x%p, csk gone.\n", task);
		return -EPIPE;
	}

	tdata->skb = NULL;
	datalen = skb->data_len;

	/* write ppod first if using ofldq to write ppod */
	if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) {
		struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev);

		ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID;
		if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0)
			pr_err("task 0x%p, ppod writing using ofldq failed.\n",
			       task);
			/* continue. Let fl get the data */
	}

	if (!task->sc)
		memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX);

	err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
	if (err > 0) {
		int pdulen = err;

		log_debug(1 << CXGBI_DBG_PDU_TX,
			"task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
			task, task->sc, skb, skb->len, skb->data_len, err);

		if (task->conn->hdrdgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		if (datalen && task->conn->datadgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		task->conn->txdata_octets += pdulen;
		return 0;
	}

	if (err == -EAGAIN || err == -ENOBUFS) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
			task, skb, skb->len, skb->data_len, err);
		/* reset skb to send when we are called again */
		tdata->skb = skb;
		return err;
	}

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
		task->itt, skb, skb->len, skb->data_len, err);

	__kfree_skb(skb);

	iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
	iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);
void cxgbi_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);

	if (!tcp_task || !tdata || (tcp_task->dd_data != tdata)) {
		pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
			task, task->sc, tcp_task,
			tcp_task ? tcp_task->dd_data : NULL, tdata);
		return;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"task 0x%p, skb 0x%p, itt 0x%x.\n",
		task, tdata->skb, task->hdr_itt);

	tcp_task->dd_data = NULL;

	if (!task->sc)
		kfree(task->hdr);
	task->hdr = NULL;

	/* never reached the xmit task callout */
	if (tdata->skb) {
		__kfree_skb(tdata->skb);
		tdata->skb = NULL;
	}

	task_release_itt(task, task->hdr_itt);
	memset(tdata, 0, sizeof(*tdata));

	iscsi_tcp_cleanup_task(task);
}
EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
			  struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->digest_err = 0;
	stats->timeout_err = 0;
	stats->custom_length = 1;
	strcpy(stats->custom[0].desc, "eh_abort_cnt");
	stats->custom[0].value = conn->eh_abort_cnt;
}
EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);
static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
	unsigned int max_def = 512 * MAX_SKB_FRAGS;
	unsigned int max = max(max_def, headroom);

	max = min(cconn->chba->cdev->tx_max_size, max);
	if (conn->max_xmit_dlength)
		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
	else
		conn->max_xmit_dlength = max;
	cxgbi_align_pdu_size(conn->max_xmit_dlength);

	return 0;
}
/*
 * MaxRecvDataSegmentLength is capped by the hardware rx_max_size and,
 * like the xmit limit above, kept aligned via cxgbi_align_pdu_size().
 */
static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	unsigned int max = cconn->chba->cdev->rx_max_size;

	cxgbi_align_pdu_size(max);

	if (conn->max_recv_dlength) {
		if (conn->max_recv_dlength > max) {
			pr_err("MaxRecvDataSegmentLength %u > %u.\n",
				conn->max_recv_dlength, max);
			return -EINVAL;
		}
		conn->max_recv_dlength = min(conn->max_recv_dlength, max);
		cxgbi_align_pdu_size(conn->max_recv_dlength);
	} else
		conn->max_recv_dlength = max;

	return 0;
}
int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
			enum iscsi_param param, char *buf, int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_sock *csk = cconn->cep->csk;
	int err;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls_conn 0x%p, param %d, buf(%d) %s.\n",
		cls_conn, param, buflen, buf);

	switch (param) {
	case ISCSI_PARAM_HDRDGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->hdrdgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en, 0);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->datadgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en, 0);
		break;
	case ISCSI_PARAM_MAX_R2T:
		return iscsi_tcp_set_max_r2t(conn, buf);
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_recv_dlength(conn);
		break;
	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_xmit_dlength(conn);
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
{
	int len;

	cxgbi_sock_get(csk);
	len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
	cxgbi_sock_put(csk);

	return len;
}

static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
{
	int len;

	cxgbi_sock_get(csk);
	if (csk->csk_family == AF_INET)
		len = sprintf(buf, "%pI4",
			      &csk->daddr.sin_addr.s_addr);
	else
		len = sprintf(buf, "%pI6",
			      &csk->daddr6.sin6_addr);

	cxgbi_sock_put(csk);

	return len;
}
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
		       char *buf)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls_conn 0x%p, param %d.\n", ep, param);

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		if (!cep)
			return -ENOTCONN;

		csk = cep->csk;
		if (!csk)
			return -ENOTCONN;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &csk->daddr, param, buf);
	default:
		return -ENOSYS;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
struct iscsi_cls_conn *
cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
{
        struct iscsi_cls_conn *cls_conn;
        struct iscsi_conn *conn;
        struct iscsi_tcp_conn *tcp_conn;
        struct cxgbi_conn *cconn;

        cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
        if (!cls_conn)
                return NULL;

        conn = cls_conn->dd_data;
        tcp_conn = conn->dd_data;
        cconn = tcp_conn->dd_data;
        cconn->iconn = conn;

        log_debug(1 << CXGBI_DBG_ISCSI,
                  "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
                  cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);

        return cls_conn;
}
EXPORT_SYMBOL_GPL(cxgbi_create_conn);
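
/*
 * iscsi_transport .bind_conn hook: attach the offloaded socket (looked up
 * from its endpoint handle) to the iscsi connection, program the DDP page
 * size, and size the per-task tag index from the session's cmds_max.
 */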
int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
                    struct iscsi_cls_conn *cls_conn,
                    u64 transport_eph, int is_leading)
{
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
        struct cxgbi_conn *cconn = tcp_conn->dd_data;
        struct cxgbi_ppm *ppm;
        struct iscsi_endpoint *ep;
        struct cxgbi_endpoint *cep;
        struct cxgbi_sock *csk;
        int err;

        ep = iscsi_lookup_endpoint(transport_eph);
        if (!ep)
                return -EINVAL;

        /* setup ddp pagesize */
        cep = ep->dd_data;
        csk = cep->csk;

        ppm = csk->cdev->cdev2ppm(csk->cdev);
        err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
                                             ppm->tformat.pgsz_idx_dflt, 0);
        if (err < 0)
                return err;

        err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
        if (err)
                return -EINVAL;

        /* calculate the tag idx bits needed for this conn based on cmds_max */
        cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;

        write_lock_bh(&csk->callback_lock);
        csk->user_data = conn;
        cconn->chba = cep->chba;
        cconn->cep = cep;
        cep->cconn = cconn;
        write_unlock_bh(&csk->callback_lock);

        cxgbi_conn_max_xmit_dlength(conn);
        cxgbi_conn_max_recv_dlength(conn);

        log_debug(1 << CXGBI_DBG_ISCSI,
                  "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
                  cls_session, cls_conn, ep, cconn, csk);

        /* init recv engine */
        iscsi_tcp_hdr_recv_prep(tcp_conn);

        return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
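
/*
 * iscsi_transport .create_session hook: set up an iscsi session on the
 * host that owns the already-connected endpoint, with per-task room for
 * both the iscsi_tcp and cxgbi task data.
 */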
struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
                                               u16 cmds_max, u16 qdepth,
                                               u32 initial_cmdsn)
{
        struct cxgbi_endpoint *cep;
        struct cxgbi_hba *chba;
        struct Scsi_Host *shost;
        struct iscsi_cls_session *cls_session;
        struct iscsi_session *session;

        if (!ep) {
                pr_err("missing endpoint.\n");
                return NULL;
        }

        cep = ep->dd_data;
        chba = cep->chba;
        shost = chba->shost;

        BUG_ON(chba != iscsi_host_priv(shost));

        cls_session = iscsi_session_setup(chba->cdev->itp, shost,
                                          cmds_max, 0,
                                          sizeof(struct iscsi_tcp_task) +
                                          sizeof(struct cxgbi_task_data),
                                          initial_cmdsn, ISCSI_MAX_TARGET);
        if (!cls_session)
                return NULL;

        session = cls_session->dd_data;
        if (iscsi_tcp_r2tpool_alloc(session))
                goto remove_session;

        log_debug(1 << CXGBI_DBG_ISCSI,
                  "ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
        return cls_session;

remove_session:
        iscsi_session_teardown(cls_session);
        return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_create_session);

void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
{
        log_debug(1 << CXGBI_DBG_ISCSI,
                  "cls sess 0x%p.\n", cls_session);

        iscsi_tcp_r2tpool_free(cls_session->dd_data);
        iscsi_session_teardown(cls_session);
}
EXPORT_SYMBOL_GPL(cxgbi_destroy_session);
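
/*
 * iscsi_transport .set_host_param hook: only the IPv4 address is applied
 * here; the MAC address and netdev name are fixed by the underlying port.
 */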
int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
                         char *buf, int buflen)
{
        struct cxgbi_hba *chba = iscsi_host_priv(shost);

        if (!chba->ndev) {
                shost_printk(KERN_ERR, shost, "Could not set host param. "
                             "netdev for host not set.\n");
                return -ENODEV;
        }

        log_debug(1 << CXGBI_DBG_ISCSI,
                  "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
                  shost, chba, chba->ndev->name, param, buflen, buf);

        switch (param) {
        case ISCSI_HOST_PARAM_IPADDRESS:
        {
                __be32 addr = in_aton(buf);
                log_debug(1 << CXGBI_DBG_ISCSI,
                          "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
                cxgbi_set_iscsi_ipv4(chba, addr);
                return 0;
        }
        case ISCSI_HOST_PARAM_HWADDRESS:
        case ISCSI_HOST_PARAM_NETDEV_NAME:
                return 0;
        default:
                return iscsi_host_set_param(shost, param, buf, buflen);
        }
}
EXPORT_SYMBOL_GPL(cxgbi_set_host_param);
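
/*
 * iscsi_transport .get_host_param hook: the MAC and netdev name come from
 * the port's net_device; the IP address is taken from any offloaded
 * connection currently open on this port.
 */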
int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
                         char *buf)
{
        struct cxgbi_hba *chba = iscsi_host_priv(shost);
        int len = 0;

        if (!chba->ndev) {
                shost_printk(KERN_ERR, shost, "Could not get host param. "
                             "netdev for host not set.\n");
                return -ENODEV;
        }

        log_debug(1 << CXGBI_DBG_ISCSI,
                  "shost 0x%p, hba 0x%p,%s, param %d.\n",
                  shost, chba, chba->ndev->name, param);

        switch (param) {
        case ISCSI_HOST_PARAM_HWADDRESS:
                len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
                break;
        case ISCSI_HOST_PARAM_NETDEV_NAME:
                len = sprintf(buf, "%s\n", chba->ndev->name);
                break;
        case ISCSI_HOST_PARAM_IPADDRESS:
        {
                struct cxgbi_sock *csk = find_sock_on_port(chba->cdev,
                                                           chba->port_id);
                if (csk) {
                        len = sprintf(buf, "%pIS",
                                      (struct sockaddr *)&csk->saddr);
                }
                log_debug(1 << CXGBI_DBG_ISCSI,
                          "hba %s, addr %s.\n", chba->ndev->name, buf);
                break;
        }
        default:
                return iscsi_host_get_param(shost, param, buf);
        }

        return len;
}
EXPORT_SYMBOL_GPL(cxgbi_get_host_param);
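
/*
 * iscsi_transport .ep_connect hook: route to the destination, make sure
 * the route lands on a Chelsio port (the requested shost, if any), grab a
 * source port and kick off the offloaded active-open handshake.
 */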
struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
                                        struct sockaddr *dst_addr,
                                        int non_blocking)
{
        struct iscsi_endpoint *ep;
        struct cxgbi_endpoint *cep;
        struct cxgbi_hba *hba = NULL;
        struct cxgbi_sock *csk;
        int ifindex = 0;
        int err = -EINVAL;

        log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
                  "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
                  shost, non_blocking, dst_addr);

        if (shost) {
                hba = iscsi_host_priv(shost);
                if (!hba) {
                        pr_info("shost 0x%p, priv NULL.\n", shost);
                        goto err_out;
                }

                rtnl_lock();
                if (!vlan_uses_dev(hba->ndev))
                        ifindex = hba->ndev->ifindex;
                rtnl_unlock();
        }

        if (dst_addr->sa_family == AF_INET) {
                csk = cxgbi_check_route(dst_addr, ifindex);
#if IS_ENABLED(CONFIG_IPV6)
        } else if (dst_addr->sa_family == AF_INET6) {
                csk = cxgbi_check_route6(dst_addr, ifindex);
#endif
        } else {
                pr_info("address family 0x%x NOT supported.\n",
                        dst_addr->sa_family);
                err = -EAFNOSUPPORT;
                return (struct iscsi_endpoint *)ERR_PTR(err);
        }

        if (IS_ERR(csk))
                return (struct iscsi_endpoint *)csk;
        cxgbi_sock_get(csk);

        if (!hba)
                hba = csk->cdev->hbas[csk->port_id];
        else if (hba != csk->cdev->hbas[csk->port_id]) {
                pr_info("Could not connect through requested host %u "
                        "hba 0x%p != 0x%p (%u).\n",
                        shost->host_no, hba,
                        csk->cdev->hbas[csk->port_id], csk->port_id);
                err = -ENOSPC;
                goto release_conn;
        }

        err = sock_get_port(csk);
        if (err)
                goto release_conn;

        cxgbi_sock_set_state(csk, CTP_CONNECTING);
        err = csk->cdev->csk_init_act_open(csk);
        if (err)
                goto release_conn;

        if (cxgbi_sock_is_closing(csk)) {
                err = -ENOSPC;
                pr_info("csk 0x%p is closing.\n", csk);
                goto release_conn;
        }

        ep = iscsi_create_endpoint(sizeof(*cep));
        if (!ep) {
                err = -ENOMEM;
                pr_info("iscsi alloc ep, OOM.\n");
                goto release_conn;
        }

        cep = ep->dd_data;
        cep->csk = csk;
        cep->chba = hba;

        log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
                  "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
                  ep, cep, csk, hba, hba->ndev->name);
        return ep;

release_conn:
        cxgbi_sock_put(csk);
        cxgbi_sock_closed(csk);
err_out:
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_connect);

/* iscsi_transport .ep_poll hook: 1 once the TCP connection is established. */
int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
        struct cxgbi_endpoint *cep = ep->dd_data;
        struct cxgbi_sock *csk = cep->csk;

        if (!cxgbi_sock_is_established(csk))
                return 0;
        return 1;
}
EXPORT_SYMBOL_GPL(cxgbi_ep_poll);
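
/*
 * iscsi_transport .ep_disconnect hook: detach the socket from the iscsi
 * connection under the callback lock, then close it (actively if it ever
 * reached the established state).
 */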
void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
{
        struct cxgbi_endpoint *cep = ep->dd_data;
        struct cxgbi_conn *cconn = cep->cconn;
        struct cxgbi_sock *csk = cep->csk;

        log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
                  "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
                  ep, cep, cconn, csk, csk->state, csk->flags);

        if (cconn && cconn->iconn) {
                iscsi_suspend_tx(cconn->iconn);
                write_lock_bh(&csk->callback_lock);
                cep->csk->user_data = NULL;
                cconn->cep = NULL;
                write_unlock_bh(&csk->callback_lock);
        }
        iscsi_destroy_endpoint(ep);

        if (likely(csk->state >= CTP_ESTABLISHED))
                need_active_close(csk);
        else
                cxgbi_sock_closed(csk);

        cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);
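
/*
 * Register/unregister the HW driver's iscsi_transport with the open-iscsi
 * transport class; *stt caches the scsi_transport_template for the caller.
 */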
int cxgbi_iscsi_init(struct iscsi_transport *itp,
                     struct scsi_transport_template **stt)
{
        *stt = iscsi_register_transport(itp);
        if (*stt == NULL) {
                pr_err("unable to register %s transport 0x%p.\n",
                       itp->name, itp);
                return -ENODEV;
        }
        log_debug(1 << CXGBI_DBG_ISCSI,
                  "%s, registered iscsi transport 0x%p.\n",
                  itp->name, itp);
        return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);

void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
                         struct scsi_transport_template **stt)
{
        if (*stt) {
                log_debug(1 << CXGBI_DBG_ISCSI,
                          "de-register transport 0x%p, %s, stt 0x%p.\n",
                          itp, itp->name, *stt);
                *stt = NULL;
                iscsi_unregister_transport(itp);
        }
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
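
/*
 * A minimal sketch of how a HW driver wires the exported helpers above
 * into its struct iscsi_transport before calling cxgbi_iscsi_init() (the
 * names below are illustrative; see cxgb3i/cxgb4i for the real tables):
 *
 *	static struct scsi_transport_template *my_stt;
 *
 *	static struct iscsi_transport my_iscsi_transport = {
 *		.owner		 = THIS_MODULE,
 *		.attr_is_visible = cxgbi_attr_is_visible,
 *		.create_session	 = cxgbi_create_session,
 *		.destroy_session = cxgbi_destroy_session,
 *		.create_conn	 = cxgbi_create_conn,
 *		.bind_conn	 = cxgbi_bind_conn,
 *		.set_param	 = cxgbi_set_conn_param,
 *		.get_ep_param	 = cxgbi_get_ep_param,
 *		.get_host_param	 = cxgbi_get_host_param,
 *		.set_host_param	 = cxgbi_set_host_param,
 *		.ep_connect	 = cxgbi_ep_connect,
 *		.ep_poll	 = cxgbi_ep_poll,
 *		.ep_disconnect	 = cxgbi_ep_disconnect,
 *		...
 *	};
 *
 *	err = cxgbi_iscsi_init(&my_iscsi_transport, &my_stt);
 */
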
umode_t cxgbi_attr_is_visible(int param_type, int param)
{
        switch (param_type) {
        case ISCSI_HOST_PARAM:
                switch (param) {
                case ISCSI_HOST_PARAM_NETDEV_NAME:
                case ISCSI_HOST_PARAM_HWADDRESS:
                case ISCSI_HOST_PARAM_IPADDRESS:
                case ISCSI_HOST_PARAM_INITIATOR_NAME:
                        return S_IRUGO;
                default:
                        return 0;
                }
        case ISCSI_PARAM:
                switch (param) {
                case ISCSI_PARAM_MAX_RECV_DLENGTH:
                case ISCSI_PARAM_MAX_XMIT_DLENGTH:
                case ISCSI_PARAM_HDRDGST_EN:
                case ISCSI_PARAM_DATADGST_EN:
                case ISCSI_PARAM_CONN_ADDRESS:
                case ISCSI_PARAM_CONN_PORT:
                case ISCSI_PARAM_EXP_STATSN:
                case ISCSI_PARAM_PERSISTENT_ADDRESS:
                case ISCSI_PARAM_PERSISTENT_PORT:
                case ISCSI_PARAM_PING_TMO:
                case ISCSI_PARAM_RECV_TMO:
                case ISCSI_PARAM_INITIAL_R2T_EN:
                case ISCSI_PARAM_MAX_R2T:
                case ISCSI_PARAM_IMM_DATA_EN:
                case ISCSI_PARAM_FIRST_BURST:
                case ISCSI_PARAM_MAX_BURST:
                case ISCSI_PARAM_PDU_INORDER_EN:
                case ISCSI_PARAM_DATASEQ_INORDER_EN:
                case ISCSI_PARAM_ERL:
                case ISCSI_PARAM_TARGET_NAME:
                case ISCSI_PARAM_TPGT:
                case ISCSI_PARAM_USERNAME:
                case ISCSI_PARAM_PASSWORD:
                case ISCSI_PARAM_USERNAME_IN:
                case ISCSI_PARAM_PASSWORD_IN:
                case ISCSI_PARAM_FAST_ABORT:
                case ISCSI_PARAM_ABORT_TMO:
                case ISCSI_PARAM_LU_RESET_TMO:
                case ISCSI_PARAM_TGT_RESET_TMO:
                case ISCSI_PARAM_IFACE_NAME:
                case ISCSI_PARAM_INITIATOR_NAME:
                        return S_IRUGO;
                default:
                        return 0;
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);

static int __init libcxgbi_init_module(void)
{
        pr_info("%s", version);

        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
                     sizeof(struct cxgbi_skb_cb));
        return 0;
}

static void __exit libcxgbi_exit_module(void)
{
        cxgbi_device_unregister_all(0xFF);
        return;
}

module_init(libcxgbi_init_module);
module_exit(libcxgbi_exit_module);