/*
 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/if_vlan.h>
#include <linux/inet.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <linux/inetdevice.h>	/* ip_dev_find */
#include <linux/module.h>
#include <net/tcp.h>

static unsigned int dbg_level;

#include "libcxgbi.h"

#define DRV_MODULE_NAME		"libcxgbi"
#define DRV_MODULE_DESC		"Chelsio iSCSI driver library"
#define DRV_MODULE_VERSION	"0.9.1-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");

/*
 * cxgbi device management
 * maintains a list of the cxgbi devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static LIST_HEAD(cdev_rcu_list);
static DEFINE_SPINLOCK(cdev_rcu_lock);

int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
				unsigned int max_conn)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
					     sizeof(struct cxgbi_sock *),
					     GFP_KERNEL);
	if (!pmap->port_csk) {
		pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
		return -ENOMEM;
	}

	pmap->max_connect = max_conn;
	pmap->sport_base = base;
	spin_lock_init(&pmap->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);

void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	struct cxgbi_sock *csk;
	int i;

	for (i = 0; i < pmap->max_connect; i++) {
		if (pmap->port_csk[i]) {
			csk = pmap->port_csk[i];
			pmap->port_csk[i] = NULL;
			log_debug(1 << CXGBI_DBG_SOCK,
				"csk 0x%p, cdev 0x%p, offload down.\n",
				csk, cdev);
			spin_lock_bh(&csk->lock);
			cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
			cxgbi_sock_closed(csk);
			spin_unlock_bh(&csk->lock);
			cxgbi_sock_put(csk);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);

static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
	cxgbi_hbas_remove(cdev);
	cxgbi_device_portmap_cleanup(cdev);
	if (cdev->dev_ddp_cleanup)
		cdev->dev_ddp_cleanup(cdev);
	else
		cxgbi_ddp_cleanup(cdev);
	if (cdev->ddp)
		cxgbi_ddp_cleanup(cdev);
	if (cdev->pmap.max_connect)
		cxgbi_free_big_mem(cdev->pmap.port_csk);
	kfree(cdev);
}

struct cxgbi_device *cxgbi_device_register(unsigned int extra,
					   unsigned int nports)
{
	struct cxgbi_device *cdev;

	cdev = kzalloc(sizeof(*cdev) + extra + nports *
			(sizeof(struct cxgbi_hba *) +
			 sizeof(struct net_device *)),
			GFP_KERNEL);
	if (!cdev) {
		pr_warn("nport %d, OOM.\n", nports);
		return NULL;
	}
	cdev->ports = (struct net_device **)(cdev + 1);
	cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports *
						sizeof(struct net_device *));
	if (extra)
		cdev->dd_data = ((char *)cdev->hbas) +
				nports * sizeof(struct cxgbi_hba *);
	spin_lock_init(&cdev->pmap.lock);

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list_head, &cdev_list);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
	spin_unlock(&cdev_rcu_lock);

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, nports);
	return cdev;
}
EXPORT_SYMBOL_GPL(cxgbi_device_register);

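/*
 * Layout of the single allocation made above: one kzalloc carries the
 * cxgbi_device itself, then the nports net_device pointers, then the
 * nports cxgbi_hba pointers, then the "extra" per-driver data:
 *
 *	[struct cxgbi_device][ports[nports]][hbas[nports]][dd_data(extra)]
 *
 * which is why cdev->ports is simply (cdev + 1) and the other cursors
 * are byte offsets from it.
 */
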
void cxgbi_device_unregister(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u,%s.\n",
		cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");

	mutex_lock(&cdev_mutex);
	list_del(&cdev->list_head);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_del_rcu(&cdev->rcu_node);
	spin_unlock(&cdev_rcu_lock);
	synchronize_rcu();

	cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);

void cxgbi_device_unregister_all(unsigned int flag)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if ((cdev->flags & flag) == flag) {
			mutex_unlock(&cdev_mutex);
			cxgbi_device_unregister(cdev);
			mutex_lock(&cdev_mutex);
		}
	}
	mutex_unlock(&cdev_mutex);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);

struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if (cdev->lldev == lldev) {
			mutex_unlock(&cdev_mutex);
			return cdev;
		}
	}
	mutex_unlock(&cdev_mutex);

	log_debug(1 << CXGBI_DBG_DEV,
		"lldev 0x%p, NO match found.\n", lldev);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);

struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
						 int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		log_debug(1 << CXGBI_DBG_DEV,
			"vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);

struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev;
	int i;

	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				rcu_read_unlock();
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	rcu_read_unlock();

	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);

#if IS_ENABLED(CONFIG_IPV6)
static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr,
				    MAX_ADDR_LEN)) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		  "ndev 0x%p, %s, NO match mac found.\n",
		  ndev, ndev->name);
	return NULL;
}
#endif

void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
	int i;
	struct cxgbi_hba *chba;

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		chba = cdev->hbas[i];
		if (chba) {
			cdev->hbas[i] = NULL;
			iscsi_host_remove(chba->shost);
			pci_dev_put(cdev->pdev);
			iscsi_host_free(chba->shost);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);

int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
		unsigned int max_id, struct scsi_host_template *sht,
		struct scsi_transport_template *stt)
{
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	int i, err;

	log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
		if (!shost) {
			pr_info("0x%p, p%d, %s, host alloc failed.\n",
				cdev, i, cdev->ports[i]->name);
			err = -ENOMEM;
			goto err_out;
		}

		shost->transportt = stt;
		shost->max_lun = max_lun;
		shost->max_id = max_id;
		shost->max_channel = 0;
		shost->max_cmd_len = 16;

		chba = iscsi_host_priv(shost);
		chba->cdev = cdev;
		chba->ndev = cdev->ports[i];
		chba->shost = shost;

		log_debug(1 << CXGBI_DBG_DEV,
			"cdev 0x%p, p#%d %s: chba 0x%p.\n",
			cdev, i, cdev->ports[i]->name, chba);

		pci_dev_get(cdev->pdev);
		err = iscsi_host_add(shost, &cdev->pdev->dev);
		if (err) {
			pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
				cdev, i, cdev->ports[i]->name);
			pci_dev_put(cdev->pdev);
			scsi_host_put(shost);
			goto err_out;
		}

		cdev->hbas[i] = chba;
	}

	return 0;

err_out:
	cxgbi_hbas_remove(cdev);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_add);

/*
 * iSCSI offload
 *
 * - source port management
 *   To find a free source port in the port allocation map we use a very simple
 *   rotor scheme to look for the next free port.
 *
 *   If a source port has been specified make sure that it doesn't collide with
 *   our normal source port allocation map.  If it's outside the range of our
 *   allocation/deallocation scheme just let them use it.
 *
 *   If the source port is outside our allocation range, the caller is
 *   responsible for keeping track of their port usage.
 */

static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev,
					    unsigned char port_id)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int i;
	unsigned int used;

	if (!pmap->max_connect || !pmap->used)
		return NULL;

	spin_lock_bh(&pmap->lock);
	used = pmap->used;
	for (i = 0; used && i < pmap->max_connect; i++) {
		struct cxgbi_sock *csk = pmap->port_csk[i];

		if (csk) {
			if (csk->port_id == port_id) {
				spin_unlock_bh(&pmap->lock);
				return csk;
			}
			used--;
		}
	}
	spin_unlock_bh(&pmap->lock);

	return NULL;
}

static int sock_get_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int start;
	int idx;
	__be16 *port;

	if (!pmap->max_connect) {
		pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		pr_err("source port NON-ZERO %u.\n",
			ntohs(*port));
		return -EADDRINUSE;
	}

	spin_lock_bh(&pmap->lock);
	if (pmap->used >= pmap->max_connect) {
		spin_unlock_bh(&pmap->lock);
		pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	start = idx = pmap->next;
	do {
		if (++idx >= pmap->max_connect)
			idx = 0;
		if (!pmap->port_csk[idx]) {
			pmap->used++;
			*port = htons(pmap->sport_base + idx);
			pmap->next = idx;
			pmap->port_csk[idx] = csk;
			spin_unlock_bh(&pmap->lock);
			cxgbi_sock_get(csk);
			log_debug(1 << CXGBI_DBG_SOCK,
				"cdev 0x%p, p#%u %s, p %u, %u.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				pmap->sport_base + idx, pmap->next);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&pmap->lock);

	/* should not happen */
	pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
		cdev, csk->port_id, cdev->ports[csk->port_id]->name,
		pmap->next);
	return -EADDRNOTAVAIL;
}

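/*
 * Rotor example (illustrative numbers): with sport_base = 20000,
 * max_connect = 4 and pmap->next = 1, the scan order is idx 2, 3, 0, 1;
 * the first free slot, say idx 3, yields source port htons(20000 + 3)
 * and becomes the new pmap->next, so the next connection resumes the
 * search just past it instead of always rescanning from slot 0.
 */
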
static void sock_put_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	__be16 *port;

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		int idx = ntohs(*port) - pmap->sport_base;

		*port = 0;
		if (idx < 0 || idx >= pmap->max_connect) {
			pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				ntohs(*port));
			return;
		}

		spin_lock_bh(&pmap->lock);
		pmap->port_csk[idx] = NULL;
		pmap->used--;
		spin_unlock_bh(&pmap->lock);

		log_debug(1 << CXGBI_DBG_SOCK,
			"cdev 0x%p, p#%u %s, release %u.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name,
			pmap->sport_base + idx);

		cxgbi_sock_put(csk);
	}
}

/*
 * iscsi tcp connection
 */
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
{
	if (csk->cpl_close) {
		kfree_skb(csk->cpl_close);
		csk->cpl_close = NULL;
	}
	if (csk->cpl_abort_req) {
		kfree_skb(csk->cpl_abort_req);
		csk->cpl_abort_req = NULL;
	}
	if (csk->cpl_abort_rpl) {
		kfree_skb(csk->cpl_abort_rpl);
		csk->cpl_abort_rpl = NULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);

static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
{
	struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);

	if (!csk) {
		pr_info("alloc csk %zu failed.\n", sizeof(*csk));
		return NULL;
	}

	if (cdev->csk_alloc_cpls(csk) < 0) {
		pr_info("csk 0x%p, alloc cpls failed.\n", csk);
		kfree(csk);
		return NULL;
	}

	spin_lock_init(&csk->lock);
	kref_init(&csk->refcnt);
	skb_queue_head_init(&csk->receive_queue);
	skb_queue_head_init(&csk->write_queue);
	setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
	rwlock_init(&csk->callback_lock);
	csk->cdev = cdev;
	csk->flags = 0;
	cxgbi_sock_set_state(csk, CTP_CLOSED);

	log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);

	return csk;
}

static struct rtable *find_route_ipv4(struct flowi4 *fl4,
				      __be32 saddr, __be32 daddr,
				      __be16 sport, __be16 dport, u8 tos)
{
	struct rtable *rt;

	rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
				   dport, sport, IPPROTO_TCP, tos, 0);
	if (IS_ERR(rt))
		return NULL;

	return rt;
}

static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
{
	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rtable *rt = NULL;
	struct neighbour *n;
	struct flowi4 fl4;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
	if (!rt) {
		pr_info("no route to ipv4 0x%x, port %u.\n",
			be32_to_cpu(daddr->sin_addr.s_addr),
			be16_to_cpu(daddr->sin_port));
		err = -ENETUNREACH;
		goto err_out;
	}
	dst = &rt->dst;
	n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
	if (!n) {
		err = -ENODEV;
		goto rel_rt;
	}
	ndev = n->dev;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}

	if (ndev->flags & IFF_LOOPBACK) {
		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
		mtu = ndev->mtu;
		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
			n->dev->name, ndev->name, mtu);
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
			&daddr->sin_addr.s_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
		port, ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_neigh;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;

	csk->csk_family = AF_INET;
	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
	csk->daddr.sin_port = daddr->sin_port;
	csk->daddr.sin_family = daddr->sin_family;
	csk->saddr.sin_family = daddr->sin_family;
	csk->saddr.sin_addr.s_addr = fl4.saddr;
	neigh_release(n);

	return csk;

rel_neigh:
	neigh_release(n);

rel_rt:
	ip_rt_put(rt);
	if (csk)
		cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
					const struct in6_addr *daddr)
{
	struct flowi6 fl;

	if (saddr)
		memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&fl.daddr, daddr, sizeof(struct in6_addr));
	return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
}

static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
{
	struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rt6_info *rt = NULL;
	struct neighbour *n;
	struct in6_addr pref_saddr;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	rt = find_route_ipv6(NULL, &daddr6->sin6_addr);

	if (!rt) {
		pr_info("no route to ipv6 %pI6 port %u\n",
			daddr6->sin6_addr.s6_addr,
			be16_to_cpu(daddr6->sin6_port));
		err = -ENETUNREACH;
		goto err_out;
	}

	dst = &rt->dst;

	n = dst_neigh_lookup(dst, &daddr6->sin6_addr);

	if (!n) {
		pr_info("%pI6, port %u, dst no neighbour.\n",
			daddr6->sin6_addr.s6_addr,
			be16_to_cpu(daddr6->sin6_port));
		err = -ENETUNREACH;
		goto rel_rt;
	}
	ndev = n->dev;

	if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
		pr_info("multi-cast route %pI6 port %u, dev %s.\n",
			daddr6->sin6_addr.s6_addr,
			ntohs(daddr6->sin6_port), ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev)
		cdev = cxgbi_device_find_by_mac(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI6 %s, NOT cxgbi device.\n",
			daddr6->sin6_addr.s6_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port,
		ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_rt;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;

	if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) {
		struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);

		err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL,
					 &daddr6->sin6_addr, 0, &pref_saddr);
		if (err) {
			pr_info("failed to get source address to reach %pI6\n",
				&daddr6->sin6_addr);
			goto rel_rt;
		}
	} else {
		pref_saddr = rt->rt6i_prefsrc.addr;
	}

	csk->csk_family = AF_INET6;
	csk->daddr6.sin6_addr = daddr6->sin6_addr;
	csk->daddr6.sin6_port = daddr6->sin6_port;
	csk->daddr6.sin6_family = daddr6->sin6_family;
	csk->saddr6.sin6_family = daddr6->sin6_family;
	csk->saddr6.sin6_addr = pref_saddr;

	neigh_release(n);
	return csk;

rel_rt:
	if (n)
		neigh_release(n);

	ip6_rt_put(rt);
	if (csk)
		cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
#endif /* IS_ENABLED(CONFIG_IPV6) */

void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
			unsigned int opt)
{
	csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
	dst_confirm(csk->dst);
	smp_mb();
	cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_established);

static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
		csk, csk->state, csk->flags, csk->user_data);

	if (csk->state != CTP_ESTABLISHED) {
		read_lock_bh(&csk->callback_lock);
		if (csk->user_data)
			iscsi_conn_failure(csk->user_data,
					ISCSI_ERR_TCP_CONN_CLOSE);
		read_unlock_bh(&csk->callback_lock);
	}
}

void cxgbi_sock_closed(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
		return;
	if (csk->saddr.sin_port)
		sock_put_port(csk);
	if (csk->dst)
		dst_release(csk->dst);
	csk->cdev->csk_release_offload_resources(csk);
	cxgbi_sock_set_state(csk, CTP_CLOSED);
	cxgbi_inform_iscsi_conn_closing(csk);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_closed);

static void need_active_close(struct cxgbi_sock *csk)
{
	int data_lost;
	int close_req = 0;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	spin_lock_bh(&csk->lock);
	dst_confirm(csk->dst);
	data_lost = skb_queue_len(&csk->receive_queue);
	__skb_queue_purge(&csk->receive_queue);

	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	else if (csk->state == CTP_ESTABLISHED) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
	} else if (csk->state == CTP_PASSIVE_CLOSE) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
	}

	if (close_req) {
		if (data_lost)
			csk->cdev->csk_send_abort_req(csk);
		else
			csk->cdev->csk_send_close_req(csk);
	}

	spin_unlock_bh(&csk->lock);
}

void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
{
	pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
		&csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
		errno);

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	csk->err = errno;
	cxgbi_sock_closed(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);

void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);

void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
		if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
			pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
			       csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);

void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ESTABLISHED:
		cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
		break;
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
		break;
	case CTP_CLOSE_WAIT_1:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
	cxgbi_inform_iscsi_conn_closing(csk);
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);

void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	csk->snd_una = snd_nxt - 1;
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
		break;
	case CTP_CLOSE_WAIT_1:
	case CTP_CLOSE_WAIT_2:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);

void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
			   unsigned int snd_una, int seq_chk)
{
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
			csk, csk->state, csk->flags, csk->tid, credits,
			csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);

	spin_lock_bh(&csk->lock);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbi_sock_peek_wr(csk);

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
				csk, csk->state, csk->flags, csk->tid, credits,
				csk->wr_cred, csk->wr_una_cred);
			break;
		}

		if (unlikely(credits < p->csum)) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->state, csk->flags, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				p->csum);
			p->csum -= credits;
			break;
		} else {
			cxgbi_sock_dequeue_wr(csk);
			credits -= p->csum;
			kfree_skb(p);
		}
	}

	cxgbi_sock_check_wr_invariants(csk);

	if (seq_chk) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
				csk, csk->state, csk->flags, csk->tid, snd_una,
				csk->snd_una);
			goto done;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
		}
	}

	if (skb_queue_len(&csk->write_queue)) {
		if (csk->cdev->csk_push_tx_frames(csk, 0))
			cxgbi_conn_tx_open(csk);
	} else
		cxgbi_conn_tx_open(csk);
done:
	spin_unlock_bh(&csk->lock);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);

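/*
 * Credit accounting note: the driver reuses skb->csum on each queued
 * write request to hold that request's hardware WR-credit cost, which is
 * why the retire loop above compares the acked credit count against
 * p->csum; a partially-acked request simply has its remaining cost
 * decremented and stays at the head of the queue.
 */
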
static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
					     unsigned short mtu)
{
	int i = 0;

	while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
		++i;

	return i;
}

unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = csk->dst;

	csk->advmss = dst_metric_advmss(dst);

	if (csk->advmss > pmtu - 40)
		csk->advmss = pmtu - 40;
	if (csk->advmss < csk->cdev->mtus[0] - 40)
		csk->advmss = csk->cdev->mtus[0] - 40;
	idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);

	return idx;
}
EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);

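/*
 * The "- 40" above reserves the minimal IPv4 (20 bytes) + TCP (20 bytes)
 * headers, keeping advmss consistent with the path MTU.  E.g. with
 * pmtu = 1500 and a hardware MTU table of {1500, 9000}, advmss becomes
 * 1460 and the returned index selects the 1500-byte table entry.
 */
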
void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
	__skb_queue_tail(&csk->write_queue, skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);

void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);

void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
{
	int pending = cxgbi_sock_count_pending_wrs(csk);

	if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
			csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);

static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&csk->lock);

	if (csk->state != CTP_ESTABLISHED) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
			csk, csk->state, csk->flags, csk->tid);
		err = -EAGAIN;
		goto out_err;
	}

	if (csk->err) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
			csk, csk->state, csk->flags, csk->tid, csk->err);
		err = -EPIPE;
		goto out_err;
	}

	if (csk->write_seq - csk->snd_una >= csk->snd_win) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
			csk, csk->state, csk->flags, csk->tid, csk->write_seq,
			csk->snd_una, csk->snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	while (skb) {
		int frags = skb_shinfo(skb)->nr_frags +
				(skb->len != skb->data_len);

		if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
			pr_err("csk 0x%p, skb head %u < %u.\n",
				csk, skb_headroom(skb), cdev->skb_tx_rsvd);
			err = -EINVAL;
			goto out_err;
		}

		if (frags >= SKB_WR_LIST_SIZE) {
			pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
				csk, skb_shinfo(skb)->nr_frags, skb->len,
				skb->data_len, (uint)(SKB_WR_LIST_SIZE));
			err = -EINVAL;
			goto out_err;
		}

		next = skb->next;
		skb->next = NULL;
		cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
		cxgbi_sock_skb_entail(csk, skb);
		copied += skb->len;
		csk->write_seq += skb->len +
				cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
		skb = next;
	}
done:
	if (likely(skb_queue_len(&csk->write_queue)))
		cdev->csk_push_tx_frames(csk, 1);
	spin_unlock_bh(&csk->lock);
	return copied;

out_err:
	if (copied == 0 && err == -EPIPE)
		copied = csk->err ? csk->err : -EPIPE;
	else
		copied = err;
	goto done;
}

/*
 * Direct Data Placement -
 * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted
 * final destination host-memory buffers based on the Initiator Task Tag (ITT)
 * in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
 * The host memory address is programmed into h/w in the format of pagepod
 * entries.
 * The location of the pagepod entry is encoded into ddp tag which is used as
 * the base for ITT/TTT.
 */

static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
static unsigned char page_idx = DDP_PGIDX_MAX;

static unsigned char sw_tag_idx_bits;
static unsigned char sw_tag_age_bits;

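/*
 * Default table: indexes 0..3 map to 4KB, 8KB, 16KB and 64KB DDP pages
 * (orders 0, 1, 2, 4 relative to a 4KB base; shifts 12, 13, 14, 16).
 * ddp_adjust_page_table() below rewrites both arrays when the kernel's
 * PAGE_SIZE is larger than the 4KB base.
 */
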
/*
 * Direct-Data Placement page size adjustment
 */
static int ddp_adjust_page_table(void)
{
	int i;
	unsigned int base_order, order;

	if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
		pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n",
			PAGE_SIZE, 1UL << ddp_page_shift[0]);
		return -EINVAL;
	}

	base_order = get_order(1UL << ddp_page_shift[0]);
	order = get_order(1UL << PAGE_SHIFT);

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		/* first is the kernel page size, then just doubling */
		ddp_page_order[i] = order - base_order + i;
		ddp_page_shift[i] = PAGE_SHIFT + i;
	}
	return 0;
}

static int ddp_find_page_index(unsigned long pgsz)
{
	int i;

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		if (pgsz == (1UL << ddp_page_shift[i]))
			return i;
	}
	pr_info("ddp page size %lu not supported.\n", pgsz);
	return DDP_PGIDX_MAX;
}

static void ddp_setup_host_page_size(void)
{
	if (page_idx == DDP_PGIDX_MAX) {
		page_idx = ddp_find_page_index(PAGE_SIZE);

		if (page_idx == DDP_PGIDX_MAX) {
			pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE);
			if (ddp_adjust_page_table() < 0) {
				pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE);
				return;
			}
			page_idx = ddp_find_page_index(PAGE_SIZE);
		}
		pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx);
	}
}

void cxgbi_ddp_page_size_factor(int *pgsz_factor)
{
	int i;

	for (i = 0; i < DDP_PGIDX_MAX; i++)
		pgsz_factor[i] = ddp_page_order[i];
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor);

/*
 * DDP setup & teardown
 */

void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod,
			struct cxgbi_pagepod_hdr *hdr,
			struct cxgbi_gather_list *gl, unsigned int gidx)
{
	int i;

	memcpy(ppod, hdr, sizeof(*hdr));
	for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) {
		ppod->addr[i] = gidx < gl->nelem ?
			cpu_to_be64(gl->phys_addr[gidx]) : 0ULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set);

void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod)
{
	memset(ppod, 0, sizeof(*ppod));
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear);

static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp,
					unsigned int start, unsigned int max,
					unsigned int count,
					struct cxgbi_gather_list *gl)
{
	unsigned int i, j, k;

	/* not enough entries */
	if ((max - start) < count) {
		log_debug(1 << CXGBI_DBG_DDP,
			"NOT enough entries %u+%u < %u.\n", start, count, max);
		return -EBUSY;
	}

	max -= count;
	spin_lock(&ddp->map_lock);
	for (i = start; i < max;) {
		for (j = 0, k = i; j < count; j++, k++) {
			if (ddp->gl_map[k])
				break;
		}
		if (j == count) {
			for (j = 0, k = i; j < count; j++, k++)
				ddp->gl_map[k] = gl;
			spin_unlock(&ddp->map_lock);
			return i;
		}
		i += j + 1;
	}
	spin_unlock(&ddp->map_lock);
	log_debug(1 << CXGBI_DBG_DDP,
		"NO suitable entries %u available.\n", count);
	return -EBUSY;
}

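/*
 * Example: to place a gather list needing count = 3 consecutive pagepods
 * in a map where gl_map[] = {used, free, free, free, used, ...}, a scan
 * with start = 0 fails at slot 0 (busy), restarts just past it, finds
 * slots 1..3 all free, marks them with the gl pointer and returns
 * index 1; on failure the caller falls back to other start offsets.
 */
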
static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp,
				      int start, int count)
{
	spin_lock(&ddp->map_lock);
	memset(&ddp->gl_map[start], 0,
		count * sizeof(struct cxgbi_gather_list *));
	spin_unlock(&ddp->map_lock);
}

static inline void ddp_gl_unmap(struct pci_dev *pdev,
				struct cxgbi_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++)
		dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE,
				PCI_DMA_FROMDEVICE);
}

static inline int ddp_gl_map(struct pci_dev *pdev,
			     struct cxgbi_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++) {
		gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0,
						PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) {
			log_debug(1 << CXGBI_DBG_DDP,
				"page %d 0x%p, 0x%p dma mapping err.\n",
				i, gl->pages[i], pdev);
			goto unmap;
		}
	}
	return i;
unmap:
	if (i) {
		unsigned int nelem = gl->nelem;

		gl->nelem = i;
		ddp_gl_unmap(pdev, gl);
		gl->nelem = nelem;
	}
	return -EINVAL;
}

static void ddp_release_gl(struct cxgbi_gather_list *gl,
			   struct pci_dev *pdev)
{
	ddp_gl_unmap(pdev, gl);
	kfree(gl);
}

static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen,
					     struct scatterlist *sgl,
					     unsigned int sgcnt,
					     struct pci_dev *pdev,
					     gfp_t gfp)
{
	struct cxgbi_gather_list *gl;
	struct scatterlist *sg = sgl;
	struct page *sgpage = sg_page(sg);
	unsigned int sglen = sg->length;
	unsigned int sgoffset = sg->offset;
	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
			      PAGE_SHIFT;
	int i = 1, j = 0;

	if (xferlen < DDP_THRESHOLD) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u < threshold %u, no ddp.\n",
			xferlen, DDP_THRESHOLD);
		return NULL;
	}

	gl = kzalloc(sizeof(struct cxgbi_gather_list) +
		     npages * (sizeof(dma_addr_t) +
			       sizeof(struct page *)), gfp);
	if (!gl) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u, %u pages, OOM.\n", xferlen, npages);
		return NULL;
	}

	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages);

	gl->pages = (struct page **)&gl->phys_addr[npages];
	gl->nelem = npages;
	gl->length = xferlen;
	gl->offset = sgoffset;
	gl->pages[0] = sgpage;

	for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt;
	     i++, sg = sg_next(sg)) {
		struct page *page = sg_page(sg);

		if (sgpage == page && sg->offset == sgoffset + sglen)
			sglen += sg->length;
		else {
			/* make sure the sgl is fit for ddp:
			 * each has the same page size, and
			 * all of the middle pages are used completely
			 */
			if ((j && sgoffset) || ((i != sgcnt - 1) &&
			    ((sglen + sgoffset) & ~PAGE_MASK))) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, %u + %u.\n",
					i, sgcnt, sgoffset, sglen);
				goto error_out;
			}

			j += sglen >> PAGE_SHIFT;
			if (j == gl->nelem || sg->offset) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, offset %u.\n",
					j, gl->nelem, sg->offset);
				goto error_out;
			}
			gl->pages[j] = page;
			sglen = sg->length;
			sgoffset = sg->offset;
			sgpage = page;
		}
	}
	gl->nelem = ++j;

	if (ddp_gl_map(pdev, gl) < 0)
		goto error_out;

	return gl;

error_out:
	kfree(gl);
	return NULL;
}

static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag)
{
	struct cxgbi_device *cdev = chba->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	u32 idx;

	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
	if (idx < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[idx];
		unsigned int npods;

		if (!gl || !gl->nelem) {
			pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n",
				tag, idx, gl, gl ? gl->nelem : 0);
			return;
		}
		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
		log_debug(1 << CXGBI_DBG_DDP,
			"tag 0x%x, release idx %u, npods %u.\n",
			tag, idx, npods);
		cdev->csk_ddp_clear(chba, tag, idx, npods);
		ddp_unmark_entries(ddp, idx, npods);
		ddp_release_gl(gl, ddp->pdev);
	} else
		pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods);
}

static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
			   u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl,
			   gfp_t gfp)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgbi_tag_format *tformat = &cdev->tag_format;
	struct cxgbi_pagepod_hdr hdr;
	unsigned int npods;
	int idx = -1;
	int err = -ENOMEM;
	u32 tag;

	npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
	if (ddp->idx_last == ddp->nppods)
		idx = ddp_find_unused_entries(ddp, 0, ddp->nppods,
					      npods, gl);
	else {
		idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
					      ddp->nppods, npods, gl);
		if (idx < 0 && ddp->idx_last >= npods) {
			idx = ddp_find_unused_entries(ddp, 0,
				min(ddp->idx_last + npods, ddp->nppods),
						      npods, gl);
		}
	}
	if (idx < 0) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xferlen %u, gl %u, npods %u NO DDP.\n",
			gl->length, gl->nelem, npods);
		return idx;
	}

	tag = cxgbi_ddp_tag_base(tformat, sw_tag);
	tag |= idx << PPOD_IDX_SHIFT;

	hdr.rsvd = 0;
	hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
	hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
	hdr.max_offset = htonl(gl->length);
	hdr.page_offset = htonl(gl->offset);

	err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
	if (err < 0)
		goto unmark_entries;

	ddp->idx_last = idx;
	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n",
		gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx,
		npods);
	*tagp = tag;
	return 0;

unmark_entries:
	ddp_unmark_entries(ddp, idx, npods);
	return err;
}

int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp,
		      unsigned int sw_tag, unsigned int xferlen,
		      struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_tag_format *tformat = &cdev->tag_format;
	struct cxgbi_gather_list *gl;
	int err;

	if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp ||
	    xferlen < DDP_THRESHOLD) {
		log_debug(1 << CXGBI_DBG_DDP,
			"pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen);
		return -EINVAL;
	}

	if (!cxgbi_sw_tag_usable(tformat, sw_tag)) {
		log_debug(1 << CXGBI_DBG_DDP,
			"sw_tag 0x%x NOT usable.\n", sw_tag);
		return -EINVAL;
	}

	gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp);
	if (!gl)
		return -ENOMEM;

	err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp);
	if (err < 0)
		ddp_release_gl(gl, cdev->pdev);

	return err;
}

static void ddp_destroy(struct kref *kref)
{
	struct cxgbi_ddp_info *ddp = container_of(kref,
						struct cxgbi_ddp_info,
						refcnt);
	struct cxgbi_device *cdev = ddp->cdev;
	int i = 0;

	pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev);

	while (i < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[i];

		if (gl) {
			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
					>> PPOD_PAGES_SHIFT;
			pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
			kfree(gl);
			i += npods;
		} else
			i++;
	}
	cxgbi_free_big_mem(ddp);
}

int cxgbi_ddp_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, release ddp 0x%p.\n", cdev, ddp);
	cdev->ddp = NULL;
	if (ddp)
		return kref_put(&ddp->refcnt, ddp_destroy);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup);

int cxgbi_ddp_init(struct cxgbi_device *cdev,
		   unsigned int llimit, unsigned int ulimit,
		   unsigned int max_txsz, unsigned int max_rxsz)
{
	struct cxgbi_ddp_info *ddp;
	unsigned int ppmax, bits;

	ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	ppmax = (1 << (bits - 1)) - 1;

	ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) +
				ppmax * (sizeof(struct cxgbi_gather_list *) +
					 sizeof(struct sk_buff *)),
				GFP_KERNEL);
	if (!ddp) {
		pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax);
		return -ENOMEM;
	}
	ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
	cdev->ddp = ddp;

	spin_lock_init(&ddp->map_lock);
	kref_init(&ddp->refcnt);

	ddp->cdev = cdev;
	ddp->pdev = cdev->pdev;
	ddp->llimit = llimit;
	ddp->ulimit = ulimit;
	ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->nppods = ppmax;
	ddp->idx_last = ppmax;
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
	cdev->tag_format.rsvd_bits = ddp->idx_bits;
	cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT;
	cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1;

	pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev->ports[0]->name, cdev->tag_format.sw_bits,
		cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift,
		cdev->tag_format.rsvd_mask);

	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);

	log_debug(1 << CXGBI_DBG_DDP,
		"%s max payload size: %u/%u, %u/%u.\n",
		cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz,
		cdev->rx_max_size, ddp->max_rxsz);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_init);

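/*
 * Tag layout example (illustrative numbers): with bits = 8 the map holds
 * ppmax = (1 << 7) - 1 = 127 pagepods and idx_mask = 0xff; a ddp tag is
 * then "sw bits | idx << PPOD_IDX_SHIFT", so the hardware can recover
 * the pagepod index from the low (reserved) bits of the ITT while the
 * software (age/task) bits survive untouched in the high bits.
 */
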
/*
 * APIs interacting with open-iscsi libraries
 */

static unsigned char padding[4];

static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_hba *chba = cconn->chba;
	struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
	u32 tag = ntohl((__force u32)hdr_itt);

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag);
	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
	    cxgbi_is_ddp_tag(tformat, tag))
		ddp_tag_release(chba, tag);
}

static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *sess = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_hba *chba = cconn->chba;
	struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
	u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
	u32 tag = 0;
	int err = -EINVAL;

	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) {
		err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag,
					scsi_in(sc)->length,
					scsi_in(sc)->table.sgl,
					scsi_in(sc)->table.nents,
					GFP_ATOMIC);
		if (err < 0)
			log_debug(1 << CXGBI_DBG_DDP,
				"csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
				cconn->cep->csk, task, scsi_in(sc)->length,
				scsi_in(sc)->table.nents);
	}

	if (err < 0)
		tag = cxgbi_set_non_ddp_tag(tformat, sw_tag);
	/* the itt need to sent in big-endian order */
	*hdr_itt = (__force itt_t)htonl(tag);

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
		chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
	return 0;
}

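/*
 * sw_tag composition example (illustrative values): with sess->age = 2,
 * task_idx_bits = 12 and task->itt = 0x35, sw_tag = (2 << 12) | 0x35 =
 * 0x2035.  If the DDP reserve fails the tag keeps exactly these bits in
 * non-ddp form, and cxgbi_parse_pdu_itt() below splits them back into
 * the task index and session age.
 */
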
void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	u32 tag = ntohl((__force u32) itt);
	u32 sw_bits;

	sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag);
	if (idx)
		*idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
	if (age)
		*age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
		cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
		age ? *age : 0xFF);
}
EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);

void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
{
	struct iscsi_conn *conn = csk->user_data;

	if (conn) {
		log_debug(1 << CXGBI_DBG_SOCK,
			"csk 0x%p, cid %d.\n", csk, conn->id);
		iscsi_conn_queue_work(conn);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);

/*
 * pdu receive, interact with libiscsi_tcp
 */
static inline int read_pdu_skb(struct iscsi_conn *conn,
			       struct sk_buff *skb,
			       unsigned int offset,
			       int offloaded)
{
	int status = 0;
	int bytes_read;

	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
	switch (status) {
	case ISCSI_TCP_CONN_ERR:
		pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
			skb, offset, offloaded);
		return -EIO;
	case ISCSI_TCP_SUSPENDED:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		/* no transfer - just have caller flush queue */
		return bytes_read;
	case ISCSI_TCP_SKB_DONE:
		pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
			skb, offset, offloaded);
		/*
		 * pdus should always fit in the skb and we should get
		 * segment done notifcation.
		 */
		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
		return -EFAULT;
	case ISCSI_TCP_SEGMENT_DONE:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		return bytes_read;
	default:
		pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
			skb, offset, offloaded, status);
		return -EINVAL;
	}
}

static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
		conn, skb, skb->len, cxgbi_skcb_flags(skb));

	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
		pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
		return -EIO;
	}

	if (conn->hdrdgst_en &&
	    cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) {
		pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
		return -EIO;
	}

	return read_pdu_skb(conn, skb, 0, 0);
}

static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
			     struct sk_buff *skb, unsigned int offset)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	bool offloaded = 0;
	int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
		conn, skb, skb->len, cxgbi_skcb_flags(skb));

	if (conn->datadgst_en &&
	    cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) {
		pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
			conn, lskb, cxgbi_skcb_flags(lskb));
		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
		return -EIO;
	}

	if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
		return 0;

	/* coalesced, add header digest length */
	if (lskb == skb && conn->hdrdgst_en)
		offset += ISCSI_DIGEST_SIZE;

	if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD))
		offloaded = 1;

	if (opcode == ISCSI_OP_SCSI_DATA_IN)
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
			skb, opcode, ntohl(tcp_conn->in.hdr->itt),
			tcp_conn->in.datalen, offloaded ? "is" : "not");

	return read_pdu_skb(conn, skb, offset, offloaded);
}

static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
{
	struct cxgbi_device *cdev = csk->cdev;
	int must_send;
	u32 credits;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
		csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
		csk->rcv_wup, cdev->rx_credit_thres,
		csk->rcv_win);

	if (csk->state != CTP_ESTABLISHED)
		return;

	credits = csk->copied_seq - csk->rcv_wup;
	if (unlikely(!credits))
		return;
	if (unlikely(cdev->rx_credit_thres == 0))
		return;

	must_send = credits + 16384 >= csk->rcv_win;
	if (must_send || credits >= cdev->rx_credit_thres)
		csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
}

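/*
 * Example (illustrative sizes): with rcv_win = 256KB and
 * rx_credit_thres = 10KB, credits are normally returned once 10KB has
 * been copied to the user; the "+ 16384" term additionally forces a
 * return whenever less than 16KB of the advertised window remains, so
 * the peer is never starved of receive window.
 */
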
void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct iscsi_conn *conn = csk->user_data;
	struct sk_buff *skb;
	unsigned int read = 0;
	int err = 0;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, conn 0x%p.\n", csk, conn);

	if (unlikely(!conn || conn->suspend_rx)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
			csk, conn, conn ? conn->id : 0xFF,
			conn ? conn->suspend_rx : 0xFF);
		return;
	}

	while (!err) {
		skb = skb_peek(&csk->receive_queue);
		if (!skb ||
		    !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
			if (skb)
				log_debug(1 << CXGBI_DBG_PDU_RX,
					"skb 0x%p, NOT ready 0x%lx.\n",
					skb, cxgbi_skcb_flags(skb));
			break;
		}
		__skb_unlink(skb, &csk->receive_queue);

		read += cxgbi_skcb_rx_pdulen(skb);
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
			csk, skb, skb->len, cxgbi_skcb_flags(skb),
			cxgbi_skcb_rx_pdulen(skb));

		if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
			err = skb_read_pdu_bhs(conn, skb);
			if (err < 0) {
				pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}
			err = skb_read_pdu_data(conn, skb, skb,
						err + cdev->skb_rx_extra);
			if (err < 0)
				pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
		} else {
			err = skb_read_pdu_bhs(conn, skb);
			if (err < 0) {
				pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}

			if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
				struct sk_buff *dskb;

				dskb = skb_peek(&csk->receive_queue);
				if (!dskb) {
					pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
						" plen %u, NO data.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb));
					err = -EIO;
					goto skb_done;
				}
				__skb_unlink(dskb, &csk->receive_queue);

				err = skb_read_pdu_data(conn, skb, dskb, 0);
				if (err < 0)
					pr_err("data, csk 0x%p, skb 0x%p,%u, "
						"f 0x%lx, plen %u, dskb 0x%p,"
						"%u.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb),
						dskb, dskb->len);
				__kfree_skb(dskb);
			} else
				err = skb_read_pdu_data(conn, skb, skb, 0);
		}
skb_done:
		__kfree_skb(skb);

		if (err < 0)
			break;
	}

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
	if (read) {
		csk->copied_seq += read;
		csk_return_rx_credits(csk, read);
		conn->rxdata_octets += read;
	}

	if (err < 0) {
		pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
			csk, conn, err, read);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);

static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
			   unsigned int offset, unsigned int *off,
			   struct scatterlist **sgp)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, sgcnt, i) {
		if (offset < sg->length) {
			*off = offset;
			*sgp = sg;
			return 0;
		}
		offset -= sg->length;
	}
	return -EFAULT;
}

static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
			     unsigned int dlen, struct page_frag *frags,
			     int frag_max)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		if (!sglen) {
			sg = sg_next(sg);
			if (!sg) {
				pr_warn("sg %d NULL, len %u/%u.\n",
					i, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);

		}
		copy = min(datalen, sglen);
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				pr_warn("too many pages %u, dlen %u.\n",
					frag_max, dlen);
				return -EINVAL;
			}

			frags[i].page = page;
			frags[i].offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);

	return i;
}

int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct scsi_cmnd *sc = task->sc;
	int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
	    (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	     (opcode == ISCSI_OP_SCSI_CMD &&
	      (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		/* data could goes into skb head */
		headroom += min_t(unsigned int,
				SKB_MAX_HEAD(cdev->skb_tx_rsvd),
				conn->max_xmit_dlength);

	tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
	if (!tdata->skb) {
		struct cxgbi_sock *csk = cconn->cep->csk;
		struct net_device *ndev = cdev->ports[csk->port_id];
		ndev->stats.tx_dropped++;
		return -ENOMEM;
	}

	skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		task_reserve_itt(task, &task->hdr->itt);

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
		task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
		conn->max_xmit_dlength, ntohl(task->hdr->itt));

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);

static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
	if (hcrc || dcrc) {
		u8 submode = 0;

		if (hcrc)
			submode |= 1;
		if (dcrc)
			submode |= 2;
		cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
	} else
		cxgbi_skcb_ulp_mode(skb) = 0;
}

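/*
 * ULP submode bits: bit 0 requests header-CRC insertion and bit 1
 * data-CRC insertion, so e.g. HeaderDigest=CRC32C with DataDigest=None
 * yields submode 1 and an ulp_mode of (ULP2_MODE_ISCSI << 4) | 1.
 */
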
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
		task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
		ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);

	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		err = sgl_seek_offset(
					sdb->table.sgl, sdb->table.nents,
					tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
				sdb->table.nents, tdata->offset, sdb->length);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
				sdb->table.nents, tdata->offset, tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			struct page_frag *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page);

				memcpy(dst, src+frag->offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src);
			}
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fit into frag_list */
			for (i = 0; i < tdata->nr_frags; i++) {
				__skb_fill_page_desc(skb, i,
						tdata->frags[i].page,
						tdata->frags[i].offset,
						tdata->frags[i].size);
				skb_frag_ref(skb, i);
			}
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
				   count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			virt_to_page(padding), offset_in_page(padding),
			padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);

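/*
 * Three ways the payload gets attached above: (1) too many fragments (or
 * a pad that would not fit) - everything is copied into the skb head;
 * (2) the normal case - the scatterlist pages are referenced as skb
 * frags with no copy; (3) no scsi_cmnd (e.g. login/text PDUs) -
 * task->data is attached as a single page fragment.  The shared zeroed
 * "padding" page supplies the up-to-4-byte iSCSI pad without another
 * allocation.
 */
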
int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen;
	int err;

	if (!skb) {
		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
			"task 0x%p, skb NULL.\n", task);
		return 0;
	}

	datalen = skb->data_len;
	tdata->skb = NULL;
	err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
	if (err > 0) {
		int pdulen = err;

		log_debug(1 << CXGBI_DBG_PDU_TX,
			"task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
			task, task->sc, skb, skb->len, skb->data_len, err);

		if (task->conn->hdrdgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		if (datalen && task->conn->datadgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		task->conn->txdata_octets += pdulen;
		return 0;
	}

	if (err == -EAGAIN || err == -ENOBUFS) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
			task, skb, skb->len, skb->data_len, err);
		/* reset skb to send when we are called again */
		tdata->skb = skb;
		return err;
	}

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
		task->itt, skb, skb->len, skb->data_len, err);

	kfree_skb(skb);

	iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
	iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);

void cxgbi_cleanup_task(struct iscsi_task *task)
{
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"task 0x%p, skb 0x%p, itt 0x%x.\n",
		task, tdata->skb, task->hdr_itt);

	/* never reached the xmit task callout */
	if (tdata->skb)
		__kfree_skb(tdata->skb);
	memset(tdata, 0, sizeof(*tdata));

	task_release_itt(task, task->hdr_itt);
	iscsi_tcp_cleanup_task(task);
}
EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);

void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
			struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->digest_err = 0;
	stats->timeout_err = 0;
	stats->custom_length = 1;
	strcpy(stats->custom[0].desc, "eh_abort_cnt");
	stats->custom[0].value = conn->eh_abort_cnt;
}
EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);
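
/*
 * Clamp MaxXmitDataSegmentLength: start from the larger of the skb
 * headroom (SKB_MAX_HEAD of the device's tx reserve) and 512 bytes per
 * skb fragment, cap it at the device's tx_max_size, then round to the
 * PDU alignment.  E.g. with MAX_SKB_FRAGS == 17 (4KB pages) the default
 * floor is 512 * 17 = 8704 bytes.
 */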
static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
	unsigned int max_def = 512 * MAX_SKB_FRAGS;
	unsigned int max = max(max_def, headroom);

	max = min(cconn->chba->cdev->tx_max_size, max);
	if (conn->max_xmit_dlength)
		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
	else
		conn->max_xmit_dlength = max;
	cxgbi_align_pdu_size(conn->max_xmit_dlength);

	return 0;
}
static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	unsigned int max = cconn->chba->cdev->rx_max_size;

	cxgbi_align_pdu_size(max);

	if (conn->max_recv_dlength) {
		if (conn->max_recv_dlength > max) {
			pr_err("MaxRecvDataSegmentLength %u > %u.\n",
				conn->max_recv_dlength, max);
			return -EINVAL;
		}
		conn->max_recv_dlength = min(conn->max_recv_dlength, max);
		cxgbi_align_pdu_size(conn->max_recv_dlength);
	} else
		conn->max_recv_dlength = max;

	return 0;
}
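
/*
 * Connection parameters that affect the offload hardware (the digest
 * settings and the two data-segment lengths) are intercepted here so the
 * per-connection hw state can be reprogrammed; everything else falls
 * straight through to iscsi_set_param().
 */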
int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
			enum iscsi_param param, char *buf, int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_sock *csk = cconn->cep->csk;
	int err;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls_conn 0x%p, param %d, buf(%d) %s.\n",
		cls_conn, param, buflen, buf);

	switch (param) {
	case ISCSI_PARAM_HDRDGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->hdrdgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en, 0);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->datadgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en, 0);
		break;
	case ISCSI_PARAM_MAX_R2T:
		return iscsi_tcp_set_max_r2t(conn, buf);
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_recv_dlength(conn);
		break;
	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_xmit_dlength(conn);
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
{
	int len;

	cxgbi_sock_get(csk);
	len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
	cxgbi_sock_put(csk);

	return len;
}
static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
{
	int len;

	cxgbi_sock_get(csk);
	if (csk->csk_family == AF_INET)
		len = sprintf(buf, "%pI4",
			      &csk->daddr.sin_addr.s_addr);
	else
		len = sprintf(buf, "%pI6",
			      &csk->daddr6.sin6_addr);
	cxgbi_sock_put(csk);

	return len;
}
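
/*
 * Endpoint params: only ISCSI_PARAM_CONN_PORT/CONN_ADDRESS are answered
 * here, from the offload socket's peer address; everything else is left
 * to the core (-ENOSYS).
 */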
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
		       char *buf)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls_conn 0x%p, param %d.\n", ep, param);

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		if (!cep)
			return -ENOTCONN;

		csk = cep->csk;
		if (!csk)
			return -ENOTCONN;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &csk->daddr, param, buf);
	default:
		return -ENOSYS;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
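
/*
 * Allocate the libiscsi tcp connection plus our cxgbi_conn private area
 * (carved out of the stacked dd_data) and link it back to the
 * iscsi_conn.
 */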
struct iscsi_cls_conn *
cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct cxgbi_conn *cconn;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
	if (!cls_conn)
		return NULL;

	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	cconn = tcp_conn->dd_data;
	cconn->iconn = conn;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
		cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);

	return cls_conn;
}
EXPORT_SYMBOL_GPL(cxgbi_create_conn);
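
/*
 * Bind an endpoint (and its offload socket) to a connection: program the
 * ddp page-size index, size the per-task tag index from the session's
 * cmds_max (e.g. cmds_max == 256 gives task_idx_bits == 8), and wire up
 * the csk <-> conn back-pointers under callback_lock.
 */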
int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
			struct iscsi_cls_conn *cls_conn,
			u64 transport_eph, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_sock *csk;
	int err;

	ep = iscsi_lookup_endpoint(transport_eph);
	if (!ep)
		return -EINVAL;

	/* setup ddp pagesize */
	cep = ep->dd_data;
	csk = cep->csk;
	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0);
	if (err < 0)
		return err;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		return -EINVAL;

	/* calculate the tag idx bits needed for this conn based on cmds_max */
	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;

	write_lock_bh(&csk->callback_lock);
	csk->user_data = conn;
	cconn->chba = cep->chba;
	cconn->cep = cep;
	cep->cconn = cconn;
	write_unlock_bh(&csk->callback_lock);

	cxgbi_conn_max_xmit_dlength(conn);
	cxgbi_conn_max_recv_dlength(conn);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
		cls_session, cls_conn, ep, cconn, csk);

	/* init recv engine */
	iscsi_tcp_hdr_recv_prep(tcp_conn);

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
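
/*
 * Create the iscsi session on the host that owns the endpoint; the
 * per-task dd_data is sized to hold both the libiscsi tcp task and our
 * cxgbi_task_data.
 */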
struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
					u16 cmds_max, u16 qdepth,
					u32 initial_cmdsn)
{
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;

	if (!ep) {
		pr_err("missing endpoint.\n");
		return NULL;
	}

	cep = ep->dd_data;
	chba = cep->chba;
	shost = chba->shost;

	BUG_ON(chba != iscsi_host_priv(shost));

	cls_session = iscsi_session_setup(chba->cdev->itp, shost,
					cmds_max, 0,
					sizeof(struct iscsi_tcp_task) +
					sizeof(struct cxgbi_task_data),
					initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	session = cls_session->dd_data;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_create_session);
void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
{
	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls sess 0x%p.\n", cls_session);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);
}
EXPORT_SYMBOL_GPL(cxgbi_destroy_session);
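
/*
 * Host params: an IPv4 address write is pushed to the hardware via
 * cxgbi_set_iscsi_ipv4(); hw address and netdev name are fixed by the
 * port and the writes are accepted as no-ops.
 */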
int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			char *buf, int buflen)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not set host param. "
				"netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
		shost, chba, chba->ndev->name, param, buflen, buf);

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		__be32 addr = in_aton(buf);
		log_debug(1 << CXGBI_DBG_ISCSI,
			"hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
		cxgbi_set_iscsi_ipv4(chba, addr);
		return 0;
	}
	case ISCSI_HOST_PARAM_HWADDRESS:
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		return 0;
	default:
		return iscsi_host_set_param(shost, param, buf, buflen);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_set_host_param);
int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			char *buf)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);
	int len = 0;

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not get host param. "
				"netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"shost 0x%p, hba 0x%p,%s, param %d.\n",
		shost, chba, chba->ndev->name, param);

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
		break;
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sprintf(buf, "%s\n", chba->ndev->name);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		struct cxgbi_sock *csk = find_sock_on_port(chba->cdev,
							   chba->port_id);
		if (csk) {
			len = sprintf(buf, "%pIS",
				      (struct sockaddr *)&csk->saddr);
		}
		log_debug(1 << CXGBI_DBG_ISCSI,
			  "hba %s, addr %s.\n", chba->ndev->name, buf);
		break;
	}
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return len;
}
EXPORT_SYMBOL_GPL(cxgbi_get_host_param);
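
/*
 * Active-open an offload connection: resolve the route to the target
 * (IPv4 or IPv6), verify it lands on the requested host's port, reserve
 * a source port, then kick off the hw active open.  The iscsi endpoint
 * is created only if the socket has not already started closing.
 */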
struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
					struct sockaddr *dst_addr,
					int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *hba = NULL;
	struct cxgbi_sock *csk;
	int err = -EINVAL;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
		shost, non_blocking, dst_addr);

	if (shost) {
		hba = iscsi_host_priv(shost);
		if (!hba) {
			pr_info("shost 0x%p, priv NULL.\n", shost);
			goto err_out;
		}
	}

	if (dst_addr->sa_family == AF_INET) {
		csk = cxgbi_check_route(dst_addr);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (dst_addr->sa_family == AF_INET6) {
		csk = cxgbi_check_route6(dst_addr);
#endif
	} else {
		pr_info("address family 0x%x NOT supported.\n",
			dst_addr->sa_family);
		err = -EAFNOSUPPORT;
		return (struct iscsi_endpoint *)ERR_PTR(err);
	}

	if (IS_ERR(csk))
		return (struct iscsi_endpoint *)csk;
	cxgbi_sock_get(csk);

	if (!hba)
		hba = csk->cdev->hbas[csk->port_id];
	else if (hba != csk->cdev->hbas[csk->port_id]) {
		pr_info("Could not connect through requested host %u, "
			"hba 0x%p != 0x%p (%u).\n",
			shost->host_no, hba,
			csk->cdev->hbas[csk->port_id], csk->port_id);
		err = -ENOSPC;
		goto release_conn;
	}

	err = sock_get_port(csk);
	if (err)
		goto release_conn;

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	err = csk->cdev->csk_init_act_open(csk);
	if (err)
		goto release_conn;

	if (cxgbi_sock_is_closing(csk)) {
		err = -ENOSPC;
		pr_info("csk 0x%p is closing.\n", csk);
		goto release_conn;
	}

	ep = iscsi_create_endpoint(sizeof(*cep));
	if (!ep) {
		err = -ENOMEM;
		pr_info("iscsi alloc ep, OOM.\n");
		goto release_conn;
	}

	cep = ep->dd_data;
	cep->csk = csk;
	cep->chba = hba;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
		ep, cep, csk, hba, hba->ndev->name);
	return ep;

release_conn:
	cxgbi_sock_put(csk);
	cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_connect);
int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk = cep->csk;

	if (!cxgbi_sock_is_established(csk))
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(cxgbi_ep_poll);
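
/*
 * Tear down an endpoint: detach it from the connection under
 * callback_lock, then either start an active close (established sockets)
 * or mark the socket closed outright, and drop the reference taken at
 * connect time.
 */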
void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_conn *cconn = cep->cconn;
	struct cxgbi_sock *csk = cep->csk;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
		ep, cep, cconn, csk, csk->state, csk->flags);

	if (cconn && cconn->iconn) {
		iscsi_suspend_tx(cconn->iconn);
		write_lock_bh(&csk->callback_lock);
		cep->csk->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&csk->callback_lock);
	}
	iscsi_destroy_endpoint(ep);

	if (likely(csk->state >= CTP_ESTABLISHED))
		need_active_close(csk);
	else
		cxgbi_sock_closed(csk);

	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);
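
/*
 * Transport registration helpers for the hw-specific modules.  A minimal
 * usage sketch (the driver names here are illustrative only):
 *
 *	static struct scsi_transport_template *my_stt;
 *
 *	err = cxgbi_iscsi_init(&my_iscsi_transport, &my_stt);
 *	...
 *	cxgbi_iscsi_cleanup(&my_iscsi_transport, &my_stt);
 */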
int cxgbi_iscsi_init(struct iscsi_transport *itp,
			struct scsi_transport_template **stt)
{
	*stt = iscsi_register_transport(itp);
	if (*stt == NULL) {
		pr_err("unable to register %s transport 0x%p.\n",
			itp->name, itp);
		return -ENODEV;
	}
	log_debug(1 << CXGBI_DBG_ISCSI,
		"%s, registered iscsi transport 0x%p.\n",
		itp->name, stt);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);
void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
			struct scsi_transport_template **stt)
{
	if (*stt) {
		log_debug(1 << CXGBI_DBG_ISCSI,
			"de-register transport 0x%p, %s, stt 0x%p.\n",
			itp, itp->name, *stt);
		*stt = NULL;
		iscsi_unregister_transport(itp);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
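
/*
 * Attribute visibility for the iscsi sysfs interface: everything listed
 * below is readable (S_IRUGO); unknown params stay hidden.
 */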
umode_t cxgbi_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
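
/*
 * Module init: the sw tag layout is derived from libiscsi's ITT/AGE
 * masks, and the ddp page-size table is set up for the host page size.
 */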
static int __init libcxgbi_init_module(void)
{
	sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
	sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;

	pr_info("%s", version);
	pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
		ISCSI_ITT_MASK, sw_tag_idx_bits,
		ISCSI_AGE_MASK, sw_tag_age_bits);

	ddp_setup_host_page_size();
	return 0;
}

static void __exit libcxgbi_exit_module(void)
{
	cxgbi_device_unregister_all(0xFF);
}

module_init(libcxgbi_init_module);
module_exit(libcxgbi_exit_module);