/*
 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/if_vlan.h>
#include <linux/inet.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <linux/inetdevice.h>	/* ip_dev_find */
#include <linux/module.h>
#include <net/tcp.h>

static unsigned int dbg_level;

#include "libcxgbi.h"

#define DRV_MODULE_NAME		"libcxgbi"
#define DRV_MODULE_DESC		"Chelsio iSCSI driver library"
#define DRV_MODULE_VERSION	"0.9.1-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");

/*
 * cxgbi device management
 * maintains a list of the cxgbi devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static LIST_HEAD(cdev_rcu_list);
static DEFINE_SPINLOCK(cdev_rcu_lock);

static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age)
{
	if (age)
		*age = sw_tag & 0x7FFF;
	if (idx)
		*idx = (sw_tag >> 16) & 0x7FFF;
}
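
/*
 * s/w tag layout (see cxgbi_build_sw_tag() further below): bits 16..30
 * carry the libiscsi itt index and bits 0..14 carry the session age,
 * each capped at 0x7FFF.
 */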

int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
				unsigned int max_conn)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	pmap->port_csk = kvzalloc(array_size(max_conn,
					     sizeof(struct cxgbi_sock *)),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!pmap->port_csk) {
		pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
		return -ENOMEM;
	}

	pmap->max_connect = max_conn;
	pmap->sport_base = base;
	spin_lock_init(&pmap->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);

void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	struct cxgbi_sock *csk;
	int i;

	for (i = 0; i < pmap->max_connect; i++) {
		if (pmap->port_csk[i]) {
			csk = pmap->port_csk[i];
			pmap->port_csk[i] = NULL;
			log_debug(1 << CXGBI_DBG_SOCK,
				  "csk 0x%p, cdev 0x%p, offload down.\n",
				  csk, cdev);
			spin_lock_bh(&csk->lock);
			cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
			cxgbi_sock_closed(csk);
			spin_unlock_bh(&csk->lock);
			cxgbi_sock_put(csk);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);

static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		  "cdev 0x%p, p# %u.\n", cdev, cdev->nports);
	cxgbi_hbas_remove(cdev);
	cxgbi_device_portmap_cleanup(cdev);
	if (cdev->cdev2ppm)
		cxgbi_ppm_release(cdev->cdev2ppm(cdev));
	if (cdev->pmap.max_connect)
		kvfree(cdev->pmap.port_csk);
	kfree(cdev);
}

struct cxgbi_device *cxgbi_device_register(unsigned int extra,
					   unsigned int nports)
{
	struct cxgbi_device *cdev;

	cdev = kzalloc(sizeof(*cdev) + extra + nports *
			(sizeof(struct cxgbi_hba *) +
			 sizeof(struct net_device *)),
			GFP_KERNEL);
	if (!cdev) {
		pr_warn("nport %d, OOM.\n", nports);
		return NULL;
	}
	cdev->ports = (struct net_device **)(cdev + 1);
	cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports *
			sizeof(struct net_device *));
	if (extra)
		cdev->dd_data = ((char *)cdev->hbas) +
				nports * sizeof(struct cxgbi_hba *);
	spin_lock_init(&cdev->pmap.lock);

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list_head, &cdev_list);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
	spin_unlock(&cdev_rcu_lock);

	log_debug(1 << CXGBI_DBG_DEV,
		  "cdev 0x%p, p# %u.\n", cdev, nports);
	return cdev;
}
EXPORT_SYMBOL_GPL(cxgbi_device_register);

void cxgbi_device_unregister(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		  "cdev 0x%p, p# %u,%s.\n",
		  cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");

	mutex_lock(&cdev_mutex);
	list_del(&cdev->list_head);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_del_rcu(&cdev->rcu_node);
	spin_unlock(&cdev_rcu_lock);
	synchronize_rcu();

	cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);

void cxgbi_device_unregister_all(unsigned int flag)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if ((cdev->flags & flag) == flag) {
			mutex_unlock(&cdev_mutex);
			cxgbi_device_unregister(cdev);
			mutex_lock(&cdev_mutex);
		}
	}
	mutex_unlock(&cdev_mutex);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);

struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if (cdev->lldev == lldev) {
			mutex_unlock(&cdev_mutex);
			return cdev;
		}
	}
	mutex_unlock(&cdev_mutex);

	log_debug(1 << CXGBI_DBG_DEV,
		  "lldev 0x%p, NO match found.\n", lldev);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);

struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
						 int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		log_debug(1 << CXGBI_DBG_DEV,
			  "vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		  "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);

struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				rcu_read_unlock();
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	rcu_read_unlock();

	log_debug(1 << CXGBI_DBG_DEV,
		  "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);

static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr,
				    MAX_ADDR_LEN)) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		  "ndev 0x%p, %s, NO match mac found.\n",
		  ndev, ndev->name);
	return NULL;
}

void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
	int i;
	struct cxgbi_hba *chba;

	log_debug(1 << CXGBI_DBG_DEV,
		  "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		chba = cdev->hbas[i];
		if (chba) {
			cdev->hbas[i] = NULL;
			iscsi_host_remove(chba->shost, false);
			pci_dev_put(cdev->pdev);
			iscsi_host_free(chba->shost);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);

int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
		   unsigned int max_conns, const struct scsi_host_template *sht,
		   struct scsi_transport_template *stt)
{
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	int i, err;

	log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
		if (!shost) {
			pr_info("0x%p, p%d, %s, host alloc failed.\n",
				cdev, i, cdev->ports[i]->name);
			err = -ENOMEM;
			goto err_out;
		}

		shost->transportt = stt;
		shost->max_lun = max_lun;
		shost->max_id = max_conns - 1;
		shost->max_channel = 0;
		shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

		chba = iscsi_host_priv(shost);
		chba->cdev = cdev;
		chba->ndev = cdev->ports[i];
		chba->shost = shost;

		shost->can_queue = sht->can_queue - ISCSI_MGMT_CMDS_MAX;

		log_debug(1 << CXGBI_DBG_DEV,
			  "cdev 0x%p, p#%d %s: chba 0x%p.\n",
			  cdev, i, cdev->ports[i]->name, chba);

		pci_dev_get(cdev->pdev);
		err = iscsi_host_add(shost, &cdev->pdev->dev);
		if (err) {
			pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
				cdev, i, cdev->ports[i]->name);
			pci_dev_put(cdev->pdev);
			scsi_host_put(shost);
			goto err_out;
		}

		cdev->hbas[i] = chba;
	}

	return 0;

err_out:
	cxgbi_hbas_remove(cdev);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_add);

/*
 * iSCSI offload
 *
 * - source port management
 *   To find a free source port in the port allocation map we use a very simple
 *   rotor scheme to look for the next free port.
 *
 *   If a source port has been specified make sure that it doesn't collide with
 *   our normal source port allocation map.  If it's outside the range of our
 *   allocation/deallocation scheme just let them use it.
 *
 *   If the source port is outside our allocation range, the caller is
 *   responsible for keeping track of their port usage.
 */
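
/*
 * For example (illustrative values only): with sport_base 1024 and
 * max_connect 8, the rotor hands out ports 1024..1031; pmap->next
 * remembers the last slot allocated so the next search starts just
 * past it and wraps around.
 */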

static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev,
					    unsigned char port_id)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int i;
	unsigned int used;

	if (!pmap->max_connect || !pmap->used)
		return NULL;

	spin_lock_bh(&pmap->lock);
	used = pmap->used;
	for (i = 0; used && i < pmap->max_connect; i++) {
		struct cxgbi_sock *csk = pmap->port_csk[i];

		if (csk) {
			if (csk->port_id == port_id) {
				spin_unlock_bh(&pmap->lock);
				return csk;
			}
			used--;
		}
	}
	spin_unlock_bh(&pmap->lock);

	return NULL;
}

static int sock_get_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int start;
	int idx;
	__be16 *port;

	if (!pmap->max_connect) {
		pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
		       cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		pr_err("source port NON-ZERO %u.\n",
		       ntohs(*port));
		return -EADDRINUSE;
	}

	spin_lock_bh(&pmap->lock);
	if (pmap->used >= pmap->max_connect) {
		spin_unlock_bh(&pmap->lock);
		pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	start = idx = pmap->next;
	do {
		if (++idx >= pmap->max_connect)
			idx = 0;
		if (!pmap->port_csk[idx]) {
			pmap->used++;
			*port = htons(pmap->sport_base + idx);
			pmap->next = idx;
			pmap->port_csk[idx] = csk;
			spin_unlock_bh(&pmap->lock);
			cxgbi_sock_get(csk);
			log_debug(1 << CXGBI_DBG_SOCK,
				  "cdev 0x%p, p#%u %s, p %u, %u.\n",
				  cdev, csk->port_id,
				  cdev->ports[csk->port_id]->name,
				  pmap->sport_base + idx, pmap->next);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&pmap->lock);

	/* should not happen */
	pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
		cdev, csk->port_id, cdev->ports[csk->port_id]->name,
		pmap->next);
	return -EADDRNOTAVAIL;
}
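
/*
 * Note: a successful sock_get_port() leaves the chosen source port,
 * htons(pmap->sport_base + idx), in csk->saddr{,6}; sock_put_port()
 * below recomputes idx from that port to find and clear the slot.
 */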

static void sock_put_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	__be16 *port;

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		int idx = ntohs(*port) - pmap->sport_base;

		*port = 0;
		if (idx < 0 || idx >= pmap->max_connect) {
			pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
			       cdev, csk->port_id,
			       cdev->ports[csk->port_id]->name,
			       ntohs(*port));
			return;
		}

		spin_lock_bh(&pmap->lock);
		pmap->port_csk[idx] = NULL;
		pmap->used--;
		spin_unlock_bh(&pmap->lock);

		log_debug(1 << CXGBI_DBG_SOCK,
			  "cdev 0x%p, p#%u %s, release %u.\n",
			  cdev, csk->port_id, cdev->ports[csk->port_id]->name,
			  pmap->sport_base + idx);

		cxgbi_sock_put(csk);
	}
}

/*
 * iscsi tcp connection
 */
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
{
	if (csk->cpl_close) {
		kfree_skb(csk->cpl_close);
		csk->cpl_close = NULL;
	}
	if (csk->cpl_abort_req) {
		kfree_skb(csk->cpl_abort_req);
		csk->cpl_abort_req = NULL;
	}
	if (csk->cpl_abort_rpl) {
		kfree_skb(csk->cpl_abort_rpl);
		csk->cpl_abort_rpl = NULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);

static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
{
	struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);

	if (!csk) {
		pr_info("alloc csk %zu failed.\n", sizeof(*csk));
		return NULL;
	}

	if (cdev->csk_alloc_cpls(csk) < 0) {
		pr_info("csk 0x%p, alloc cpls failed.\n", csk);
		kfree(csk);
		return NULL;
	}

	spin_lock_init(&csk->lock);
	kref_init(&csk->refcnt);
	skb_queue_head_init(&csk->receive_queue);
	skb_queue_head_init(&csk->write_queue);
	timer_setup(&csk->retry_timer, NULL, 0);
	init_completion(&csk->cmpl);
	rwlock_init(&csk->callback_lock);
	csk->cdev = cdev;
	csk->flags = 0;
	cxgbi_sock_set_state(csk, CTP_CLOSED);

	log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);

	return csk;
}

static struct rtable *find_route_ipv4(struct flowi4 *fl4,
				      __be32 saddr, __be32 daddr,
				      __be16 sport, __be16 dport, u8 tos,
				      int ifindex)
{
	struct rtable *rt;

	rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
				   dport, sport, IPPROTO_TCP, tos, ifindex);
	if (IS_ERR(rt))
		return NULL;

	return rt;
}

static struct cxgbi_sock *
cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
{
	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rtable *rt = NULL;
	struct neighbour *n;
	struct flowi4 fl4;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0,
			     daddr->sin_port, 0, ifindex);
	if (!rt) {
		pr_info("no route to ipv4 0x%x, port %u.\n",
			be32_to_cpu(daddr->sin_addr.s_addr),
			be16_to_cpu(daddr->sin_port));
		err = -ENETUNREACH;
		goto err_out;
	}
	dst = &rt->dst;
	n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
	if (!n) {
		err = -ENODEV;
		goto rel_rt;
	}
	ndev = n->dev;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}

	if (ndev->flags & IFF_LOOPBACK) {
		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
		if (!ndev) {
			err = -ENETUNREACH;
			goto rel_neigh;
		}
		mtu = ndev->mtu;
		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
			n->dev->name, ndev->name, mtu);
	}

	if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
		pr_info("%s interface not up.\n", ndev->name);
		err = -ENETDOWN;
		goto rel_neigh;
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev)
		cdev = cxgbi_device_find_by_mac(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
			&daddr->sin_addr.s_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		  "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		  &daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
		  port, ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_neigh;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;

	csk->csk_family = AF_INET;
	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
	csk->daddr.sin_port = daddr->sin_port;
	csk->daddr.sin_family = daddr->sin_family;
	csk->saddr.sin_family = daddr->sin_family;
	csk->saddr.sin_addr.s_addr = fl4.saddr;

	neigh_release(n);

	return csk;

rel_neigh:
	neigh_release(n);

rel_rt:
	ip_rt_put(rt);
err_out:
	return ERR_PTR(err);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
					const struct in6_addr *daddr,
					int ifindex)
{
	struct flowi6 fl;

	memset(&fl, 0, sizeof(fl));
	fl.flowi6_oif = ifindex;
	if (saddr)
		memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&fl.daddr, daddr, sizeof(struct in6_addr));
	return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
}

static struct cxgbi_sock *
cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex)
{
	struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rt6_info *rt = NULL;
	struct neighbour *n;
	struct in6_addr pref_saddr;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	rt = find_route_ipv6(NULL, &daddr6->sin6_addr, ifindex);

	if (!rt) {
		pr_info("no route to ipv6 %pI6 port %u\n",
			daddr6->sin6_addr.s6_addr,
			be16_to_cpu(daddr6->sin6_port));
		err = -ENETUNREACH;
		goto err_out;
	}

	dst = &rt->dst;

	n = dst_neigh_lookup(dst, &daddr6->sin6_addr);
	if (!n) {
		pr_info("%pI6, port %u, dst no neighbour.\n",
			daddr6->sin6_addr.s6_addr,
			be16_to_cpu(daddr6->sin6_port));
		err = -ENETUNREACH;
		goto rel_rt;
	}
	ndev = n->dev;

	if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
		pr_info("%s interface not up.\n", ndev->name);
		err = -ENETDOWN;
		goto rel_rt;
	}

	if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
		pr_info("multi-cast route %pI6 port %u, dev %s.\n",
			daddr6->sin6_addr.s6_addr,
			ntohs(daddr6->sin6_port), ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev)
		cdev = cxgbi_device_find_by_mac(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI6 %s, NOT cxgbi device.\n",
			daddr6->sin6_addr.s6_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		  "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		  daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port,
		  ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_rt;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;

	rt6_get_prefsrc(rt, &pref_saddr);
	if (ipv6_addr_any(&pref_saddr)) {
		struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);

		err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL,
					 &daddr6->sin6_addr, 0, &pref_saddr);
		if (err) {
			pr_info("failed to get source address to reach %pI6\n",
				&daddr6->sin6_addr);
			goto rel_rt;
		}
	}

	csk->csk_family = AF_INET6;
	csk->daddr6.sin6_addr = daddr6->sin6_addr;
	csk->daddr6.sin6_port = daddr6->sin6_port;
	csk->daddr6.sin6_family = daddr6->sin6_family;
	csk->saddr6.sin6_family = daddr6->sin6_family;
	csk->saddr6.sin6_addr = pref_saddr;

	neigh_release(n);
	return csk;

rel_rt:
	if (n)
		neigh_release(n);

	ip6_rt_put(rt);
	if (csk)
		cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
#endif /* IS_ENABLED(CONFIG_IPV6) */

void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
			    unsigned int opt)
{
	csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
	dst_confirm(csk->dst);
	smp_mb();
	cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_established);

static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
		  csk, csk->state, csk->flags, csk->user_data);

	if (csk->state != CTP_ESTABLISHED) {
		read_lock_bh(&csk->callback_lock);
		if (csk->user_data)
			iscsi_conn_failure(csk->user_data,
					   ISCSI_ERR_TCP_CONN_CLOSE);
		read_unlock_bh(&csk->callback_lock);
	}
}

void cxgbi_sock_closed(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
		return;
	if (csk->saddr.sin_port)
		sock_put_port(csk);
	if (csk->dst)
		dst_release(csk->dst);
	csk->cdev->csk_release_offload_resources(csk);
	cxgbi_sock_set_state(csk, CTP_CLOSED);
	cxgbi_inform_iscsi_conn_closing(csk);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_closed);

static void need_active_close(struct cxgbi_sock *csk)
{
	int data_lost;
	int close_req = 0;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, (csk)->state, (csk)->flags, (csk)->tid);
	spin_lock_bh(&csk->lock);
	if (csk->dst)
		dst_confirm(csk->dst);
	data_lost = skb_queue_len(&csk->receive_queue);
	__skb_queue_purge(&csk->receive_queue);

	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	else if (csk->state == CTP_ESTABLISHED) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
	} else if (csk->state == CTP_PASSIVE_CLOSE) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
	}

	if (close_req) {
		if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) ||
		    data_lost)
			csk->cdev->csk_send_abort_req(csk);
		else
			csk->cdev->csk_send_close_req(csk);
	}

	spin_unlock_bh(&csk->lock);
}

void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
{
	pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
		&csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
		errno);

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	csk->err = errno;
	cxgbi_sock_closed(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);

void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
	struct module *owner = csk->cdev->owner;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);

	module_put(owner);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);

void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
		if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
			pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
			       csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);

void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ESTABLISHED:
		cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
		break;
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
		break;
	case CTP_CLOSE_WAIT_1:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
		       csk, csk->state, csk->flags, csk->tid);
	}
	cxgbi_inform_iscsi_conn_closing(csk);
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);

void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	csk->snd_una = snd_nxt - 1;
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
		break;
	case CTP_CLOSE_WAIT_1:
	case CTP_CLOSE_WAIT_2:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
		       csk, csk->state, csk->flags, csk->tid);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);

void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
			   unsigned int snd_una, int seq_chk)
{
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
		  csk, csk->state, csk->flags, csk->tid, credits,
		  csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);

	spin_lock_bh(&csk->lock);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbi_sock_peek_wr(csk);

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
			       csk, csk->state, csk->flags, csk->tid, credits,
			       csk->wr_cred, csk->wr_una_cred);
			break;
		}

		if (unlikely(credits < p->csum)) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->state, csk->flags, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				p->csum);
			p->csum -= credits;
			break;
		} else {
			cxgbi_sock_dequeue_wr(csk);
			credits -= p->csum;
			kfree_skb(p);
		}
	}

	cxgbi_sock_check_wr_invariants(csk);

	if (seq_chk) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
				csk, csk->state, csk->flags, csk->tid, snd_una,
				csk->snd_una);
			goto done;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
		}
	}

	if (skb_queue_len(&csk->write_queue)) {
		if (csk->cdev->csk_push_tx_frames(csk, 0))
			cxgbi_conn_tx_open(csk);
	} else
		cxgbi_conn_tx_open(csk);
done:
	spin_unlock_bh(&csk->lock);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);

static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
					     unsigned short mtu)
{
	int i = 0;

	while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
		++i;

	return i;
}

unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = csk->dst;

	csk->advmss = dst_metric_advmss(dst);

	if (csk->advmss > pmtu - 40)
		csk->advmss = pmtu - 40;
	if (csk->advmss < csk->cdev->mtus[0] - 40)
		csk->advmss = csk->cdev->mtus[0] - 40;
	idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);

	return idx;
}
EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
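
/*
 * The constant 40 above is the fixed IPv4 (20 bytes) plus TCP (20
 * bytes) header overhead, so advmss is kept between the smallest
 * device MTU and the path MTU, both less headers.
 */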

void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
	__skb_queue_tail(&csk->write_queue, skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);

void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);

void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
{
	int pending = cxgbi_sock_count_pending_wrs(csk);

	if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
		       csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);

static inline void
scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl,
		unsigned int *sgcnt, unsigned int *dlen,
		unsigned int prot)
{
	struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : &sc->sdb;

	*sgl = sdb->table.sgl;
	*sgcnt = sdb->table.nents;
	*dlen = sdb->length;
	/* Caution: for protection sdb, sdb->length is invalid */
}

void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod,
			    struct cxgbi_task_tag_info *ttinfo,
			    struct scatterlist **sg_pp, unsigned int *sg_off)
{
	struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
	unsigned int offset = sg_off ? *sg_off : 0;
	dma_addr_t addr = 0UL;
	unsigned int len = 0;
	int i;

	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

	if (sg) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	for (i = 0; i < PPOD_PAGES_MAX; i++) {
		if (sg) {
			ppod->addr[i] = cpu_to_be64(addr + offset);
			offset += PAGE_SIZE;
			if (offset == (len + sg->offset)) {
				offset = 0;
				sg = sg_next(sg);
				if (sg) {
					addr = sg_dma_address(sg);
					len = sg_dma_len(sg);
				}
			}
		} else {
			ppod->addr[i] = 0ULL;
		}
	}

	/*
	 * the fifth address needs to be repeated in the next ppod, so do
	 * not move sg
	 */
	if (sg_pp) {
		*sg_pp = sg;
		*sg_off = offset;
	}

	if (offset == len) {
		offset = 0;
		if (sg) {
			sg = sg_next(sg);
			if (sg) {
				addr = sg_dma_address(sg);
				len = sg_dma_len(sg);
			}
		}
	}
	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod);
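
/*
 * Each pagepod holds PPOD_PAGES_MAX (4) page addresses plus a fifth
 * entry that duplicates the first page of the next pagepod, so
 * consecutive pagepods overlap by one page; that is why sg/offset are
 * handed back to the caller before the final addr[i] write above.
 */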

/*
 * APIs interacting with open-iscsi libraries
 */

int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
			struct cxgbi_tag_format *tformat,
			unsigned int iscsi_size, unsigned int llimit,
			unsigned int start, unsigned int rsvd_factor,
			unsigned int edram_start, unsigned int edram_size)
{
	int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev,
				 cdev->lldev, tformat, iscsi_size, llimit, start,
				 rsvd_factor, edram_start, edram_size);

	if (err >= 0) {
		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);

		if (ppm->ppmax < 1024 ||
		    ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX)
			cdev->flags |= CXGBI_FLAG_DDP_OFF;
		err = 0;
	} else {
		cdev->flags |= CXGBI_FLAG_DDP_OFF;
	}

	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup);

static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents)
{
	int i;
	int last_sgidx = nents - 1;
	struct scatterlist *sg = sgl;

	for (i = 0; i < nents; i++, sg = sg_next(sg)) {
		unsigned int len = sg->length + sg->offset;

		if ((sg->offset & 0x3) || (i && sg->offset) ||
		    ((i != last_sgidx) && len != PAGE_SIZE)) {
			log_debug(1 << CXGBI_DBG_DDP,
				  "sg %u/%u, %u,%u, not aligned.\n",
				  i, nents, sg->offset, sg->length);
			return -EINVAL;
		}
	}
	return 0;
}

static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn,
			     struct cxgbi_task_data *tdata, u32 sw_tag,
			     unsigned int xferlen)
{
	struct cxgbi_sock *csk = cconn->cep->csk;
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
	struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
	struct scatterlist *sgl = ttinfo->sgl;
	unsigned int sgcnt = ttinfo->nents;
	unsigned int sg_offset = sgl->offset;
	int err;

	if (cdev->flags & CXGBI_FLAG_DDP_OFF) {
		log_debug(1 << CXGBI_DBG_DDP,
			  "cdev 0x%p DDP off.\n", cdev);
		return -EINVAL;
	}

	if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt ||
	    ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) {
		log_debug(1 << CXGBI_DBG_DDP,
			  "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
			  ppm, ppm ? ppm->tformat.pgsz_idx_dflt : DDP_PGIDX_MAX,
			  xferlen, ttinfo->nents);
		return -EINVAL;
	}

	/* make sure the buffer is suitable for ddp */
	if (cxgbi_ddp_sgl_check(sgl, sgcnt) < 0)
		return -EINVAL;

	ttinfo->nr_pages = (xferlen + sgl->offset + (1 << PAGE_SHIFT) - 1) >>
			   PAGE_SHIFT;

	/*
	 * the ddp tag will be used for the itt in the outgoing pdu,
	 * the itt generated by libiscsi is saved in the ppm and can be
	 * retrieved via the ddp tag
	 */
	err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
				      &ttinfo->tag, (unsigned long)sw_tag);
	if (err < 0)
		return err;
	ttinfo->npods = err;

	/* setup dma from scsi command sgl */
	sgl->offset = 0;
	err = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	sgl->offset = sg_offset;
	if (err == 0) {
		pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
			__func__, sw_tag, xferlen, sgcnt);
		goto rel_ppods;
	}
	if (err != ttinfo->nr_pages) {
		log_debug(1 << CXGBI_DBG_DDP,
			  "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n",
			  __func__, sw_tag, xferlen, sgcnt, err);
	}

	ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED;
	ttinfo->cid = csk->port_id;

	cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
				xferlen, &ttinfo->hdr);

	if (cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ) {
		/* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */
		ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID;
	} else {
		/* write ppod from control queue now */
		err = cdev->csk_ddp_set_map(ppm, csk, ttinfo);
		if (err < 0)
			goto rel_ppods;
	}

	return 0;

rel_ppods:
	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);

	if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) {
		ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED;
		dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	}
	return -EINVAL;
}
*task
, itt_t hdr_itt
)
1353 struct scsi_cmnd
*sc
= task
->sc
;
1354 struct iscsi_tcp_conn
*tcp_conn
= task
->conn
->dd_data
;
1355 struct cxgbi_conn
*cconn
= tcp_conn
->dd_data
;
1356 struct cxgbi_device
*cdev
= cconn
->chba
->cdev
;
1357 struct cxgbi_ppm
*ppm
= cdev
->cdev2ppm(cdev
);
1358 u32 tag
= ntohl((__force u32
)hdr_itt
);
1360 log_debug(1 << CXGBI_DBG_DDP
,
1361 "cdev 0x%p, task 0x%p, release tag 0x%x.\n",
1363 if (sc
&& sc
->sc_data_direction
== DMA_FROM_DEVICE
&&
1364 cxgbi_ppm_is_ddp_tag(ppm
, tag
)) {
1365 struct cxgbi_task_data
*tdata
= iscsi_task_cxgbi_data(task
);
1366 struct cxgbi_task_tag_info
*ttinfo
= &tdata
->ttinfo
;
1368 if (!(cdev
->flags
& CXGBI_FLAG_USE_PPOD_OFLDQ
))
1369 cdev
->csk_ddp_clear_map(cdev
, ppm
, ttinfo
);
1370 cxgbi_ppm_ppod_release(ppm
, ttinfo
->idx
);
1371 dma_unmap_sg(&ppm
->pdev
->dev
, ttinfo
->sgl
, ttinfo
->nents
,
1376 static inline u32
cxgbi_build_sw_tag(u32 idx
, u32 age
)
1378 /* assume idx and age both are < 0x7FFF (32767) */
1379 return (idx
<< 16) | age
;

static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *sess = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
	u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age);
	u32 tag = 0;
	int err = -EINVAL;

	if (sc && sc->sc_data_direction == DMA_FROM_DEVICE) {
		struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
		struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;

		scmd_get_params(sc, &ttinfo->sgl, &ttinfo->nents,
				&tdata->dlen, 0);
		err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, tdata->dlen);
		if (!err)
			tag = ttinfo->tag;
		else
			log_debug(1 << CXGBI_DBG_DDP,
				  "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
				  cconn->cep->csk, task, tdata->dlen,
				  ttinfo->nents);
	}

	if (err < 0) {
		err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag);
		if (err < 0)
			return err;
	}
	/* the itt need to sent in big-endian order */
	*hdr_itt = (__force itt_t)htonl(tag);

	log_debug(1 << CXGBI_DBG_DDP,
		  "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
		  cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
	return 0;
}

void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
	u32 tag = ntohl((__force u32)itt);
	u32 sw_bits;

	if (ppm) {
		if (cxgbi_ppm_is_ddp_tag(ppm, tag))
			sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag);
		else
			sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag);
	} else {
		sw_bits = tag;
	}

	cxgbi_decode_sw_tag(sw_bits, idx, age);
	log_debug(1 << CXGBI_DBG_DDP,
		  "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
		  cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
		  age ? *age : 0xFF);
}
EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);

void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
{
	struct iscsi_conn *conn = csk->user_data;

	if (conn) {
		log_debug(1 << CXGBI_DBG_SOCK,
			  "csk 0x%p, cid %d.\n", csk, conn->id);
		iscsi_conn_queue_xmit(conn);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);

/*
 * pdu receive, interact with libiscsi_tcp
 */
static inline int read_pdu_skb(struct iscsi_conn *conn,
			       struct sk_buff *skb,
			       unsigned int offset,
			       int offloaded)
{
	int status = 0;
	int bytes_read;

	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
	switch (status) {
	case ISCSI_TCP_CONN_ERR:
		pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
			skb, offset, offloaded);
		return -EIO;
	case ISCSI_TCP_SUSPENDED:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
			  skb, offset, offloaded, bytes_read);
		/* no transfer - just have caller flush queue */
		return bytes_read;
	case ISCSI_TCP_SKB_DONE:
		pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
			skb, offset, offloaded);
		/*
		 * pdus should always fit in the skb and we should get
		 * segment done notifcation.
		 */
		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
		return -EFAULT;
	case ISCSI_TCP_SEGMENT_DONE:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
			  skb, offset, offloaded, bytes_read);
		return bytes_read;
	default:
		pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
			skb, offset, offloaded, status);
		return -EINVAL;
	}
}

static int
skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn,
		 struct sk_buff *skb)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int err;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
		  conn, skb, skb->len, cxgbi_skcb_flags(skb));

	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
		pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
		return -EIO;
	}

	if (conn->hdrdgst_en &&
	    cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) {
		pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
		return -EIO;
	}

	if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) &&
	    cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) {
		/* If completion flag is set and data is directly
		 * placed in to the host memory then update
		 * task->exp_datasn to the datasn in completion
		 * iSCSI hdr as T6 adapter generates completion only
		 * for the last pdu of a sequence.
		 */
		itt_t itt = ((struct iscsi_data *)skb->data)->itt;
		struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
		u32 data_sn = be32_to_cpu(((struct iscsi_data *)
					   skb->data)->datasn);
		if (task && task->sc) {
			struct iscsi_tcp_task *tcp_task = task->dd_data;

			tcp_task->exp_datasn = data_sn;
		}
	}

	err = read_pdu_skb(conn, skb, 0, 0);
	if (likely(err >= 0)) {
		struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data;
		u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK;

		if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP))
			cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD);
	}

	return err;
}
*conn
, struct sk_buff
*lskb
,
1563 struct sk_buff
*skb
, unsigned int offset
)
1565 struct iscsi_tcp_conn
*tcp_conn
= conn
->dd_data
;
1567 int opcode
= tcp_conn
->in
.hdr
->opcode
& ISCSI_OPCODE_MASK
;
1569 log_debug(1 << CXGBI_DBG_PDU_RX
,
1570 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1571 conn
, skb
, skb
->len
, cxgbi_skcb_flags(skb
));
1573 if (conn
->datadgst_en
&&
1574 cxgbi_skcb_test_flag(lskb
, SKCBF_RX_DCRC_ERR
)) {
1575 pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
1576 conn
, lskb
, cxgbi_skcb_flags(lskb
));
1577 iscsi_conn_failure(conn
, ISCSI_ERR_DATA_DGST
);
1581 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn
))
1584 /* coalesced, add header digest length */
1585 if (lskb
== skb
&& conn
->hdrdgst_en
)
1586 offset
+= ISCSI_DIGEST_SIZE
;
1588 if (cxgbi_skcb_test_flag(lskb
, SKCBF_RX_DATA_DDPD
))
1591 if (opcode
== ISCSI_OP_SCSI_DATA_IN
)
1592 log_debug(1 << CXGBI_DBG_PDU_RX
,
1593 "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
1594 skb
, opcode
, ntohl(tcp_conn
->in
.hdr
->itt
),
1595 tcp_conn
->in
.datalen
, offloaded
? "is" : "not");
1597 return read_pdu_skb(conn
, skb
, offset
, offloaded
);

static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
{
	struct cxgbi_device *cdev = csk->cdev;
	int must_send;
	u32 credits;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
		  csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
		  csk->rcv_wup, cdev->rx_credit_thres,
		  csk->rcv_win);

	if (!cdev->rx_credit_thres)
		return;

	if (csk->state != CTP_ESTABLISHED)
		return;

	credits = csk->copied_seq - csk->rcv_wup;
	if (unlikely(!credits))
		return;
	must_send = credits + 16384 >= csk->rcv_win;
	if (must_send || credits >= cdev->rx_credit_thres)
		csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
}
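
/*
 * The 16384 above forces a window update whenever the not-yet-returned
 * credits leave 16KB or less of the advertised receive window, even if
 * they are still below cdev->rx_credit_thres.
 */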

void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct iscsi_conn *conn = csk->user_data;
	struct sk_buff *skb;
	unsigned int read = 0;
	int err = 0;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p, conn 0x%p.\n", csk, conn);

	if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
			  csk, conn, conn ? conn->id : 0xFF,
			  conn ? conn->flags : 0xFF);
		return;
	}

	while (!err) {
		skb = skb_peek(&csk->receive_queue);
		if (!skb ||
		    !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
			if (skb)
				log_debug(1 << CXGBI_DBG_PDU_RX,
					  "skb 0x%p, NOT ready 0x%lx.\n",
					  skb, cxgbi_skcb_flags(skb));
			break;
		}
		__skb_unlink(skb, &csk->receive_queue);

		read += cxgbi_skcb_rx_pdulen(skb);
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
			  csk, skb, skb->len, cxgbi_skcb_flags(skb),
			  cxgbi_skcb_rx_pdulen(skb));

		if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
			err = skb_read_pdu_bhs(csk, conn, skb);
			if (err < 0) {
				pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
				       "f 0x%lx, plen %u.\n",
				       csk, skb, skb->len,
				       cxgbi_skcb_flags(skb),
				       cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}
			err = skb_read_pdu_data(conn, skb, skb,
						err + cdev->skb_rx_extra);
			if (err < 0)
				pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
				       "f 0x%lx, plen %u.\n",
				       csk, skb, skb->len,
				       cxgbi_skcb_flags(skb),
				       cxgbi_skcb_rx_pdulen(skb));
		} else {
			err = skb_read_pdu_bhs(csk, conn, skb);
			if (err < 0) {
				pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
				       "f 0x%lx, plen %u.\n",
				       csk, skb, skb->len,
				       cxgbi_skcb_flags(skb),
				       cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}

			if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
				struct sk_buff *dskb;

				dskb = skb_peek(&csk->receive_queue);
				if (!dskb) {
					pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
					       " plen %u, NO data.\n",
					       csk, skb, skb->len,
					       cxgbi_skcb_flags(skb),
					       cxgbi_skcb_rx_pdulen(skb));
					err = -EIO;
					goto skb_done;
				}
				__skb_unlink(dskb, &csk->receive_queue);

				err = skb_read_pdu_data(conn, skb, dskb, 0);
				if (err < 0)
					pr_err("data, csk 0x%p, skb 0x%p,%u, "
					       "f 0x%lx, plen %u, dskb 0x%p,"
					       "%u.\n",
					       csk, skb, skb->len,
					       cxgbi_skcb_flags(skb),
					       cxgbi_skcb_rx_pdulen(skb),
					       dskb, dskb->len);
				__kfree_skb(dskb);
			} else
				err = skb_read_pdu_data(conn, skb, skb, 0);
		}
skb_done:
		__kfree_skb(skb);
	}

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
	if (read) {
		csk->copied_seq += read;
		csk_return_rx_credits(csk, read);
		conn->rxdata_octets += read;
	}

	if (err < 0) {
		pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
			csk, conn, err, read);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);

static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
			   unsigned int offset, unsigned int *off,
			   struct scatterlist **sgp)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, sgcnt, i) {
		if (offset < sg->length) {
			*off = offset;
			*sgp = sg;
			return 0;
		}
		offset -= sg->length;
	}
	return -EFAULT;
}

static int
sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
		  unsigned int dlen, struct page_frag *frags,
		  int frag_max, u32 *dlimit)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		if (!sglen) {
			sg = sg_next(sg);
			if (!sg) {
				pr_warn("sg %d NULL, len %u/%u.\n",
					i, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);
		}
		copy = min(datalen, sglen);
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				pr_warn("too many pages %u, dlen %u.\n",
					frag_max, dlen);
				*dlimit = dlen - datalen;
				return -EINVAL;
			}

			frags[i].page = page;
			frags[i].offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);

	return i;
}

static void cxgbi_task_data_sgl_check(struct iscsi_task *task)
{
	struct scsi_cmnd *sc = task->sc;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct scatterlist *sg, *sgl = NULL;
	u32 sgcnt = 0;
	int i;

	tdata->flags = CXGBI_TASK_SGL_CHECKED;
	if (!sc)
		return;

	scmd_get_params(sc, &sgl, &sgcnt, &tdata->dlen, 0);
	if (!sgl || !sgcnt) {
		tdata->flags |= CXGBI_TASK_SGL_COPY;
		return;
	}

	for_each_sg(sgl, sg, sgcnt, i) {
		if (page_count(sg_page(sg)) < 1) {
			tdata->flags |= CXGBI_TASK_SGL_COPY;
			return;
		}
	}
}

static int
cxgbi_task_data_sgl_read(struct iscsi_task *task, u32 offset, u32 count,
			 u32 *dlimit)
{
	struct scsi_cmnd *sc = task->sc;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct scatterlist *sgl = NULL;
	struct scatterlist *sg;
	u32 dlen = 0;
	u32 sgcnt;
	int err;

	if (!sc)
		return 0;

	scmd_get_params(sc, &sgl, &sgcnt, &dlen, 0);
	if (!sgl || !sgcnt)
		return 0;

	err = sgl_seek_offset(sgl, sgcnt, offset, &tdata->sgoffset, &sg);
	if (err < 0) {
		pr_warn("tpdu max, sgl %u, bad offset %u/%u.\n",
			sgcnt, offset, tdata->dlen);
		return err;
	}
	err = sgl_read_to_frags(sg, tdata->sgoffset, count,
				tdata->frags, MAX_SKB_FRAGS, dlimit);
	if (err < 0) {
		log_debug(1 << CXGBI_DBG_ISCSI,
			  "sgl max limit, sgl %u, offset %u, %u/%u, dlimit %u.\n",
			  sgcnt, offset, count, tdata->dlen, *dlimit);
		return err;
	}
	tdata->offset = offset;
	tdata->count = count;
	tdata->nr_frags = err;
	tdata->total_count = count;
	tdata->total_offset = offset;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		  "%s: offset %u, count %u,\n"
		  "err %u, total_count %u, total_offset %u\n",
		  __func__, offset, count, err, tdata->total_count, tdata->total_offset);

	return 0;
}

int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 op)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = task->conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct cxgbi_sock *csk = cconn->cep ? cconn->cep->csk : NULL;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct scsi_cmnd *sc = task->sc;
	u32 headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
	u32 max_txdata_len = conn->max_xmit_dlength;
	u32 iso_tx_rsvd = 0, local_iso_info = 0;
	u32 last_tdata_offset, last_tdata_count;
	int err = 0;

	if (!tcp_task) {
		pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p.\n",
		       task, tcp_task, tdata);
		return -ENOMEM;
	}

	if (!csk) {
		pr_err("task 0x%p, csk gone.\n", task);
		return -EPIPE;
	}

	op &= ISCSI_OPCODE_MASK;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	last_tdata_count = tdata->count;
	last_tdata_offset = tdata->offset;

	if ((op == ISCSI_OP_SCSI_DATA_OUT) ||
	    ((op == ISCSI_OP_SCSI_CMD) &&
	     (sc->sc_data_direction == DMA_TO_DEVICE))) {
		u32 remaining_data_tosend, dlimit = 0;
		u32 max_pdu_size, max_num_pdu, num_pdu;
		u32 count;

		/* Preserve conn->max_xmit_dlength because it can get updated to
		 * ISO data size.
		 */
		if (task->state == ISCSI_TASK_PENDING)
			tdata->max_xmit_dlength = conn->max_xmit_dlength;

		if (!tdata->offset)
			cxgbi_task_data_sgl_check(task);

		remaining_data_tosend =
			tdata->dlen - tdata->offset - tdata->count;

recalculate_sgl:
		max_txdata_len = tdata->max_xmit_dlength;
		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
			  "tdata->dlen %u, remaining to send %u "
			  "conn->max_xmit_dlength %u, "
			  "tdata->max_xmit_dlength %u\n",
			  tdata->dlen, remaining_data_tosend,
			  conn->max_xmit_dlength, tdata->max_xmit_dlength);

		if (cdev->skb_iso_txhdr && !csk->disable_iso &&
		    (remaining_data_tosend > tdata->max_xmit_dlength) &&
		    !(remaining_data_tosend % 4)) {
			u32 max_iso_data;

			if ((op == ISCSI_OP_SCSI_CMD) &&
			    session->initial_r2t_en)
				goto no_iso;

			max_pdu_size = tdata->max_xmit_dlength +
				       ISCSI_PDU_NONPAYLOAD_LEN;
			max_iso_data = rounddown(CXGBI_MAX_ISO_DATA_IN_SKB,
						 csk->advmss);
			max_num_pdu = max_iso_data / max_pdu_size;

			num_pdu = (remaining_data_tosend +
				   tdata->max_xmit_dlength - 1) /
				  tdata->max_xmit_dlength;

			if (num_pdu > max_num_pdu)
				num_pdu = max_num_pdu;

			conn->max_xmit_dlength = tdata->max_xmit_dlength * num_pdu;
			max_txdata_len = conn->max_xmit_dlength;
			iso_tx_rsvd = cdev->skb_iso_txhdr;
			local_iso_info = sizeof(struct cxgbi_iso_info);

			log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
				  "max_pdu_size %u, max_num_pdu %u, "
				  "max_txdata %u, num_pdu %u\n",
				  max_pdu_size, max_num_pdu,
				  max_txdata_len, num_pdu);
		}

no_iso:
		count  = min_t(u32, max_txdata_len, remaining_data_tosend);
		err = cxgbi_task_data_sgl_read(task,
					       tdata->offset + tdata->count,
					       count, &dlimit);
		if (unlikely(err < 0)) {
			log_debug(1 << CXGBI_DBG_ISCSI,
				  "task 0x%p, tcp_task 0x%p, tdata 0x%p, "
				  "sgl err %d, count %u, dlimit %u\n",
				  task, tcp_task, tdata, err, count, dlimit);
			if (dlimit) {
				remaining_data_tosend =
					rounddown(dlimit,
						  tdata->max_xmit_dlength);
				if (!remaining_data_tosend)
					remaining_data_tosend = dlimit;

				dlimit = 0;

				conn->max_xmit_dlength = remaining_data_tosend;
				goto recalculate_sgl;
			}

			pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p, "
			       "sgl err %d\n",
			       task, tcp_task, tdata, err);
			goto ret_err;
		}

		if ((tdata->flags & CXGBI_TASK_SGL_COPY) ||
		    (tdata->nr_frags > MAX_SKB_FRAGS))
			headroom += conn->max_xmit_dlength;
	}

	tdata->skb = alloc_skb(local_iso_info + cdev->skb_tx_rsvd +
			       iso_tx_rsvd + headroom, GFP_ATOMIC);
	if (!tdata->skb) {
		tdata->count = last_tdata_count;
		tdata->offset = last_tdata_offset;
		err = -ENOMEM;
		goto ret_err;
	}

	skb_reserve(tdata->skb, local_iso_info + cdev->skb_tx_rsvd +
		    iso_tx_rsvd);

	if (task->sc) {
		task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	} else {
		task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC);
		if (!task->hdr) {
			__kfree_skb(tdata->skb);
			tdata->skb = NULL;
			return -ENOMEM;
		}
	}

	task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX;

	if (iso_tx_rsvd)
		cxgbi_skcb_set_flag(tdata->skb, SKCBF_TX_ISO);

	/* data_out uses scsi_cmd's itt */
	if (op != ISCSI_OP_SCSI_DATA_OUT)
		task_reserve_itt(task, &task->hdr->itt);

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		  "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
		  task, op, tdata->skb, cdev->skb_tx_rsvd, headroom,
		  conn->max_xmit_dlength, be32_to_cpu(task->hdr->itt));

	return 0;

ret_err:
	conn->max_xmit_dlength = tdata->max_xmit_dlength;
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);

static int
cxgbi_prep_iso_info(struct iscsi_task *task, struct sk_buff *skb,
		    u32 count)
{
	struct cxgbi_iso_info *iso_info = (struct cxgbi_iso_info *)skb->head;
	struct iscsi_r2t_info *r2t;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	u32 burst_size = 0, r2t_dlength = 0, dlength;
	u32 max_pdu_len = tdata->max_xmit_dlength;
	u32 segment_offset = 0;
	u32 num_pdu;

	if (unlikely(!cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)))
		return 0;

	memset(iso_info, 0, sizeof(struct cxgbi_iso_info));

	if (task->hdr->opcode == ISCSI_OP_SCSI_CMD && session->imm_data_en) {
		iso_info->flags |= CXGBI_ISO_INFO_IMM_ENABLE;
		burst_size = count;
	}

	dlength = ntoh24(task->hdr->dlength);
	dlength = min(dlength, max_pdu_len);
	hton24(task->hdr->dlength, dlength);

	num_pdu = (count + max_pdu_len - 1) / max_pdu_len;

	if (iscsi_task_has_unsol_data(task))
		r2t = &task->unsol_r2t;
	else
		r2t = tcp_task->r2t;

	if (r2t) {
		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
			  "count %u, tdata->count %u, num_pdu %u,"
			  "task->hdr_len %u, r2t->data_length %u, r2t->sent %u\n",
			  count, tdata->count, num_pdu, task->hdr_len,
			  r2t->data_length, r2t->sent);

		r2t_dlength = r2t->data_length - r2t->sent;
		segment_offset = r2t->sent;
		r2t->datasn += num_pdu - 1;
	}

	if (!r2t || !r2t->sent)
		iso_info->flags |= CXGBI_ISO_INFO_FSLICE;

	if (task->hdr->flags & ISCSI_FLAG_CMD_FINAL)
		iso_info->flags |= CXGBI_ISO_INFO_LSLICE;

	task->hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;

	iso_info->op = task->hdr->opcode;
	iso_info->ahs = task->hdr->hlength;
	iso_info->num_pdu = num_pdu;
	iso_info->mpdu = max_pdu_len;
	iso_info->burst_size = (burst_size + r2t_dlength) >> 2;
	iso_info->len = count + task->hdr_len;
	iso_info->segment_offset = segment_offset;

	cxgbi_skcb_tx_iscsi_hdrlen(skb) = task->hdr_len;
	return 0;
}
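
/*
 * iso_info describes one large skb that the hardware slices into
 * num_pdu PDUs of at most mpdu payload bytes each; FSLICE/LSLICE mark
 * the first/last slice of the sequence, and the >> 2 above presumably
 * expresses burst_size in 4-byte words.
 */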

static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
	if (hcrc || dcrc) {
		u8 submode = 0;

		if (hcrc)
			submode |= 1;
		if (dcrc)
			submode |= 2;
		cxgbi_skcb_tx_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
	} else
		cxgbi_skcb_tx_ulp_mode(skb) = 0;
}

static struct page *rsvd_page;

int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb;
	struct scsi_cmnd *sc = task->sc;
	u32 expected_count, expected_offset;
	u32 datalen = count, dlimit = 0;
	u32 i, padlen = iscsi_padding(count);
	struct page *pg;
	int err;

	if (!tcp_task || (tcp_task->dd_data != tdata)) {
		pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
		       task, task->sc, tcp_task,
		       tcp_task ? tcp_task->dd_data : NULL, tdata);
		return -EINVAL;
	}
	skb = tdata->skb;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		  "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
		  task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
		  be32_to_cpu(task->cmdsn), be32_to_cpu(task->hdr->itt), offset, count);

	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count) {
		tdata->count = count;
		tdata->offset = offset;
		tdata->nr_frags = 0;
		tdata->total_offset = 0;
		tdata->total_count = 0;
		if (tdata->max_xmit_dlength)
			conn->max_xmit_dlength = tdata->max_xmit_dlength;
		cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO);
		return 0;
	}

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		  "data->total_count %u, tdata->total_offset %u\n",
		  tdata->total_count, tdata->total_offset);

	expected_count = tdata->total_count;
	expected_offset = tdata->total_offset;

	if ((count != expected_count) ||
	    (offset != expected_offset)) {
		err = cxgbi_task_data_sgl_read(task, offset, count, &dlimit);
		if (err < 0) {
			pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p "
			       "dlimit %u, sgl err %d.\n", task, task->sc,
			       tcp_task, tcp_task ? tcp_task->dd_data : NULL,
			       tdata, dlimit, err);
			return err;
		}
	}

	/* Restore original value of conn->max_xmit_dlength because
	 * it can get updated to ISO data size.
	 */
	conn->max_xmit_dlength = tdata->max_xmit_dlength;

	if (sc) {
		struct page_frag *frag = tdata->frags;

		if ((tdata->flags & CXGBI_TASK_SGL_COPY) ||
		    (tdata->nr_frags > MAX_SKB_FRAGS) ||
		    (padlen && (tdata->nr_frags ==
					MAX_SKB_FRAGS))) {
			char *dst = skb->data + task->hdr_len;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page);

				memcpy(dst, src + frag->offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src);
			}

			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				get_page(frag->page);
				skb_fill_page_desc(skb, i, frag->page,
						   frag->offset, frag->size);
			}

			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}
	} else {
		pg = virt_to_head_page(task->data);
		get_page(pg);
		skb_fill_page_desc(skb, 0, pg,
				   task->data - (char *)page_address(pg),
				   count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	if (padlen) {
		get_page(rsvd_page);
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rsvd_page, 0, padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	if (likely(count > tdata->max_xmit_dlength))
		cxgbi_prep_iso_info(task, skb, count);
	else
		cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO);

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
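
/*
 * Pad bytes (iscsi_padding(count)) are attached as a page frag of the
 * shared zeroed rsvd_page when the payload itself goes out as frags,
 * so no per-PDU pad allocation is needed.
 */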

static int cxgbi_sock_tx_queue_up(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_iso_info *iso_cpl;
	u32 frags = skb_shinfo(skb)->nr_frags;
	u32 extra_len, num_pdu, hdr_len;
	u32 iso_tx_rsvd = 0;

	if (csk->state != CTP_ESTABLISHED) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
			  csk, csk->state, csk->flags, csk->tid);
		return -EAGAIN;
	}

	if (csk->err) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
			  csk, csk->state, csk->flags, csk->tid, csk->err);
		return -EPIPE;
	}

	if ((cdev->flags & CXGBI_FLAG_DEV_T3) &&
	    before((csk->snd_win + csk->snd_una), csk->write_seq)) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
			  csk, csk->state, csk->flags, csk->tid,
			  csk->write_seq, csk->snd_una, csk->snd_win);
		return -ENOBUFS;
	}

	if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))
		iso_tx_rsvd = cdev->skb_iso_txhdr;

	if (unlikely(skb_headroom(skb) < (cdev->skb_tx_rsvd + iso_tx_rsvd))) {
		pr_err("csk 0x%p, skb head %u < %u.\n",
		       csk, skb_headroom(skb), cdev->skb_tx_rsvd);
		return -EINVAL;
	}

	if (skb->len != skb->data_len)
		frags++;

	if (frags >= SKB_WR_LIST_SIZE) {
		pr_err("csk 0x%p, frags %u, %u,%u >%u.\n",
		       csk, skb_shinfo(skb)->nr_frags, skb->len,
		       skb->data_len, (unsigned int)SKB_WR_LIST_SIZE);
		return -EINVAL;
	}

	cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
	skb_reset_transport_header(skb);
	cxgbi_sock_skb_entail(csk, skb);

	extra_len = cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb));

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))) {
		iso_cpl = (struct cxgbi_iso_info *)skb->head;
		num_pdu = iso_cpl->num_pdu;
		hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb);
		extra_len = (cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb)) *
			     num_pdu) + (hdr_len * (num_pdu - 1));
	}

	csk->write_seq += (skb->len + extra_len);

	return 0;
}
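
/*
 * Illustrative sketch (not used by the driver) of the ISO extra_len
 * arithmetic above: every sub-PDU of an ISO burst carries its own ULP
 * overhead (digests/padding), and every sub-PDU after the first also
 * needs its own iSCSI header carved out of the payload.
 */
static inline u32 __maybe_unused
cxgbi_example_iso_extra_len(u32 ulp_extra, u32 hdr_len, u32 num_pdu)
{
	/* e.g. ulp_extra = 8 (header + data digest), hdr_len = 48,
	 * num_pdu = 4: extra_len = 8 * 4 + 48 * 3 = 176 bytes beyond
	 * skb->len are charged against the TCP send window.
	 */
	return (ulp_extra * num_pdu) + (hdr_len * (num_pdu - 1));
}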

static int cxgbi_sock_send_skb(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	struct cxgbi_device *cdev = csk->cdev;
	int len = skb->len;
	int err;

	spin_lock_bh(&csk->lock);
	err = cxgbi_sock_tx_queue_up(csk, skb);
	if (err < 0) {
		spin_unlock_bh(&csk->lock);
		return err;
	}

	if (likely(skb_queue_len(&csk->write_queue)))
		cdev->csk_push_tx_frames(csk, 0);
	spin_unlock_bh(&csk->lock);
	return len;
}

int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
	struct sk_buff *skb;
	struct cxgbi_sock *csk = NULL;
	u32 pdulen = 0;
	u32 datalen;
	int err;

	if (!tcp_task || (tcp_task->dd_data != tdata)) {
		pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
		       task, task->sc, tcp_task,
		       tcp_task ? tcp_task->dd_data : NULL, tdata);
		return -EINVAL;
	}

	skb = tdata->skb;
	if (!skb) {
		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
			  "task 0x%p, skb NULL.\n", task);
		return 0;
	}

	if (cconn && cconn->cep)
		csk = cconn->cep->csk;

	if (!csk) {
		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
			  "task 0x%p, csk gone.\n", task);
		return -EPIPE;
	}

	tdata->skb = NULL;
	datalen = skb->data_len;

	/* write ppod first if using ofldq to write ppod */
	if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) {
		struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev);

		ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID;
		if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0)
			pr_err("task 0x%p, ppod writing using ofldq failed.\n",
			       task);
			/* continue. Let fl get the data */
	}

	if (!task->sc)
		memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX);

	err = cxgbi_sock_send_skb(csk, skb);
	if (err > 0) {
		pdulen = err;

		log_debug(1 << CXGBI_DBG_PDU_TX, "task 0x%p,0x%p, rv %d.\n",
			  task, task->sc, err);

		if (task->conn->hdrdgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		if (datalen && task->conn->datadgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		task->conn->txdata_octets += pdulen;

		if (unlikely(cxgbi_is_iso_config(csk) && cxgbi_is_iso_disabled(csk))) {
			if (time_after(jiffies, csk->prev_iso_ts + HZ)) {
				csk->disable_iso = false;
				csk->prev_iso_ts = 0;
				log_debug(1 << CXGBI_DBG_PDU_TX,
					  "enable iso: csk 0x%p\n", csk);
			}
		}

		return 0;
	}

	if (err == -EAGAIN || err == -ENOBUFS) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
			  task, skb, skb->len, skb->data_len, err);
		/* reset skb to send when we are called again */
		tdata->skb = skb;

		if (cxgbi_is_iso_config(csk) && !cxgbi_is_iso_disabled(csk) &&
		    (csk->no_tx_credits++ >= 2)) {
			csk->disable_iso = true;
			csk->prev_iso_ts = jiffies;
			log_debug(1 << CXGBI_DBG_PDU_TX,
				  "disable iso:csk 0x%p, ts:%lu\n",
				  csk, csk->prev_iso_ts);
		}

		return err;
	}

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		  "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
		  task->itt, skb, skb->len, skb->data_len, err);
	__kfree_skb(skb);
	iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
	iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);
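
/*
 * Note on the accounting above: on a successful send,
 * cxgbi_sock_send_skb() returns skb->len, and a 4-byte CRC32C digest
 * (ISCSI_DIGEST_SIZE) is added per enabled digest type. For example,
 * with both digests enabled and a non-empty data segment,
 * conn->txdata_octets grows by skb->len + 8 for the PDU.
 */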

void cxgbi_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);

	if (!tcp_task || (tcp_task->dd_data != tdata)) {
		pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
			task, task->sc, tcp_task,
			tcp_task ? tcp_task->dd_data : NULL, tdata);
		return;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "task 0x%p, skb 0x%p, itt 0x%x.\n",
		  task, tdata->skb, task->hdr_itt);

	tcp_task->dd_data = NULL;

	if (!task->sc)
		kfree(task->hdr);
	task->hdr = NULL;

	/* never reached the xmit task callout */
	if (tdata->skb) {
		__kfree_skb(tdata->skb);
		tdata->skb = NULL;
	}

	task_release_itt(task, task->hdr_itt);
	memset(tdata, 0, sizeof(*tdata));

	iscsi_tcp_cleanup_task(task);
}
EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);

void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
			  struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->digest_err = 0;
	stats->timeout_err = 0;
	stats->custom_length = 1;
	strcpy(stats->custom[0].desc, "eh_abort_cnt");
	stats->custom[0].value = conn->eh_abort_cnt;
}
EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);

static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
	unsigned int max_def = 512 * MAX_SKB_FRAGS;
	unsigned int max = max(max_def, headroom);

	max = min(cconn->chba->cdev->tx_max_size, max);
	if (conn->max_xmit_dlength)
		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
	else
		conn->max_xmit_dlength = max;
	cxgbi_align_pdu_size(conn->max_xmit_dlength);

	return 0;
}
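
/*
 * Worked example (illustrative, typical values assumed): with
 * MAX_SKB_FRAGS == 17, max_def = 512 * 17 = 8704. If the adapter
 * reports tx_max_size = 16384 and the skb headroom bound is smaller
 * than 8704, then max = min(16384, max(8704, headroom)) = 8704, so a
 * negotiated MaxXmitDataSegmentLength of 65536 is clamped to 8704 and
 * cxgbi_align_pdu_size() keeps it on a 512-byte multiple.
 */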

static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	unsigned int max = cconn->chba->cdev->rx_max_size;

	cxgbi_align_pdu_size(max);

	if (conn->max_recv_dlength) {
		if (conn->max_recv_dlength > max) {
			pr_err("MaxRecvDataSegmentLength %u > %u.\n",
			       conn->max_recv_dlength, max);
			return -EINVAL;
		}
		conn->max_recv_dlength = min(conn->max_recv_dlength, max);
		cxgbi_align_pdu_size(conn->max_recv_dlength);
	} else
		conn->max_recv_dlength = max;

	return 0;
}
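
/*
 * The receive direction above is stricter than the transmit one: the
 * adapter's rx_max_size is a hard limit, so a MaxRecvDataSegmentLength
 * that exceeds it is rejected with -EINVAL rather than silently
 * clamped, while the transmit side quietly shrinks the PDU size.
 */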

int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
			 enum iscsi_param param, char *buf, int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_sock *csk = cconn->cep->csk;
	int err;

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "cls_conn 0x%p, param %d, buf(%d) %s.\n",
		  cls_conn, param, buflen, buf);

	switch (param) {
	case ISCSI_PARAM_HDRDGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->hdrdgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->datadgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en);
		break;
	case ISCSI_PARAM_MAX_R2T:
		return iscsi_tcp_set_max_r2t(conn, buf);
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_recv_dlength(conn);
		break;
	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_xmit_dlength(conn);
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
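
/*
 * Typical call path (sketch): during login negotiation the iSCSI
 * transport class applies each negotiated key through this callout,
 * e.g. HeaderDigest=CRC32C arrives as ISCSI_PARAM_HDRDGST_EN, which
 * both updates libiscsi state via iscsi_set_param() and programs the
 * digest offload on the adapter through csk_ddp_setup_digest().
 */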

int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
		       char *buf)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk;

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "cls_conn 0x%p, param %d.\n", ep, param);

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		if (!cep)
			return -ENOTCONN;

		csk = cep->csk;
		if (!csk)
			return -ENOTCONN;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &csk->daddr, param, buf);
	default:
		break;
	}
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);

struct iscsi_cls_conn *
cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct cxgbi_conn *cconn;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
	if (!cls_conn)
		return NULL;

	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	cconn = tcp_conn->dd_data;
	cconn->iconn = conn;

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
		  cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);

	return cls_conn;
}
EXPORT_SYMBOL_GPL(cxgbi_create_conn);

int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
		    struct iscsi_cls_conn *cls_conn,
		    u64 transport_eph, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_ppm *ppm;
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_sock *csk;
	int err;

	ep = iscsi_lookup_endpoint(transport_eph);
	if (!ep)
		return -EINVAL;

	/* setup ddp pagesize */
	cep = ep->dd_data;
	csk = cep->csk;

	ppm = csk->cdev->cdev2ppm(csk->cdev);
	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
					     ppm->tformat.pgsz_idx_dflt);
	if (err < 0)
		goto put_ep;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err) {
		err = -EINVAL;
		goto put_ep;
	}

	/* calculate the tag idx bits needed for this conn based on cmds_max */
	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;

	write_lock_bh(&csk->callback_lock);
	csk->user_data = conn;
	cconn->chba = cep->chba;
	cconn->cep = cep;
	cep->cconn = cconn;
	write_unlock_bh(&csk->callback_lock);

	cxgbi_conn_max_xmit_dlength(conn);
	cxgbi_conn_max_recv_dlength(conn);

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
		  cls_session, cls_conn, ep, cconn, csk);

	/* init recv engine */
	iscsi_tcp_hdr_recv_prep(tcp_conn);

put_ep:
	iscsi_put_endpoint(ep);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
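
/*
 * Example of the tag-bit sizing above (illustrative): a session
 * created with cmds_max = 128 yields
 * task_idx_bits = __ilog2_u32(127) + 1 = 7, i.e. 7 bits of the DDP
 * tag are reserved to encode up to 128 outstanding task indices.
 */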

struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
					       u16 cmds_max, u16 qdepth,
					       u32 initial_cmdsn)
{
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;

	if (!ep) {
		pr_err("missing endpoint.\n");
		return NULL;
	}

	cep = ep->dd_data;
	chba = cep->chba;
	shost = chba->shost;

	BUG_ON(chba != iscsi_host_priv(shost));

	cls_session = iscsi_session_setup(chba->cdev->itp, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct cxgbi_task_data),
					  initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	session = cls_session->dd_data;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_create_session);
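
/*
 * Per-task layout implied by the iscsi_session_setup() sizing above:
 * each task's dd_data holds a struct iscsi_tcp_task immediately
 * followed by a struct cxgbi_task_data, and iscsi_task_cxgbi_data()
 * returns a pointer to the cxgbi portion of that allocation.
 */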

void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
{
	log_debug(1 << CXGBI_DBG_ISCSI,
		  "cls sess 0x%p.\n", cls_session);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);
}
EXPORT_SYMBOL_GPL(cxgbi_destroy_session);

int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			 char *buf, int buflen)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not set host param. "
			     "netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
		  shost, chba, chba->ndev->name, param, buflen, buf);

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		__be32 addr = in_aton(buf);

		log_debug(1 << CXGBI_DBG_ISCSI,
			  "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
		cxgbi_set_iscsi_ipv4(chba, addr);
		return 0;
	}
	case ISCSI_HOST_PARAM_HWADDRESS:
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		return 0;
	default:
		return iscsi_host_set_param(shost, param, buf, buflen);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_set_host_param);

int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			 char *buf)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);
	int len = 0;

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not get host param. "
			     "netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "shost 0x%p, hba 0x%p,%s, param %d.\n",
		  shost, chba, chba->ndev->name, param);

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
		break;
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sprintf(buf, "%s\n", chba->ndev->name);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		struct cxgbi_sock *csk = find_sock_on_port(chba->cdev,
							   chba->port_id);
		if (csk) {
			len = sprintf(buf, "%pIS",
				      (struct sockaddr *)&csk->saddr);
		}
		log_debug(1 << CXGBI_DBG_ISCSI,
			  "hba %s, addr %s.\n", chba->ndev->name, buf);
		break;
	}
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return len;
}
EXPORT_SYMBOL_GPL(cxgbi_get_host_param);

struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
					struct sockaddr *dst_addr,
					int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *hba = NULL;
	struct cxgbi_sock *csk;
	int ifindex = 0;
	int err = -EINVAL;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		  "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
		  shost, non_blocking, dst_addr);

	if (shost) {
		hba = iscsi_host_priv(shost);
		if (!hba) {
			pr_info("shost 0x%p, priv NULL.\n", shost);
			goto err_out;
		}
	}

check_route:
	if (dst_addr->sa_family == AF_INET) {
		csk = cxgbi_check_route(dst_addr, ifindex);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (dst_addr->sa_family == AF_INET6) {
		csk = cxgbi_check_route6(dst_addr, ifindex);
#endif
	} else {
		pr_info("address family 0x%x NOT supported.\n",
			dst_addr->sa_family);
		err = -EAFNOSUPPORT;
		return (struct iscsi_endpoint *)ERR_PTR(err);
	}

	if (IS_ERR(csk))
		return (struct iscsi_endpoint *)csk;
	cxgbi_sock_get(csk);

	if (!hba)
		hba = csk->cdev->hbas[csk->port_id];
	else if (hba != csk->cdev->hbas[csk->port_id]) {
		if (ifindex != hba->ndev->ifindex) {
			cxgbi_sock_put(csk);
			cxgbi_sock_closed(csk);
			ifindex = hba->ndev->ifindex;
			goto check_route;
		}

		pr_info("Could not connect through requested host %u "
			"hba 0x%p != 0x%p (%u).\n",
			shost->host_no, hba,
			csk->cdev->hbas[csk->port_id], csk->port_id);
		err = -ENOSPC;
		goto release_conn;
	}

	err = sock_get_port(csk);
	if (err)
		goto release_conn;

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	err = csk->cdev->csk_init_act_open(csk);
	if (err)
		goto release_conn;

	if (cxgbi_sock_is_closing(csk)) {
		err = -ENOSPC;
		pr_info("csk 0x%p is closing.\n", csk);
		goto release_conn;
	}

	ep = iscsi_create_endpoint(sizeof(*cep));
	if (!ep) {
		err = -ENOMEM;
		pr_info("iscsi alloc ep, OOM.\n");
		goto release_conn;
	}

	cep = ep->dd_data;
	cep->csk = csk;
	cep->chba = hba;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		  "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
		  ep, cep, csk, hba, hba->ndev->name);
	return ep;

release_conn:
	cxgbi_sock_put(csk);
	cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_connect);

int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk = cep->csk;

	if (!cxgbi_sock_is_established(csk))
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(cxgbi_ep_poll);

void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_conn *cconn = cep->cconn;
	struct cxgbi_sock *csk = cep->csk;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		  "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
		  ep, cep, cconn, csk, csk->state, csk->flags);

	if (cconn && cconn->iconn) {
		write_lock_bh(&csk->callback_lock);
		cep->csk->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&csk->callback_lock);
	}
	iscsi_destroy_endpoint(ep);

	if (likely(csk->state >= CTP_ESTABLISHED))
		need_active_close(csk);
	else
		cxgbi_sock_closed(csk);

	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);

int cxgbi_iscsi_init(struct iscsi_transport *itp,
		     struct scsi_transport_template **stt)
{
	*stt = iscsi_register_transport(itp);
	if (*stt == NULL) {
		pr_err("unable to register %s transport 0x%p.\n",
		       itp->name, itp);
		return -ENODEV;
	}
	log_debug(1 << CXGBI_DBG_ISCSI,
		  "%s, registered iscsi transport 0x%p.\n",
		  itp->name, itp);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);

void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
			 struct scsi_transport_template **stt)
{
	if (*stt) {
		log_debug(1 << CXGBI_DBG_ISCSI,
			  "de-register transport 0x%p, %s, stt 0x%p.\n",
			  itp, itp->name, *stt);
		*stt = NULL;
		iscsi_unregister_transport(itp);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);

umode_t cxgbi_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
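
/*
 * Usage sketch (hypothetical LLD, modeled on how the cxgb3i/cxgb4i
 * drivers consume this library): the exported callouts above are
 * wired into a struct iscsi_transport template and registered through
 * cxgbi_iscsi_init(). All names below are illustrative only:
 *
 *	static struct scsi_transport_template *example_stt;
 *
 *	static struct iscsi_transport example_iscsi_transport = {
 *		.owner		 = THIS_MODULE,
 *		.name		 = "example",
 *		.attr_is_visible = cxgbi_attr_is_visible,
 *		.get_host_param	 = cxgbi_get_host_param,
 *		.set_host_param	 = cxgbi_set_host_param,
 *		.create_session	 = cxgbi_create_session,
 *		.destroy_session = cxgbi_destroy_session,
 *		.create_conn	 = cxgbi_create_conn,
 *		.bind_conn	 = cxgbi_bind_conn,
 *		.set_param	 = cxgbi_set_conn_param,
 *		.get_stats	 = cxgbi_get_conn_stats,
 *		.cleanup_task	 = cxgbi_cleanup_task,
 *		.init_pdu	 = cxgbi_conn_init_pdu,
 *		.xmit_pdu	 = cxgbi_conn_xmit_pdu,
 *		.get_ep_param	 = cxgbi_get_ep_param,
 *		.ep_connect	 = cxgbi_ep_connect,
 *		.ep_poll	 = cxgbi_ep_poll,
 *		.ep_disconnect	 = cxgbi_ep_disconnect,
 *	};
 *
 *	err = cxgbi_iscsi_init(&example_iscsi_transport, &example_stt);
 */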

static int __init libcxgbi_init_module(void)
{
	pr_info("%s", version);

	BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) <
		     sizeof(struct cxgbi_skb_cb));
	rsvd_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!rsvd_page)
		return -ENOMEM;

	return 0;
}

static void __exit libcxgbi_exit_module(void)
{
	cxgbi_device_unregister_all(0xFF);
	put_page(rsvd_page);
}

module_init(libcxgbi_init_module);
module_exit(libcxgbi_exit_module);