/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#ifndef __CXGBIT_H__
#define __CXGBIT_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <target/iscsi/iscsi_transport.h>
#include <iscsi_target_parameters.h>
#include <iscsi_target_login.h>

#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "libcxgb_ppm.h"
#include "cxgbit_lro.h"
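
/* Every registered cxgbit_device is linked on cdev_list_head, under cdev_list_lock. */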
extern struct mutex cdev_list_lock;
extern struct list_head cdev_list_head;

struct cxgbit_cmd {
	struct scatterlist sg;
	struct cxgbi_task_tag_info ttinfo;
	bool setup_ddp;
	bool release;
};

#define CXGBIT_MAX_ISO_PAYLOAD	\
	min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
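
/*
 * i.e. an ISO burst is bounded both by what fits in a single skb's frag
 * list (MAX_SKB_FRAGS pages) and by 65535, the largest value a 16-bit
 * length field can carry.
 */
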
struct cxgbit_iso_info {
	u8 flags;
	u32 mpdu;
	u32 len;
	u32 burst_len;
};

enum cxgbit_skcb_flags {
	SKCBF_TX_NEED_HDR	= (1 << 0), /* packet needs a header */
	SKCBF_TX_FLAG_COMPL	= (1 << 1), /* wr completion flag */
	SKCBF_TX_ISO		= (1 << 2), /* iso cpl in tx skb */
	SKCBF_RX_LRO		= (1 << 3), /* lro skb */
};

struct cxgbit_skb_rx_cb {
	u8 opcode;
	void *pdu_cb;
	void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *);
};

struct cxgbit_skb_tx_cb {
	u8 submode;
	u32 extra_len;
};

union cxgbit_skb_cb {
	struct {
		u8 flags;
		union {
			struct cxgbit_skb_tx_cb tx;
			struct cxgbit_skb_rx_cb rx;
		};
	};

	struct {
		/* This member must be first. */
		struct l2t_skb_cb l2t;
		struct sk_buff *wr_next;
	};
};
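
/*
 * The union overlays skb->cb[].  Keeping l2t_skb_cb first in the second
 * layout lets the same skb be handed to the lower cxgb4/L2T transmit
 * path, which interprets the start of cb[] as its own control block.
 */
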
#define CXGBIT_SKB_CB(skb)	((union cxgbit_skb_cb *)&((skb)->cb[0]))
#define cxgbit_skcb_flags(skb)	(CXGBIT_SKB_CB(skb)->flags)
#define cxgbit_skcb_submode(skb)	(CXGBIT_SKB_CB(skb)->tx.submode)
#define cxgbit_skcb_tx_wr_next(skb)	(CXGBIT_SKB_CB(skb)->wr_next)
#define cxgbit_skcb_tx_extralen(skb)	(CXGBIT_SKB_CB(skb)->tx.extra_len)
#define cxgbit_skcb_rx_opcode(skb)	(CXGBIT_SKB_CB(skb)->rx.opcode)
#define cxgbit_skcb_rx_backlog_fn(skb)	(CXGBIT_SKB_CB(skb)->rx.backlog_fn)
#define cxgbit_rx_pdu_cb(skb)		(CXGBIT_SKB_CB(skb)->rx.pdu_cb)
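
/*
 * Usage sketch: a TX skb carrying an ISO CPL with header and data digest
 * offload would typically be marked along the lines of
 *
 *	cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR | SKCBF_TX_ISO;
 *	cxgbit_skcb_submode(skb) = CXGBIT_SUBMODE_HCRC | CXGBIT_SUBMODE_DCRC;
 *
 * (CXGBIT_SUBMODE_* are defined with the submode field in struct cxgbit_sock below.)
 */
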
static inline void *cplhdr(struct sk_buff *skb)
{
	return (void *)(skb->data);
}

enum cxgbit_cdev_flags {
	CDEV_STATE_UP = 0,
	CDEV_ISO_ENABLE,
	CDEV_DDP_ENABLE,
};

#define NP_INFO_HASH_SIZE 32

struct np_info {
	struct np_info *next;
	struct cxgbit_np *cnp;
	unsigned int stid;
};

struct cxgbit_list_head {
	struct list_head list;
	/* device lock */
	spinlock_t lock;
};

struct cxgbit_device {
	struct list_head list;
	struct cxgb4_lld_info lldi;
	struct np_info *np_hash_tab[NP_INFO_HASH_SIZE];
	/* np lock */
	spinlock_t np_lock;
	u8 selectq[MAX_NPORTS][2];
	struct cxgbit_list_head cskq;
	u32 mdsl;
	struct kref kref;
	unsigned long flags;
};

struct cxgbit_wr_wait {
	struct completion completion;
	int ret;
};

enum cxgbit_csk_state {
	CSK_STATE_IDLE = 0,
	CSK_STATE_LISTEN,
	CSK_STATE_CONNECTING,
	CSK_STATE_ESTABLISHED,
	CSK_STATE_ABORTING,
	CSK_STATE_CLOSING,
	CSK_STATE_MORIBUND,
	CSK_STATE_DEAD,
};

enum cxgbit_csk_flags {
	CSK_TX_DATA_SENT = 0,
	CSK_LOGIN_PDU_DONE,
	CSK_LOGIN_DONE,
	CSK_DDP_ENABLE,
	CSK_ABORT_RPL_WAIT,
};

struct cxgbit_sock_common {
	struct cxgbit_device *cdev;
	struct sockaddr_storage local_addr;
	struct sockaddr_storage remote_addr;
	struct cxgbit_wr_wait wr_wait;
	enum cxgbit_csk_state state;
	unsigned long flags;
};

struct cxgbit_np {
	struct cxgbit_sock_common com;
	wait_queue_head_t accept_wait;
	struct iscsi_np *np;
	struct completion accept_comp;
	struct list_head np_accept_list;
	/* np accept lock */
	spinlock_t np_accept_lock;
	struct kref kref;
	unsigned int stid;
};

struct cxgbit_sock {
	struct cxgbit_sock_common com;
	struct cxgbit_np *cnp;
	struct iscsi_conn *conn;
	struct l2t_entry *l2t;
	struct dst_entry *dst;
	struct list_head list;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head ppodq;
	struct sk_buff_head backlogq;
	struct sk_buff_head skbq;
	struct sk_buff *wr_pending_head;
	struct sk_buff *wr_pending_tail;
	struct sk_buff *lro_skb;
	struct sk_buff *lro_hskb;
	struct list_head accept_node;
	/* socket lock */
	spinlock_t lock;
	wait_queue_head_t waitq;
	struct kref kref;
	/* ... tid, credit, TCP window/sequence and queue-index state ... */
#define CXGBIT_SUBMODE_HCRC 0x1
#define CXGBIT_SUBMODE_DCRC 0x2
	u8 submode;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_priority;
#endif
};

void _cxgbit_free_cdev(struct kref *kref);
void _cxgbit_free_csk(struct kref *kref);
void _cxgbit_free_cnp(struct kref *kref);

static inline void cxgbit_get_cdev(struct cxgbit_device *cdev)
{
	kref_get(&cdev->kref);
}

static inline void cxgbit_put_cdev(struct cxgbit_device *cdev)
{
	kref_put(&cdev->kref, _cxgbit_free_cdev);
}

static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
{
	kref_get(&csk->kref);
}

static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
{
	kref_put(&csk->kref, _cxgbit_free_csk);
}

static inline void cxgbit_get_cnp(struct cxgbit_np *cnp)
{
	kref_get(&cnp->kref);
}

static inline void cxgbit_put_cnp(struct cxgbit_np *cnp)
{
	kref_put(&cnp->kref, _cxgbit_free_cnp);
}
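
/*
 * Sketch of the intended pairing: any context that stashes a cdev/csk/cnp
 * pointer (a CPL handler, a queued work item, the accept list) takes a
 * reference first and drops it when finished; the final cxgbit_put_*()
 * runs the matching _cxgbit_free_*() callback.
 *
 *	cxgbit_get_csk(csk);
 *	...
 *	cxgbit_put_csk(csk);
 */
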
static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
{
	csk->wr_pending_tail = NULL;
	csk->wr_pending_head = NULL;
}

static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
{
	return csk->wr_pending_head;
}

static inline void
cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_skcb_tx_wr_next(skb) = NULL;

	skb_get(skb);

	if (!csk->wr_pending_head)
		csk->wr_pending_head = skb;
	else
		cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
	csk->wr_pending_tail = skb;
}

static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk)
{
	struct sk_buff *skb = csk->wr_pending_head;

	if (likely(skb)) {
		csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
		cxgbit_skcb_tx_wr_next(skb) = NULL;
	}
	return skb;
}
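
/*
 * The pending-WR queue is a singly linked list threaded through the
 * cxgbit_skcb_tx_wr_next() pointer in each skb's control block.  A
 * completion or teardown path would typically drain it with something
 * like:
 *
 *	while ((skb = cxgbit_sock_dequeue_wr(csk)))
 *		kfree_skb(skb);
 */
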
typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
				       struct sk_buff *);

int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
void cxgbit_free_np(struct iscsi_np *);
void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsi_conn *);
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
int cxgbit_rx_data_ack(struct cxgbit_sock *);
int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
		    struct l2t_entry *);
void cxgbit_push_tx_frames(struct cxgbit_sock *);
int cxgbit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
int cxgbit_xmit_pdu(struct iscsi_conn *, struct iscsi_cmd *,
		    struct iscsi_datain_req *, const void *, u32);
void cxgbit_get_r2t_ttt(struct iscsi_conn *, struct iscsi_cmd *,
			struct iscsi_r2t *);
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
void cxgbit_get_rx_pdu(struct iscsi_conn *);
int cxgbit_validate_params(struct iscsi_conn *);
struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);

int cxgbit_ddp_init(struct cxgbit_device *);
int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *);
void cxgbit_unmap_cmd(struct iscsi_conn *, struct iscsi_cmd *);

static inline
struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
{
	return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm);
}

#endif /* __CXGBIT_H__ */