1 /* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2016 Chelsio Communications, Inc. */
9 #include <linux/mutex.h>
10 #include <linux/list.h>
11 #include <linux/spinlock.h>
12 #include <linux/idr.h>
13 #include <linux/completion.h>
14 #include <linux/netdevice.h>
15 #include <linux/sched.h>
16 #include <linux/pci.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/inet.h>
19 #include <linux/wait.h>
20 #include <linux/kref.h>
21 #include <linux/timer.h>
24 #include <asm/byteorder.h>
26 #include <net/net_namespace.h>
28 #include <target/iscsi/iscsi_transport.h>
29 #include <iscsi_target_parameters.h>
30 #include <iscsi_target_login.h>
35 #include "cxgb4_uld.h"
37 #include "libcxgb_ppm.h"
38 #include "cxgbit_lro.h"
/*
 * Global registry of cxgbit devices; cdev_list_head holds every
 * registered cxgbit_device and is protected by cdev_list_lock.
 */
extern struct mutex cdev_list_lock;
extern struct list_head cdev_list_head;
47 struct scatterlist sg
;
48 struct cxgbi_task_tag_info ttinfo
;
/*
 * Largest ISO (segmentation-offload) payload we hand to the hardware:
 * bounded both by the skb fragment capacity and by 65535 (16-bit length).
 */
#define CXGBIT_MAX_ISO_PAYLOAD	\
	min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
56 struct cxgbit_iso_info
{
/*
 * Per-skb flag bits kept in the driver-private skb control block
 * (accessed via cxgbit_skcb_flags()).
 * NOTE(review): closing brace was lost in extraction and has been
 * restored; the four entries below are the visible ones.
 */
enum cxgbit_skcb_flags {
	SKCBF_TX_NEED_HDR	= (1 << 0), /* packet needs a header */
	SKCBF_TX_FLAG_COMPL	= (1 << 1), /* wr completion flag */
	SKCBF_TX_ISO		= (1 << 2), /* iso cpl in tx skb */
	SKCBF_RX_LRO		= (1 << 3), /* lro skb */
};
70 struct cxgbit_skb_rx_cb
{
73 void (*backlog_fn
)(struct cxgbit_sock
*, struct sk_buff
*);
76 struct cxgbit_skb_tx_cb
{
85 struct cxgbit_skb_tx_cb tx
;
86 struct cxgbit_skb_rx_cb rx
;
91 /* This member must be first. */
92 struct l2t_skb_cb l2t
;
93 struct sk_buff
*wr_next
;
/*
 * Accessors for the driver-private control block overlaid on skb->cb[].
 * CXGBIT_SKB_CB() yields the union; the helpers below pick out the
 * tx/rx sub-structures and shared fields.
 */
#define CXGBIT_SKB_CB(skb)	((union cxgbit_skb_cb *)&((skb)->cb[0]))
#define cxgbit_skcb_flags(skb)		(CXGBIT_SKB_CB(skb)->flags)
#define cxgbit_skcb_submode(skb)	(CXGBIT_SKB_CB(skb)->tx.submode)
#define cxgbit_skcb_tx_wr_next(skb)	(CXGBIT_SKB_CB(skb)->wr_next)
#define cxgbit_skcb_tx_extralen(skb)	(CXGBIT_SKB_CB(skb)->tx.extra_len)
#define cxgbit_skcb_rx_opcode(skb)	(CXGBIT_SKB_CB(skb)->rx.opcode)
#define cxgbit_skcb_rx_backlog_fn(skb)	(CXGBIT_SKB_CB(skb)->rx.backlog_fn)
#define cxgbit_rx_pdu_cb(skb)		(CXGBIT_SKB_CB(skb)->rx.pdu_cb)
106 static inline void *cplhdr(struct sk_buff
*skb
)
111 enum cxgbit_cdev_flags
{
117 #define NP_INFO_HASH_SIZE 32
120 struct np_info
*next
;
121 struct cxgbit_np
*cnp
;
125 struct cxgbit_list_head
{
126 struct list_head list
;
131 struct cxgbit_device
{
132 struct list_head list
;
133 struct cxgb4_lld_info lldi
;
134 struct np_info
*np_hash_tab
[NP_INFO_HASH_SIZE
];
137 u8 selectq
[MAX_NPORTS
][2];
138 struct cxgbit_list_head cskq
;
144 struct cxgbit_wr_wait
{
145 struct completion completion
;
149 enum cxgbit_csk_state
{
152 CSK_STATE_CONNECTING
,
153 CSK_STATE_ESTABLISHED
,
160 enum cxgbit_csk_flags
{
161 CSK_TX_DATA_SENT
= 0,
168 struct cxgbit_sock_common
{
169 struct cxgbit_device
*cdev
;
170 struct sockaddr_storage local_addr
;
171 struct sockaddr_storage remote_addr
;
172 struct cxgbit_wr_wait wr_wait
;
173 enum cxgbit_csk_state state
;
178 struct cxgbit_sock_common com
;
179 wait_queue_head_t accept_wait
;
181 struct completion accept_comp
;
182 struct list_head np_accept_list
;
184 spinlock_t np_accept_lock
;
190 struct cxgbit_sock_common com
;
191 struct cxgbit_np
*cnp
;
192 struct iscsi_conn
*conn
;
193 struct l2t_entry
*l2t
;
194 struct dst_entry
*dst
;
195 struct list_head list
;
196 struct sk_buff_head rxq
;
197 struct sk_buff_head txq
;
198 struct sk_buff_head ppodq
;
199 struct sk_buff_head backlogq
;
200 struct sk_buff_head skbq
;
201 struct sk_buff
*wr_pending_head
;
202 struct sk_buff
*wr_pending_tail
;
204 struct sk_buff
*lro_skb
;
205 struct sk_buff
*lro_hskb
;
206 struct list_head accept_node
;
209 wait_queue_head_t waitq
;
210 wait_queue_head_t ack_waitq
;
/* iSCSI digest offload submode flags: header CRC and data CRC. */
#define CXGBIT_SUBMODE_HCRC	0x1
#define CXGBIT_SUBMODE_DCRC	0x2
239 #ifdef CONFIG_CHELSIO_T4_DCB
/* kref release callbacks, invoked when the final reference is dropped. */
void _cxgbit_free_cdev(struct kref *kref);
void _cxgbit_free_csk(struct kref *kref);
void _cxgbit_free_cnp(struct kref *kref);
249 static inline void cxgbit_get_cdev(struct cxgbit_device
*cdev
)
251 kref_get(&cdev
->kref
);
254 static inline void cxgbit_put_cdev(struct cxgbit_device
*cdev
)
256 kref_put(&cdev
->kref
, _cxgbit_free_cdev
);
259 static inline void cxgbit_get_csk(struct cxgbit_sock
*csk
)
261 kref_get(&csk
->kref
);
264 static inline void cxgbit_put_csk(struct cxgbit_sock
*csk
)
266 kref_put(&csk
->kref
, _cxgbit_free_csk
);
269 static inline void cxgbit_get_cnp(struct cxgbit_np
*cnp
)
271 kref_get(&cnp
->kref
);
274 static inline void cxgbit_put_cnp(struct cxgbit_np
*cnp
)
276 kref_put(&cnp
->kref
, _cxgbit_free_cnp
);
279 static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock
*csk
)
281 csk
->wr_pending_tail
= NULL
;
282 csk
->wr_pending_head
= NULL
;
285 static inline struct sk_buff
*cxgbit_sock_peek_wr(const struct cxgbit_sock
*csk
)
287 return csk
->wr_pending_head
;
291 cxgbit_sock_enqueue_wr(struct cxgbit_sock
*csk
, struct sk_buff
*skb
)
293 cxgbit_skcb_tx_wr_next(skb
) = NULL
;
297 if (!csk
->wr_pending_head
)
298 csk
->wr_pending_head
= skb
;
300 cxgbit_skcb_tx_wr_next(csk
->wr_pending_tail
) = skb
;
301 csk
->wr_pending_tail
= skb
;
304 static inline struct sk_buff
*cxgbit_sock_dequeue_wr(struct cxgbit_sock
*csk
)
306 struct sk_buff
*skb
= csk
->wr_pending_head
;
309 csk
->wr_pending_head
= cxgbit_skcb_tx_wr_next(skb
);
310 cxgbit_skcb_tx_wr_next(skb
) = NULL
;
315 typedef void (*cxgbit_cplhandler_func
)(struct cxgbit_device
*,
318 int cxgbit_setup_np(struct iscsi_np
*, struct sockaddr_storage
*);
319 int cxgbit_setup_conn_digest(struct cxgbit_sock
*);
320 int cxgbit_accept_np(struct iscsi_np
*, struct iscsi_conn
*);
321 void cxgbit_free_np(struct iscsi_np
*);
322 void cxgbit_abort_conn(struct cxgbit_sock
*csk
);
323 void cxgbit_free_conn(struct iscsi_conn
*);
324 extern cxgbit_cplhandler_func cxgbit_cplhandlers
[NUM_CPL_CMDS
];
325 int cxgbit_get_login_rx(struct iscsi_conn
*, struct iscsi_login
*);
326 int cxgbit_rx_data_ack(struct cxgbit_sock
*);
327 int cxgbit_l2t_send(struct cxgbit_device
*, struct sk_buff
*,
329 void cxgbit_push_tx_frames(struct cxgbit_sock
*);
330 int cxgbit_put_login_tx(struct iscsi_conn
*, struct iscsi_login
*, u32
);
331 int cxgbit_xmit_pdu(struct iscsi_conn
*, struct iscsi_cmd
*,
332 struct iscsi_datain_req
*, const void *, u32
);
333 void cxgbit_get_r2t_ttt(struct iscsi_conn
*, struct iscsi_cmd
*,
335 u32
cxgbit_send_tx_flowc_wr(struct cxgbit_sock
*);
336 int cxgbit_ofld_send(struct cxgbit_device
*, struct sk_buff
*);
337 void cxgbit_get_rx_pdu(struct iscsi_conn
*);
338 int cxgbit_validate_params(struct iscsi_conn
*);
339 struct cxgbit_device
*cxgbit_find_device(struct net_device
*, u8
*);
342 int cxgbit_ddp_init(struct cxgbit_device
*);
343 int cxgbit_setup_conn_pgidx(struct cxgbit_sock
*, u32
);
344 int cxgbit_reserve_ttt(struct cxgbit_sock
*, struct iscsi_cmd
*);
345 void cxgbit_unmap_cmd(struct iscsi_conn
*, struct iscsi_cmd
*);
348 struct cxgbi_ppm
*cdev2ppm(struct cxgbit_device
*cdev
)
350 return (struct cxgbi_ppm
*)(*cdev
->lldi
.iscsi_ppm
);
352 #endif /* __CXGBIT_H__ */