#include <linux/interrupt.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FASTREG_SIZE		20
#define RDS_FASTREG_POOL_SIZE		2048

#define RDS_IW_MAX_SGE			8
#define RDS_IW_RECV_SGE			2

#define RDS_IW_DEFAULT_RECV_WR		1024
#define RDS_IW_DEFAULT_SEND_WR		256

#define RDS_IW_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

extern struct list_head rds_iw_devices;
/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try and minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
/* page offset of the final full frag that fits in the page */
#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
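/*
 * Worked example (illustrative values only; RDS_FRAG_SIZE is defined
 * elsewhere): with PAGE_SIZE == 4096 and RDS_FRAG_SIZE == 2048, a page
 * holds two full frags, so RDS_PAGE_LAST_OFF == (2 - 1) * 2048 == 2048,
 * the offset at which the last full frag in the page starts.
 */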
struct rds_page_frag {
	struct list_head	f_item;
	struct page		*f_page;
	unsigned long		f_offset;
	dma_addr_t		f_mapped;
};
struct rds_iw_incoming {
	struct list_head	ii_frags;
	struct rds_incoming	ii_inc;
};
struct rds_iw_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32			dp_saddr;
	__be32			dp_daddr;
	u8			dp_protocol_major;
	u8			dp_protocol_minor;
	__be16			dp_protocol_minor_mask; /* bitmask */
	__be32			dp_reserved1;
	__be64			dp_ack_seq;
	__be32			dp_credit;	/* non-zero enables flow ctl */
};
struct rds_iw_scatterlist {
	struct scatterlist	*list;
	unsigned int		len;
	int			dma_len;
	unsigned int		dma_npages;
	unsigned int		bytes;
};
struct rds_iw_mapping {
	spinlock_t		m_lock;	/* protect the mapping struct */
	struct list_head	m_list;
	struct rds_iw_mr	*m_mr;
	uint32_t		m_rkey;
	struct rds_iw_scatterlist m_sg;
};
struct rds_iw_send_work {
	struct rds_message	*s_rm;

	/* We should really put these into a union: */
	struct rm_rdma_op	*s_op;
	struct rds_iw_mapping	*s_mapping;
	struct ib_mr		*s_mr;
	unsigned char		s_remap_count;

	union {
		struct ib_send_wr	s_send_wr;
		struct ib_rdma_wr	s_rdma_wr;
		struct ib_reg_wr	s_reg_wr;
	};
	struct ib_sge		s_sge[RDS_IW_MAX_SGE];
	unsigned long		s_queued;
};
struct rds_iw_recv_work {
	struct rds_iw_incoming	*r_iwinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};
struct rds_iw_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};

struct rds_iw_device;
struct rds_iw_connection {

	struct list_head	iw_node;
	struct rds_iw_device	*rds_iwdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_mr		*i_mr;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;

	/* tx */
	struct rds_iw_work_ring	i_send_ring;
	struct rds_message	*i_rm;
	struct rds_header	*i_send_hdrs;
	u64			i_send_hdrs_dma;
	struct rds_iw_send_work *i_sends;

	/* rx */
	struct tasklet_struct	i_recv_tasklet;
	struct mutex		i_recv_mutex;
	struct rds_iw_work_ring	i_recv_ring;
	struct rds_iw_incoming	*i_iwinc;
	u32			i_recv_data_rem;
	struct rds_header	*i_recv_hdrs;
	u64			i_recv_hdrs_dma;
	struct rds_iw_recv_work *i_recvs;
	struct rds_page_frag	i_frag;
	u64			i_ack_recv;	/* last ACK received */

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	u64			i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */
	unsigned int		i_dma_local_lkey:1;
	unsigned int		i_fastreg_posted:1; /* fastreg posted on this connection */
	/* Batched completions */
	unsigned int		i_unsignaled_wrs;
	long			i_unsignaled_bytes;
};
/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
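/*
 * Illustrative sketch, not part of the original header: how the two
 * 16-bit counters packed into a single atomic_t can be updated
 * losslessly with cmpxchg, as the comment in struct rds_iw_connection
 * describes. The helper name is an assumption added for illustration;
 * the real credit handling (including overflow guards) lives in the
 * send path.
 */
static inline void rds_iw_example_add_send_credits(atomic_t *credits,
						   unsigned int extra)
{
	unsigned int oldval, newval;

	do {
		oldval = atomic_read(credits);
		/* bump the send half, carry the post half over unchanged */
		newval = IB_SET_SEND_CREDITS(IB_GET_SEND_CREDITS(oldval) + extra) |
			 IB_SET_POST_CREDITS(IB_GET_POST_CREDITS(oldval));
	} while (atomic_cmpxchg(credits, oldval, newval) != oldval);
}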
struct rds_iw_cm_id {
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
};
struct rds_iw_device {
	struct list_head	list;
	struct list_head	cm_id_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct ib_mr		*mr;
	struct rds_iw_mr_pool	*mr_pool;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		dma_local_lkey:1;
	spinlock_t		spinlock;	/* protect the above */
};
/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IW_ACK_WR_ID	((u64)0xffffffffffffffffULL)
#define RDS_IW_REG_WR_ID	((u64)0xefefefefefefefefULL)
#define RDS_IW_LOCAL_INV_WR_ID	((u64)0xdfdfdfdfdfdfdfdfULL)
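/*
 * Illustrative sketch, not part of the original header: completion
 * handlers can compare wc->wr_id against the magic values above to
 * tell control work requests apart from ring entries. This helper is
 * an assumption added for illustration only.
 */
static inline int rds_iw_example_wc_is_ack(const struct ib_wc *wc)
{
	return wc->wr_id == RDS_IW_ACK_WR_ID;
}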
struct rds_iw_statistics {
	uint64_t	s_iw_connect_raced;
	uint64_t	s_iw_listen_closed_stale;
	uint64_t	s_iw_tx_cq_call;
	uint64_t	s_iw_tx_cq_event;
	uint64_t	s_iw_tx_ring_full;
	uint64_t	s_iw_tx_throttle;
	uint64_t	s_iw_tx_sg_mapping_failure;
	uint64_t	s_iw_tx_stalled;
	uint64_t	s_iw_tx_credit_updates;
	uint64_t	s_iw_rx_cq_call;
	uint64_t	s_iw_rx_cq_event;
	uint64_t	s_iw_rx_ring_empty;
	uint64_t	s_iw_rx_refill_from_cq;
	uint64_t	s_iw_rx_refill_from_thread;
	uint64_t	s_iw_rx_alloc_limit;
	uint64_t	s_iw_rx_credit_updates;
	uint64_t	s_iw_ack_sent;
	uint64_t	s_iw_ack_send_failure;
	uint64_t	s_iw_ack_send_delayed;
	uint64_t	s_iw_ack_send_piggybacked;
	uint64_t	s_iw_ack_received;
	uint64_t	s_iw_rdma_mr_alloc;
	uint64_t	s_iw_rdma_mr_free;
	uint64_t	s_iw_rdma_mr_used;
	uint64_t	s_iw_rdma_mr_pool_flush;
	uint64_t	s_iw_rdma_mr_pool_wait;
	uint64_t	s_iw_rdma_mr_pool_depleted;
};
extern struct workqueue_struct *rds_iw_wq;
/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_iw_dma_sync_sg_for_cpu(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_cpu(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_iw_dma_sync_sg_for_cpu
static inline void rds_iw_dma_sync_sg_for_device(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_device(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_iw_dma_sync_sg_for_device
static inline u32 rds_iw_local_dma_lkey(struct rds_iw_connection *ic)
{
	return ic->i_dma_local_lkey ? ic->i_cm_id->device->local_dma_lkey : ic->i_mr->lkey;
}
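/*
 * Example usage (illustrative, not from the original header): a caller
 * filling an SGE for a DMA-mapped buffer would typically do:
 *
 *	sge->addr   = dma_addr;
 *	sge->length = len;
 *	sge->lkey   = rds_iw_local_dma_lkey(ic);
 */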
extern struct rds_transport rds_iw_transport;
extern struct ib_client rds_iw_client;

extern unsigned int fastreg_pool_size;
extern unsigned int fastreg_message_size;

extern spinlock_t iw_nodev_conns_lock;
extern struct list_head iw_nodev_conns;
int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_iw_conn_free(void *arg);
int rds_iw_conn_connect(struct rds_connection *conn);
void rds_iw_conn_shutdown(struct rds_connection *conn);
void rds_iw_state_change(struct sock *sk);
int rds_iw_listen_init(void);
void rds_iw_listen_stop(void);
void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...);
int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_iw_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);
#define rds_iw_conn_error(conn, fmt...) \
	__rds_iw_conn_error(conn, KERN_WARNING "RDS/IW: " fmt)
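/*
 * Example usage (illustrative): the format string is printk-style, and
 * the macro prepends the severity and "RDS/IW: " prefix itself:
 *
 *	rds_iw_conn_error(conn, "connection failed (%d)\n", err);
 */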
int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock);
static inline void rds_iw_destroy_nodev_conns(void)
{
	__rds_iw_destroy_conns(&iw_nodev_conns, &iw_nodev_conns_lock);
}
static inline void rds_iw_destroy_conns(struct rds_iw_device *rds_iwdev)
{
	__rds_iw_destroy_conns(&rds_iwdev->conn_list, &rds_iwdev->spinlock);
}
struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *);
void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo);
void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *);
void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
	struct rds_sock *rs, u32 *key_ret);
void rds_iw_sync_mr(void *trans_private, int dir);
void rds_iw_free_mr(void *trans_private, int invalidate);
void rds_iw_flush_mrs(void);
int rds_iw_recv_init(void);
void rds_iw_recv_exit(void);
int rds_iw_recv(struct rds_connection *conn);
int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
		       gfp_t page_gfp, int prefill);
void rds_iw_inc_free(struct rds_incoming *inc);
int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_recv_tasklet_fn(unsigned long data);
void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
void rds_iw_recv_clear_ring(struct rds_iw_connection *ic);
void rds_iw_recv_init_ack(struct rds_iw_connection *ic);
void rds_iw_attempt_ack(struct rds_iw_connection *ic);
void rds_iw_ack_send_complete(struct rds_iw_connection *ic);
u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic);
void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr);
void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr);
u32 rds_iw_ring_alloc(struct rds_iw_work_ring *ring, u32 val, u32 *pos);
void rds_iw_ring_free(struct rds_iw_work_ring *ring, u32 val);
void rds_iw_ring_unalloc(struct rds_iw_work_ring *ring, u32 val);
int rds_iw_ring_empty(struct rds_iw_work_ring *ring);
int rds_iw_ring_low(struct rds_iw_work_ring *ring);
u32 rds_iw_ring_oldest(struct rds_iw_work_ring *ring);
u32 rds_iw_ring_completed(struct rds_iw_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_iw_ring_empty_wait;
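/*
 * Illustrative usage (an assumption, not from the original header): a
 * send path typically reserves ring slots up front and returns any it
 * could not post, e.g.:
 *
 *	u32 pos;
 *	u32 got = rds_iw_ring_alloc(&ic->i_send_ring, wanted, &pos);
 *	...post 'got' work requests starting at index 'pos'...
 *	if (post_failed)
 *		rds_iw_ring_unalloc(&ic->i_send_ring, got);
 */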
void rds_iw_xmit_complete(struct rds_connection *conn);
int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_send_init_ring(struct rds_iw_connection *ic);
void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
DECLARE_PER_CPU(struct rds_iw_statistics, rds_iw_stats);
#define rds_iw_stats_inc(member) rds_stats_inc_which(rds_iw_stats, member)
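/*
 * Example usage (illustrative): bump a per-cpu counter by member name,
 * e.g. when the send ring has no free entries:
 *
 *	rds_iw_stats_inc(s_iw_tx_ring_full);
 */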
unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);
int rds_iw_sysctl_init(void);
void rds_iw_sysctl_exit(void);
extern unsigned long rds_iw_sysctl_max_send_wr;
extern unsigned long rds_iw_sysctl_max_recv_wr;
extern unsigned long rds_iw_sysctl_max_unsig_wrs;
extern unsigned long rds_iw_sysctl_max_unsig_bytes;
extern unsigned long rds_iw_sysctl_max_recv_allocation;
extern unsigned int rds_iw_sysctl_flow_control;
/*
 * Helper functions for getting/setting the header and data SGEs in
 * RDS packets (not RDMA)
 */
static inline struct ib_sge *
rds_iw_header_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
{
	return &sge[0];
}

static inline struct ib_sge *
rds_iw_data_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
{
	return &sge[1];
}