/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
                                 int wc_status,
                                 void (*complete)(struct rds_message *rm, int status))
{
        int notify_status;

        switch (wc_status) {
        case IB_WC_WR_FLUSH_ERR:
                return;

        case IB_WC_SUCCESS:
                notify_status = RDS_RDMA_SUCCESS;
                break;

        case IB_WC_REM_ACCESS_ERR:
                notify_status = RDS_RDMA_REMOTE_ERROR;
                break;

        default:
                notify_status = RDS_RDMA_OTHER_ERROR;
                break;
        }
        complete(rm, notify_status);
}

static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
                                   struct rm_data_op *op,
                                   int wc_status)
{
        if (op->op_nents)
                ib_dma_unmap_sg(ic->i_cm_id->device,
                                op->op_sg, op->op_nents,
                                DMA_TO_DEVICE);
}

static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
                                   struct rm_rdma_op *op,
                                   int wc_status)
{
        if (op->op_mapped) {
                ib_dma_unmap_sg(ic->i_cm_id->device,
                                op->op_sg, op->op_nents,
                                op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                op->op_mapped = 0;
        }

        /* If the user asked for a completion notification on this
         * message, we can implement three different semantics:
         *  1. Notify when we received the ACK on the RDS message
         *     that was queued with the RDMA. This provides reliable
         *     notification of RDMA status at the expense of a one-way
         *     packet delay.
         *  2. Notify when the IB stack gives us the completion event for
         *     the RDMA operation.
         *  3. Notify when the IB stack gives us the completion event for
         *     the accompanying RDS messages.
         * Here, we implement approach #3. To implement approach #2,
         * we would need to take an event for the rdma WR. To implement #1,
         * don't call rds_rdma_send_complete at all, and fall back to the notify
         * handling in the ACK processing code.
         *
         * Note: There's no need to explicitly sync any RDMA buffers using
         * ib_dma_sync_sg_for_cpu - the completion for the RDMA
         * operation itself unmapped the RDMA buffers, which takes care
         * of synching.
         */
        rds_ib_send_complete(container_of(op, struct rds_message, rdma),
                             wc_status, rds_rdma_send_complete);

        if (op->op_write)
                rds_stats_add(s_send_rdma_bytes, op->op_bytes);
        else
                rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}

static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
                                     struct rm_atomic_op *op,
                                     int wc_status)
{
        /* unmap atomic recvbuf */
        if (op->op_mapped) {
                ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
                                DMA_FROM_DEVICE);
                op->op_mapped = 0;
        }

        rds_ib_send_complete(container_of(op, struct rds_message, atomic),
                             wc_status, rds_atomic_send_complete);

        if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
                rds_ib_stats_inc(s_ib_atomic_cswp);
        else
                rds_ib_stats_inc(s_ib_atomic_fadd);
}

/*
 * Unmap the resources associated with a struct send_work.
 *
 * Returns the rm for no good reason other than it is unobtainable
 * other than by switching on wr.opcode, currently, and the caller,
 * the event handler, needs it.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
                                                struct rds_ib_send_work *send,
                                                int wc_status)
{
        struct rds_message *rm = NULL;

        /* In the error case, wc.opcode sometimes contains garbage */
        switch (send->s_wr.opcode) {
        case IB_WR_SEND:
                if (send->s_op) {
                        rm = container_of(send->s_op, struct rds_message, data);
                        rds_ib_send_unmap_data(ic, send->s_op, wc_status);
                }
                break;
        case IB_WR_RDMA_WRITE:
        case IB_WR_RDMA_READ:
                if (send->s_op) {
                        rm = container_of(send->s_op, struct rds_message, rdma);
                        rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
                }
                break;
        case IB_WR_ATOMIC_FETCH_AND_ADD:
        case IB_WR_ATOMIC_CMP_AND_SWP:
                if (send->s_op) {
                        rm = container_of(send->s_op, struct rds_message, atomic);
                        rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
                }
                break;
        default:
                printk_ratelimited(KERN_NOTICE
                        "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
                        __func__, send->s_wr.opcode);
                break;
        }

        send->s_wr.opcode = 0xdead;

        return rm;
}

void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
        struct rds_ib_send_work *send;
        u32 i;

        for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
                struct ib_sge *sge;

                send->s_op = NULL;

                send->s_wr.wr_id = i;
                send->s_wr.sg_list = send->s_sge;
                send->s_wr.ex.imm_data = 0;

                sge = &send->s_sge[0];
                sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
                sge->length = sizeof(struct rds_header);
                sge->lkey = ic->i_pd->local_dma_lkey;

                send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
        }
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
        struct rds_ib_send_work *send;
        u32 i;

        for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
                if (send->s_op && send->s_wr.opcode != 0xdead)
                        rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
        }
}

/*
 * The only fast path caller always has a non-zero nr, so we don't
 * bother testing nr before performing the atomic sub.
 */
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
        if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
            waitqueue_active(&rds_ib_ring_empty_wait))
                wake_up(&rds_ib_ring_empty_wait);
        BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path.  As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
        struct rds_message *rm = NULL;
        struct rds_connection *conn = ic->conn;
        struct rds_ib_send_work *send;
        u32 completed;
        u32 oldest;
        u32 i = 0;
        int nr_sig = 0;

        rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
                 (unsigned long long)wc->wr_id, wc->status,
                 ib_wc_status_msg(wc->status), wc->byte_len,
                 be32_to_cpu(wc->ex.imm_data));
        rds_ib_stats_inc(s_ib_tx_cq_event);

        if (wc->wr_id == RDS_IB_ACK_WR_ID) {
                if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
                        rds_ib_stats_inc(s_ib_tx_stalled);
                rds_ib_ack_send_complete(ic);
                return;
        }

        oldest = rds_ib_ring_oldest(&ic->i_send_ring);

        completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);

        for (i = 0; i < completed; i++) {
                send = &ic->i_sends[oldest];
                if (send->s_wr.send_flags & IB_SEND_SIGNALED)
                        nr_sig++;

                rm = rds_ib_send_unmap_op(ic, send, wc->status);

                if (time_after(jiffies, send->s_queued + HZ / 2))
                        rds_ib_stats_inc(s_ib_tx_stalled);

                if (send->s_op) {
                        if (send->s_op == rm->m_final_op) {
                                /* If anyone waited for this message to get
                                 * flushed out, wake them up now
                                 */
                                rds_message_unmapped(rm);
                        }
                        rds_message_put(rm);
                        send->s_op = NULL;
                }

                oldest = (oldest + 1) % ic->i_send_ring.w_nr;
        }

        rds_ib_ring_free(&ic->i_send_ring, completed);
        rds_ib_sub_signaled(ic, nr_sig);

        if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
            test_bit(0, &conn->c_map_queued))
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);

        /* We expect errors as the qp is drained during shutdown */
        if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
                rds_ib_conn_error(conn, "send completion on %pI4 had status %u (%s), disconnecting and reconnecting\n",
                                  &conn->c_faddr, wc->status,
                                  ib_wc_status_msg(wc->status));
        }
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
 *    each SEND WR we post, we decrement this by one.
 *
 *  - posted credits: this tells us how many WRs we recently
 *    posted to the receive queue. This value is transferred
 *    to the peer as a "credit update" in a RDS header field.
 *    Every time we transmit credits to the peer, we subtract
 *    the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
                             u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
        unsigned int avail, posted, got = 0, advertise;
        long oldval, newval;

        *adv_credits = 0;
        if (!ic->i_flowctl)
                return wanted;

try_again:
        advertise = 0;
        oldval = newval = atomic_read(&ic->i_credits);
        posted = IB_GET_POST_CREDITS(oldval);
        avail = IB_GET_SEND_CREDITS(oldval);

        rdsdebug("wanted=%u credits=%u posted=%u\n",
                 wanted, avail, posted);

        /* The last credit must be used to send a credit update. */
        if (avail && !posted)
                avail--;

        if (avail < wanted) {
                struct rds_connection *conn = ic->i_cm_id->context;

                /* Oops, there aren't that many credits left! */
                set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
                got = avail;
        } else {
                /* Sometimes you get what you want, lalala. */
                got = wanted;
        }
        newval -= IB_SET_SEND_CREDITS(got);

        /*
         * If need_posted is non-zero, then the caller wants
         * the posted regardless of whether any send credits are
         * available.
         */
        if (posted && (got || need_posted)) {
                advertise = min_t(unsigned int, posted, max_posted);
                newval -= IB_SET_POST_CREDITS(advertise);
        }

        /* Finally bill everything */
        if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
                goto try_again;

        *adv_credits = advertise;
        return got;
}

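/*
 * A sketch of the packed-counter layout assumed by the helpers above.
 * The real IB_SET_* and IB_GET_* macros live in ib.h; from their use in
 * this file, the packing is equivalent to keeping send credits in the
 * low 16 bits and posted credits in the high 16 bits of one atomic_t:
 *
 *	#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_SET_POST_CREDITS(v)	((v) << 16)
 *	#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_GET_POST_CREDITS(v)	((v) >> 16)
 *
 * With that layout, atomic_add() of an IB_SET_POST_CREDITS() value only
 * bumps the posted half, while the cmpxchg loop above can decrement both
 * halves in a single compare-and-swap.
 */
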
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        if (credits == 0)
                return;

        rdsdebug("credits=%u current=%u%s\n",
                 credits,
                 IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
                 test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

        atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
        if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);

        WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

        rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        if (posted == 0)
                return;

        atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

        /* Decide whether to send an update to the peer now.
         * If we would send a credit update for every single buffer we
         * post, we would end up with an ACK storm (ACK arrives,
         * consumes buffer, we refill the ring, send ACK to remote
         * advertising the newly posted buffer... ad inf)
         *
         * Performance pretty much depends on how often we send
         * credit updates - too frequent updates mean lots of ACKs.
         * Too infrequent updates, and the peer will run out of
         * credits and have to throttle.
         * For the time being, 16 seems to be a good compromise.
         */
        if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

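/*
 * Worked example of the threshold in rds_ib_advertise_credits(): refilling
 * the receive ring one buffer at a time accumulates one post credit per
 * call, so IB_ACK_REQUESTED is only raised around every 16th refill and a
 * single piggybacked credit update covers the whole batch, rather than
 * generating one ACK per posted buffer.
 */
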
static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
                                             struct rds_ib_send_work *send,
                                             bool notify)
{
        /*
         * We want to delay signaling completions just enough to get
         * the batching benefits but not so much that we create dead time
         * on the wire.
         */
        if (ic->i_unsignaled_wrs-- == 0 || notify) {
                ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
                send->s_wr.send_flags |= IB_SEND_SIGNALED;
                return 1;
        }
        return 0;
}

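/*
 * Illustrative arithmetic for rds_ib_set_wr_signal_state(): if
 * rds_ib_sysctl_max_unsig_wrs were 16, the post-decrement above would hit
 * zero once every 17 calls, so roughly one WR per batch carries
 * IB_SEND_SIGNALED and generates a completion event, unless "notify"
 * forces a signal for a specific WR.
 */
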
/*
 * This can be called multiple times for a given message.  The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests.  We translate the scatterlist into a series
 * of work requests that fragment the message.  These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection.  This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
                unsigned int hdr_off, unsigned int sg, unsigned int off)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_device *dev = ic->i_cm_id->device;
        struct rds_ib_send_work *send = NULL;
        struct rds_ib_send_work *first;
        struct rds_ib_send_work *prev;
        struct ib_send_wr *failed_wr;
        struct scatterlist *scat;
        u32 pos;
        u32 i;
        u32 work_alloc;
        u32 credit_alloc = 0;
        u32 posted;
        u32 adv_credits = 0;
        int send_flags = 0;
        int bytes_sent = 0;
        int ret;
        int flow_controlled = 0;
        int nr_sig = 0;

        BUG_ON(off % RDS_FRAG_SIZE);
        BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

        /* Do not send cong updates to IB loopback */
        if (conn->c_loopback
            && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
                rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
                scat = &rm->data.op_sg[sg];
                ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
                return sizeof(struct rds_header) + ret;
        }

        /* FIXME we may overallocate here */
        if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
                i = 1;
        else
                i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
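
        /*
         * Worked example (illustrative, assuming RDS_FRAG_SIZE is 4096):
         * a 9000-byte message needs ceil(9000, 4096) = 3 work requests,
         * and the loop below copies a fresh rds_header in front of each
         * fragment.
         */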

        work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
        if (work_alloc == 0) {
                set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
                rds_ib_stats_inc(s_ib_tx_ring_full);
                ret = -ENOMEM;
                goto out;
        }

        if (ic->i_flowctl) {
                credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
                adv_credits += posted;
                if (credit_alloc < work_alloc) {
                        rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
                        work_alloc = credit_alloc;
                        flow_controlled = 1;
                }
                if (work_alloc == 0) {
                        set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
                        rds_ib_stats_inc(s_ib_tx_throttle);
                        ret = -ENOMEM;
                        goto out;
                }
        }

        /* map the message the first time we see it */
        if (!ic->i_data_op) {
                if (rm->data.op_nents) {
                        rm->data.op_count = ib_dma_map_sg(dev,
                                                          rm->data.op_sg,
                                                          rm->data.op_nents,
                                                          DMA_TO_DEVICE);
                        rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
                        if (rm->data.op_count == 0) {
                                rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
                                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                                ret = -ENOMEM; /* XXX ? */
                                goto out;
                        }
                } else {
                        rm->data.op_count = 0;
                }

                rds_message_addref(rm);
                rm->data.op_dmasg = 0;
                rm->data.op_dmaoff = 0;
                ic->i_data_op = &rm->data;

                /* Finalize the header */
                if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
                        rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
                if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
                        rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

                /* If it has a RDMA op, tell the peer we did it. This is
                 * used by the peer to release use-once RDMA MRs. */
                if (rm->rdma.op_active) {
                        struct rds_ext_header_rdma ext_hdr;

                        ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
                        rds_message_add_extension(&rm->m_inc.i_hdr,
                                                  RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
                }
                if (rm->m_rdma_cookie) {
                        rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
                                        rds_rdma_cookie_key(rm->m_rdma_cookie),
                                        rds_rdma_cookie_offset(rm->m_rdma_cookie));
                }

                /* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
                 * we should not do this unless we have a chance of at least
                 * sticking the header into the send ring. Which is why we
                 * should call rds_ib_ring_alloc first. */
                rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
                rds_message_make_checksum(&rm->m_inc.i_hdr);

                /*
                 * Update adv_credits since we reset the ACK_REQUIRED bit.
                 */
                if (ic->i_flowctl) {
                        rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
                        adv_credits += posted;
                        BUG_ON(adv_credits > 255);
                }
        }

        /* Sometimes you want to put a fence between an RDMA
         * READ and the following SEND.
         * We could either do this all the time
         * or when requested by the user. Right now, we let
         * the application choose.
         */
        if (rm->rdma.op_active && rm->rdma.op_fence)
                send_flags = IB_SEND_FENCE;

        /* Each frag gets a header. Msgs may be 0 bytes */
        send = &ic->i_sends[pos];
        first = send;
        prev = NULL;
        scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
        i = 0;
        do {
                unsigned int len = 0;

                /* Set up the header */
                send->s_wr.send_flags = send_flags;
                send->s_wr.opcode = IB_WR_SEND;
                send->s_wr.num_sge = 1;
                send->s_wr.next = NULL;
                send->s_queued = jiffies;
                send->s_op = NULL;

                send->s_sge[0].addr = ic->i_send_hdrs_dma
                        + (pos * sizeof(struct rds_header));
                send->s_sge[0].length = sizeof(struct rds_header);

                memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

                /* Set up the data, if present */
                if (i < work_alloc
                    && scat != &rm->data.op_sg[rm->data.op_count]) {
                        len = min(RDS_FRAG_SIZE,
                                  ib_sg_dma_len(dev, scat) - rm->data.op_dmaoff);
                        send->s_wr.num_sge = 2;

                        send->s_sge[1].addr = ib_sg_dma_address(dev, scat);
                        send->s_sge[1].addr += rm->data.op_dmaoff;
                        send->s_sge[1].length = len;

                        bytes_sent += len;
                        rm->data.op_dmaoff += len;
                        if (rm->data.op_dmaoff == ib_sg_dma_len(dev, scat)) {
                                scat++;
                                rm->data.op_dmasg++;
                                rm->data.op_dmaoff = 0;
                        }
                }

                rds_ib_set_wr_signal_state(ic, send, 0);

                /*
                 * Always signal the last one if we're stopping due to flow control.
                 */
                if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1))
                        send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

                if (send->s_wr.send_flags & IB_SEND_SIGNALED)
                        nr_sig++;

                rdsdebug("send %p wr %p num_sge %u next %p\n", send,
                         &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

                if (ic->i_flowctl && adv_credits) {
                        struct rds_header *hdr = &ic->i_send_hdrs[pos];

                        /* add credit and redo the header checksum */
                        hdr->h_credit = adv_credits;
                        rds_message_make_checksum(hdr);
                        adv_credits = 0;
                        rds_ib_stats_inc(s_ib_tx_credit_updates);
                }

                if (prev)
                        prev->s_wr.next = &send->s_wr;
                prev = send;

                pos = (pos + 1) % ic->i_send_ring.w_nr;
                send = &ic->i_sends[pos];
                i++;

        } while (i < work_alloc
                 && scat != &rm->data.op_sg[rm->data.op_count]);

        /* Account the RDS header in the number of bytes we sent, but just once.
         * The caller has no concept of fragmentation. */
        if (hdr_off == 0)
                bytes_sent += sizeof(struct rds_header);

        /* if we finished the message then send completion owns it */
        if (scat == &rm->data.op_sg[rm->data.op_count]) {
                prev->s_op = ic->i_data_op;
                prev->s_wr.send_flags |= IB_SEND_SOLICITED;
                if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) {
                        ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
                        prev->s_wr.send_flags |= IB_SEND_SIGNALED;
                        nr_sig++;
                }
                ic->i_data_op = NULL;
        }

        /* Put back wrs & credits we didn't use */
        if (i < work_alloc) {
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
                work_alloc = i;
        }
        if (ic->i_flowctl && i < credit_alloc)
                rds_ib_send_add_credits(conn, credit_alloc - i);

        if (nr_sig)
                atomic_add(nr_sig, &ic->i_signaled_sends);

        /* XXX need to worry about failed_wr and partial sends. */
        failed_wr = &first->s_wr;
        ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
        rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
                 first, &first->s_wr, ret, failed_wr);
        BUG_ON(failed_wr != &first->s_wr);
        if (ret) {
                printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
                       "returned %d\n", &conn->c_faddr, ret);
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                rds_ib_sub_signaled(ic, nr_sig);
                if (prev->s_op) {
                        ic->i_data_op = prev->s_op;
                        prev->s_op = NULL;
                }

                rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
                goto out;
        }

        ret = bytes_sent;
out:
        BUG_ON(adv_credits);
        return ret;
}

/*
 * Issue atomic operation.
 * A simplified version of the rdma case, we always map 1 SG, and
 * only 8 bytes, for the return value from the atomic operation.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_send_work *send = NULL;
        struct ib_send_wr *failed_wr;
        struct rds_ib_device *rds_ibdev;
        u32 pos;
        u32 work_alloc;
        int ret;
        int nr_sig = 0;

        rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

        work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
        if (work_alloc != 1) {
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                rds_ib_stats_inc(s_ib_tx_ring_full);
                ret = -ENOMEM;
                goto out;
        }

        /* address of send request in ring */
        send = &ic->i_sends[pos];
        send->s_queued = jiffies;

        if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
                send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
                send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
                send->s_atomic_wr.swap = op->op_m_cswp.swap;
                send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
                send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
        } else { /* FADD */
                send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
                send->s_atomic_wr.compare_add = op->op_m_fadd.add;
                send->s_atomic_wr.swap = 0;
                send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
                send->s_atomic_wr.swap_mask = 0;
        }
        nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
        send->s_atomic_wr.wr.num_sge = 1;
        send->s_atomic_wr.wr.next = NULL;
        send->s_atomic_wr.remote_addr = op->op_remote_addr;
        send->s_atomic_wr.rkey = op->op_rkey;
        send->s_op = op;
        rds_message_addref(container_of(send->s_op, struct rds_message, atomic));

        /* map 8 byte retval buffer to the device */
        ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
        rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
        if (ret != 1) {
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
                ret = -ENOMEM; /* XXX ? */
                goto out;
        }

        /* Convert our struct scatterlist to struct ib_sge */
        send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
        send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
        send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

        rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
                 send->s_sge[0].addr, send->s_sge[0].length);

        if (nr_sig)
                atomic_add(nr_sig, &ic->i_signaled_sends);

        failed_wr = &send->s_atomic_wr.wr;
        ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
        rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
                 send, &send->s_atomic_wr, ret, failed_wr);
        BUG_ON(failed_wr != &send->s_atomic_wr.wr);
        if (ret) {
                printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
                       "returned %d\n", &conn->c_faddr, ret);
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                rds_ib_sub_signaled(ic, nr_sig);
                goto out;
        }

        if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
                printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
                BUG_ON(failed_wr != &send->s_atomic_wr.wr);
        }

out:
        return ret;
}

int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_send_work *send = NULL;
        struct rds_ib_send_work *first;
        struct rds_ib_send_work *prev;
        struct ib_send_wr *failed_wr;
        struct scatterlist *scat;
        unsigned long len;
        u64 remote_addr = op->op_remote_addr;
        u32 max_sge = ic->rds_ibdev->max_sge;
        u32 pos;
        u32 work_alloc;
        u32 i;
        u32 j;
        int sent;
        int ret;
        int num_sge;
        int nr_sig = 0;

        /* map the op the first time we see it */
        if (!op->op_mapped) {
                op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
                                             op->op_sg, op->op_nents, (op->op_write) ?
                                             DMA_TO_DEVICE : DMA_FROM_DEVICE);
                rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
                if (op->op_count == 0) {
                        rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
                        ret = -ENOMEM; /* XXX ? */
                        goto out;
                }

                op->op_mapped = 1;
        }

        /*
         * Instead of knowing how to return a partial rdma read/write we insist that there
         * be enough work requests to send the entire message.
         */
        i = ceil(op->op_count, max_sge);

        work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
        if (work_alloc != i) {
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                rds_ib_stats_inc(s_ib_tx_ring_full);
                ret = -ENOMEM;
                goto out;
        }

        send = &ic->i_sends[pos];
        first = send;
        prev = NULL;
        scat = &op->op_sg[0];
        sent = 0;
        num_sge = op->op_count;

        for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
                send->s_wr.send_flags = 0;
                send->s_queued = jiffies;
                send->s_op = NULL;

                nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);

                send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
                send->s_rdma_wr.remote_addr = remote_addr;
                send->s_rdma_wr.rkey = op->op_rkey;

                if (num_sge > max_sge) {
                        send->s_rdma_wr.wr.num_sge = max_sge;
                        num_sge -= max_sge;
                } else {
                        send->s_rdma_wr.wr.num_sge = num_sge;
                }

                send->s_rdma_wr.wr.next = NULL;

                if (prev)
                        prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;

                for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
                     scat != &op->op_sg[op->op_count]; j++) {
                        len = ib_sg_dma_len(ic->i_cm_id->device, scat);
                        send->s_sge[j].addr =
                                ib_sg_dma_address(ic->i_cm_id->device, scat);
                        send->s_sge[j].length = len;
                        send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;

                        sent += len;
                        rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

                        remote_addr += len;
                        scat++;
                }

                rdsdebug("send %p wr %p num_sge %u next %p\n", send,
                         &send->s_rdma_wr.wr,
                         send->s_rdma_wr.wr.num_sge,
                         send->s_rdma_wr.wr.next);

                prev = send;
                if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
                        send = ic->i_sends;
        }

        /* give a reference to the last op */
        if (scat == &op->op_sg[op->op_count]) {
                prev->s_op = op;
                rds_message_addref(container_of(op, struct rds_message, rdma));
        }

        if (i < work_alloc) {
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
                work_alloc = i;
        }

        if (nr_sig)
                atomic_add(nr_sig, &ic->i_signaled_sends);

        failed_wr = &first->s_rdma_wr.wr;
        ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
        rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
                 first, &first->s_rdma_wr.wr, ret, failed_wr);
        BUG_ON(failed_wr != &first->s_rdma_wr.wr);
        if (ret) {
                printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
                       "returned %d\n", &conn->c_faddr, ret);
                rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                rds_ib_sub_signaled(ic, nr_sig);
                goto out;
        }

        if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
                printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
                BUG_ON(failed_wr != &first->s_rdma_wr.wr);
        }

out:
        return ret;
}

void rds_ib_xmit_path_complete(struct rds_conn_path *cp)
{
        struct rds_connection *conn = cp->cp_conn;
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* We may have a pending ACK or window update we were unable
         * to send previously (due to flow control). Try again. */
        rds_ib_attempt_ack(ic);
}