/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds.h"
#include "iw.h"

static struct kmem_cache *rds_iw_incoming_slab;
static struct kmem_cache *rds_iw_frag_slab;
static atomic_t rds_iw_allocation = ATOMIC_INIT(0);
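
/*
 * rds_iw_allocation counts the rds_iw_incoming structs currently allocated
 * across all connections; refill stops allocating once it reaches the
 * rds_iw_sysctl_max_recv_allocation cap defaulted in rds_iw_recv_init().
 */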
static void rds_iw_frag_drop_page(struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, frag->f_page);
	__free_page(frag->f_page);
	frag->f_page = NULL;
}
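
/* The frag's page reference must already have been dropped or handed off
 * to an inc; this only returns the frag itself to its slab. */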
static void rds_iw_frag_free(struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, frag->f_page);
	BUG_ON(frag->f_page);
	kmem_cache_free(rds_iw_frag_slab, frag);
}

/*
 * We map a page at a time.  Its fragments are posted in order.  This
 * is called in fragment order as the fragments get receive completion events.
 * Only the last frag in the page performs the unmapping.
 *
 * It's OK for ring cleanup to call this in whatever order it likes because
 * DMA is not in flight and so we can unmap while other ring entries still
 * hold page references in their frags.
 */
static void rds_iw_recv_unmap_page(struct rds_iw_connection *ic,
				   struct rds_iw_recv_work *recv)
{
	struct rds_page_frag *frag = recv->r_frag;

	rdsdebug("recv %p frag %p page %p\n", recv, frag, frag->f_page);
	if (frag->f_mapped)
		ib_dma_unmap_page(ic->i_cm_id->device,
				  frag->f_mapped,
				  RDS_FRAG_SIZE, DMA_FROM_DEVICE);
	frag->f_mapped = 0;
}

void rds_iw_recv_init_ring(struct rds_iw_connection *ic)
{
	struct rds_iw_recv_work *recv;
	u32 i;

	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
		struct ib_sge *sge;

		recv->r_iwinc = NULL;
		recv->r_frag = NULL;

		recv->r_wr.next = NULL;
		recv->r_wr.wr_id = i;
		recv->r_wr.sg_list = recv->r_sge;
		recv->r_wr.num_sge = RDS_IW_RECV_SGE;

		sge = rds_iw_data_sge(ic, recv->r_sge);
		sge->addr = 0;
		sge->length = RDS_FRAG_SIZE;
		sge->lkey = 0;

		sge = rds_iw_header_sge(ic, recv->r_sge);
		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = 0;
	}
}
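
/*
 * Release whatever a single ring entry holds: the inc that would have
 * queued its message to a socket, and the frag (plus its page reference)
 * that would have carried the data.
 */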
static void rds_iw_recv_clear_one(struct rds_iw_connection *ic,
				  struct rds_iw_recv_work *recv)
{
	if (recv->r_iwinc) {
		rds_inc_put(&recv->r_iwinc->ii_inc);
		recv->r_iwinc = NULL;
	}
	if (recv->r_frag) {
		rds_iw_recv_unmap_page(ic, recv);
		if (recv->r_frag->f_page)
			rds_iw_frag_drop_page(recv->r_frag);
		rds_iw_frag_free(recv->r_frag);
		recv->r_frag = NULL;
	}
}

void rds_iw_recv_clear_ring(struct rds_iw_connection *ic)
{
	u32 i;

	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
		rds_iw_recv_clear_one(ic, &ic->i_recvs[i]);

	if (ic->i_frag.f_page)
		rds_iw_frag_drop_page(&ic->i_frag);
}
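
/*
 * Prepare one ring entry for posting: make sure it has an inc and a frag,
 * carve the next RDS_FRAG_SIZE fragment out of the connection's current
 * page (allocating a fresh page once the old one is used up), DMA-map it,
 * and point the entry's data and header SGEs at the right places.
 */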
static int rds_iw_recv_refill_one(struct rds_connection *conn,
				  struct rds_iw_recv_work *recv,
				  gfp_t kptr_gfp, gfp_t page_gfp)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	dma_addr_t dma_addr;
	struct ib_sge *sge;
	int ret = -ENOMEM;

	if (!recv->r_iwinc) {
		if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
			rds_iw_stats_inc(s_iw_rx_alloc_limit);
			goto out;
		}
		recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
						 kptr_gfp);
		if (!recv->r_iwinc) {
			atomic_dec(&rds_iw_allocation);
			goto out;
		}
		INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
		rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
	}

	if (!recv->r_frag) {
		recv->r_frag = kmem_cache_alloc(rds_iw_frag_slab, kptr_gfp);
		if (!recv->r_frag)
			goto out;
		INIT_LIST_HEAD(&recv->r_frag->f_item);
		recv->r_frag->f_page = NULL;
	}

	if (!ic->i_frag.f_page) {
		ic->i_frag.f_page = alloc_page(page_gfp);
		if (!ic->i_frag.f_page)
			goto out;
		ic->i_frag.f_offset = 0;
	}

	dma_addr = ib_dma_map_page(ic->i_cm_id->device,
				   ic->i_frag.f_page,
				   ic->i_frag.f_offset,
				   RDS_FRAG_SIZE,
				   DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr))
		goto out;

	/*
	 * Once we get the RDS_PAGE_LAST_OFF frag then rds_iw_frag_unmap()
	 * must be called on this recv.  This happens as completions hit
	 * in order or on connection shutdown.
	 */
	recv->r_frag->f_page = ic->i_frag.f_page;
	recv->r_frag->f_offset = ic->i_frag.f_offset;
	recv->r_frag->f_mapped = dma_addr;

	sge = rds_iw_data_sge(ic, recv->r_sge);
	sge->addr = dma_addr;
	sge->length = RDS_FRAG_SIZE;

	sge = rds_iw_header_sge(ic, recv->r_sge);
	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
	sge->length = sizeof(struct rds_header);

	get_page(recv->r_frag->f_page);

	if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) {
		ic->i_frag.f_offset += RDS_FRAG_SIZE;
	} else {
		put_page(ic->i_frag.f_page);
		ic->i_frag.f_page = NULL;
		ic->i_frag.f_offset = 0;
	}

	ret = 0;
out:
	return ret;
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.  The i_recv_mutex is held here so that ring_alloc and _unalloc
 * pairs don't go unmatched.
 *
 * -1 is returned if posting fails due to temporary resource exhaustion.
 */
int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
		       gfp_t page_gfp, int prefill)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_recv_work *recv;
	struct ib_recv_wr *failed_wr;
	unsigned int posted = 0;
	int ret = 0;
	u32 pos;

	while ((prefill || rds_conn_up(conn)) &&
	       rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
		if (pos >= ic->i_recv_ring.w_nr) {
			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
			       pos);
			ret = -EINVAL;
			break;
		}

		recv = &ic->i_recvs[pos];
		ret = rds_iw_recv_refill_one(conn, recv, kptr_gfp, page_gfp);
		if (ret) {
			ret = -1;
			break;
		}

		/* XXX when can this fail? */
		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
		rdsdebug("recv %p iwinc %p page %p addr %lu ret %d\n", recv,
			 recv->r_iwinc, recv->r_frag->f_page,
			 (long) recv->r_frag->f_mapped, ret);
		if (ret) {
			rds_iw_conn_error(conn, "recv post on "
				"%pI4 returned %d, disconnecting and "
				"reconnecting\n", &conn->c_faddr,
				ret);
			ret = -1;
			break;
		}

		posted++;
	}

	/* We're doing flow control - update the window. */
	if (ic->i_flowctl && posted)
		rds_iw_advertise_credits(conn, posted);

	if (ret)
		rds_iw_ring_unalloc(&ic->i_recv_ring, 1);
	return ret;
}
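
/*
 * Drop every fragment still queued on an incoming message, releasing the
 * page reference each frag holds before returning the frag to its slab.
 */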
static void rds_iw_inc_purge(struct rds_incoming *inc)
{
	struct rds_iw_incoming *iwinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;

	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
	rdsdebug("purging iwinc %p inc %p\n", iwinc, inc);

	list_for_each_entry_safe(frag, pos, &iwinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_iw_frag_drop_page(frag);
		rds_iw_frag_free(frag);
	}
}

void rds_iw_inc_free(struct rds_incoming *inc)
{
	struct rds_iw_incoming *iwinc;

	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);

	rds_iw_inc_purge(inc);
	rdsdebug("freeing iwinc %p inc %p\n", iwinc, inc);
	BUG_ON(!list_empty(&iwinc->ii_frags));
	kmem_cache_free(rds_iw_incoming_slab, iwinc);
	atomic_dec(&rds_iw_allocation);
	BUG_ON(atomic_read(&rds_iw_allocation) < 0);
}
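
/*
 * Copy a received message out to userspace, walking the frag list and the
 * caller's iovec array in lockstep; each pass copies the largest run that
 * fits in both the current frag and the current iovec.
 */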
int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
			    size_t size)
{
	struct rds_iw_incoming *iwinc;
	struct rds_page_frag *frag;
	struct iovec *iov = first_iov;
	unsigned long to_copy;
	unsigned long frag_off = 0;
	unsigned long iov_off = 0;
	int copied = 0;
	int ret;
	u32 len;

	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
	frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (copied < size && copied < len) {
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(size_t, to_copy, size - copied);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
			 "[%p, %lu] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 frag->f_page, frag->f_offset, frag_off);

		/* XXX needs + offset for multiple recvs per page */
		ret = rds_page_copy_to_user(frag->f_page,
					    frag->f_offset + frag_off,
					    iov->iov_base + iov_off,
					    to_copy);
		if (ret) {
			copied = ret;
			break;
		}

		iov_off += to_copy;
		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}

/* ic starts out kzalloc()ed */
void rds_iw_recv_init_ack(struct rds_iw_connection *ic)
{
	struct ib_send_wr *wr = &ic->i_ack_wr;
	struct ib_sge *sge = &ic->i_ack_sge;

	sge->addr = ic->i_ack_dma;
	sge->length = sizeof(struct rds_header);
	sge->lkey = rds_iw_local_dma_lkey(ic);

	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->opcode = IB_WR_SEND;
	wr->wr_id = RDS_IW_ACK_WR_ID;
	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
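
/*
 * Two implementations follow: without atomic64 support, i_ack_next is a
 * plain u64 guarded by i_ack_lock; with it, i_ack_next is an atomic64_t
 * and only memory barriers are needed around the i_ack_flags bit ops.
 */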
#ifndef KERNEL_HAS_ATOMIC64
static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
			   int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
{
	unsigned long flags;
	u64 seq;

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
			   int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		smp_mb__before_atomic();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	smp_mb__after_atomic();

	return atomic64_read(&ic->i_ack_next);
}
#endif
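
/*
 * Build and post the single ACK frame: latch the latest sequence number,
 * fill in a header-only message with the ack and any advertised credits,
 * checksum it and hand it to the QP.  On a post failure the IN_FLIGHT and
 * REQUESTED bits are reset so a later attempt will retry.
 */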
static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	struct ib_send_wr *failed_wr;
	u64 seq;
	int ret;

	seq = rds_iw_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_iw_stats_inc(s_iw_ack_send_failure);

		rds_iw_conn_error(ic->conn, "sending ack failed\n");
	} else
		rds_iw_stats_inc(s_iw_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1.	We call rds_iw_attempt_ack from the recv completion handler
 *	to send an ACK-only frame.
 *	However, there can be only one such frame in the send queue
 *	at any time, so we may have to postpone it.
 *  2.	When another (data) packet is transmitted while there's
 *	an ACK in the queue, we piggyback the ACK sequence number
 *	on the data packet.
 *  3.	If the ACK WR is done sending, we get called from the
 *	send queue completion handler, and check whether there's
 *	another ACK pending (postponed because the WR was on the
 *	queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  -	i_ack_flags, which keeps track of whether the ACK WR
 *	is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  -	i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_iw_attempt_ack(struct rds_iw_connection *ic)
{
	unsigned int adv_credits;

	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		return;

	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
		rds_iw_stats_inc(s_iw_ack_send_delayed);
		return;
	}

	/* Can we get a send credit? */
	if (!rds_iw_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
		rds_iw_stats_inc(s_iw_tx_throttle);
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		return;
	}

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	rds_iw_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_iw_ack_send_complete(struct rds_iw_connection *ic)
{
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
	rds_iw_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic)
{
	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		rds_iw_stats_inc(s_iw_ack_send_piggybacked);
	return rds_iw_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_iw_cong_recv(struct rds_connection *conn,
			     struct rds_iw_incoming *iwinc)
{
	struct rds_cong_map *map;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_page_frag *frag;
	unsigned long frag_off;
	unsigned long to_copy;
	unsigned long copied;
	uint64_t uncongested = 0;
	void *addr;

	/* catch completely corrupt packets */
	if (be32_to_cpu(iwinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map = conn->c_fcong;
	map_page = 0;
	map_off = 0;

	frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
	frag_off = 0;

	copied = 0;

	while (copied < RDS_CONG_MAP_BYTES) {
		uint64_t *src, *dst;
		unsigned int k;

		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

		addr = kmap_atomic(frag->f_page);

		src = addr + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, ie
			 * bits that changed from 0 to 1. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr);

		copied += to_copy;

		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}

		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}

	/* the congestion map is in little endian order */
	uncongested = le64_to_cpu(uncongested);

	rds_cong_map_updated(map, uncongested);
}

/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_iw_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};
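
/*
 * One rds_iw_ack_state is accumulated across a whole batch of completions
 * in rds_poll_cq() and applied once by the tasklet, so the ack bookkeeping
 * runs once per poll rather than once per packet.
 */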
static void rds_iw_process_recv(struct rds_connection *conn,
				struct rds_iw_recv_work *recv, u32 byte_len,
				struct rds_iw_ack_state *state)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_incoming *iwinc = ic->i_iwinc;
	struct rds_header *ihdr, *hdr;

	/* XXX shut down the connection if port 0,0 are seen? */

	rdsdebug("ic %p iwinc %p recv %p byte len %u\n", ic, iwinc, recv,
		 byte_len);

	if (byte_len < sizeof(struct rds_header)) {
		rds_iw_conn_error(conn, "incoming message "
			"from %pI4 didn't include a "
			"header, disconnecting and "
			"reconnecting\n",
			&conn->c_faddr);
		return;
	}
	byte_len -= sizeof(struct rds_header);

	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_iw_conn_error(conn, "incoming message "
			"from %pI4 has corrupted header - "
			"forcing a reconnect\n",
			&conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}

	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;

	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_iw_send_add_credits(conn, ihdr->h_credit);

	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && byte_len == 0) {
		/* This is an ACK-only packet. The fact that it gets
		 * special treatment here is that historically, ACKs
		 * were rather special beasts.
		 */
		rds_iw_stats_inc(s_iw_ack_received);

		/*
		 * Usually the frags make their way on to incs and are then freed as
		 * the inc is freed.  We don't go that route, so we have to drop the
		 * page ref ourselves.  We can't just leave the page on the recv
		 * because that confuses the dma mapping of pages and each recv's use
		 * of a partial page.  We can leave the frag, though, it will be
		 * reused.
		 *
		 * FIXME: Fold this into the code path below.
		 */
		rds_iw_frag_drop_page(recv->r_frag);
		return;
	}

	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message.. copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (!iwinc) {
		iwinc = recv->r_iwinc;
		recv->r_iwinc = NULL;
		ic->i_iwinc = iwinc;

		hdr = &iwinc->ii_inc.i_hdr;
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

		rdsdebug("ic %p iwinc %p rem %u flag 0x%x\n", ic, iwinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &iwinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence ||
		    hdr->h_len != ihdr->h_len ||
		    hdr->h_sport != ihdr->h_sport ||
		    hdr->h_dport != ihdr->h_dport) {
			rds_iw_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}

	list_add_tail(&recv->r_frag->f_item, &iwinc->ii_frags);
	recv->r_frag = NULL;

	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		ic->i_recv_data_rem = 0;
		ic->i_iwinc = NULL;

		if (iwinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
			rds_iw_cong_recv(conn, iwinc);
		else {
			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
					  &iwinc->ii_inc, GFP_ATOMIC);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}

		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}

		rds_inc_put(&iwinc->ii_inc);
	}
}

/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_iw_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_iw_stats_inc(s_iw_rx_cq_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}

static inline void rds_poll_cq(struct rds_iw_connection *ic,
			       struct rds_iw_ack_state *state)
{
	struct rds_connection *conn = ic->conn;
	struct ib_wc wc;
	struct rds_iw_recv_work *recv;

	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_iw_stats_inc(s_iw_rx_cq_event);

		recv = &ic->i_recvs[rds_iw_ring_oldest(&ic->i_recv_ring)];

		rds_iw_recv_unmap_page(ic, recv);

		/*
		 * Also process recvs in connecting state because it is possible
		 * to get a recv completion _before_ the rdmacm ESTABLISHED
		 * event is processed.
		 */
		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
			/* We expect errors as the qp is drained during shutdown */
			if (wc.status == IB_WC_SUCCESS) {
				rds_iw_process_recv(conn, recv, wc.byte_len, state);
			} else {
				rds_iw_conn_error(conn, "recv completion on "
					"%pI4 had status %u, disconnecting and "
					"reconnecting\n", &conn->c_faddr,
					wc.status);
			}
		}

		rds_iw_ring_free(&ic->i_recv_ring, 1);
	}
}
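
/*
 * Drain the CQ, re-arm it, then drain again: a completion that lands
 * between the first poll and ib_req_notify_cq() would otherwise sit
 * unnoticed until the next interrupt.
 */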
void rds_iw_recv_tasklet_fn(unsigned long data)
{
	struct rds_iw_connection *ic = (struct rds_iw_connection *) data;
	struct rds_connection *conn = ic->conn;
	struct rds_iw_ack_state state = { 0, };

	rds_poll_cq(ic, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	rds_poll_cq(ic, &state);

	if (state.ack_next_valid)
		rds_iw_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}
	if (rds_conn_up(conn))
		rds_iw_attempt_ack(ic);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_iw_ring_empty(&ic->i_recv_ring))
		rds_iw_stats_inc(s_iw_rx_ring_empty);

	/*
	 * If the ring is running low, then schedule the thread to refill.
	 */
	if (rds_iw_ring_low(&ic->i_recv_ring))
		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
}
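
/*
 * Refill entry point for thread context: unlike the completion tasklet we
 * may block here, so the ring is topped up with GFP_KERNEL allocations.
 */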
int rds_iw_recv(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	int ret = 0;

	rdsdebug("conn %p\n", conn);

	/*
	 * If we get a temporary posting failure in this context then
	 * we're really low and we want the caller to back off for a bit.
	 */
	mutex_lock(&ic->i_recv_mutex);
	if (rds_iw_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 0))
		ret = -ENOMEM;
	else
		rds_iw_stats_inc(s_iw_rx_refill_from_thread);
	mutex_unlock(&ic->i_recv_mutex);

	if (rds_conn_up(conn))
		rds_iw_attempt_ack(ic);

	return ret;
}

int rds_iw_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to 30% of all available RAM for recv memory */
	si_meminfo(&si);
	rds_iw_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

	rds_iw_incoming_slab = kmem_cache_create("rds_iw_incoming",
					sizeof(struct rds_iw_incoming),
					0, 0, NULL);
	if (!rds_iw_incoming_slab)
		goto out;

	rds_iw_frag_slab = kmem_cache_create("rds_iw_frag",
					sizeof(struct rds_page_frag),
					0, 0, NULL);
	if (!rds_iw_frag_slab)
		kmem_cache_destroy(rds_iw_incoming_slab);
	else
		ret = 0;
out:
	return ret;
}

void rds_iw_recv_exit(void)
{
	kmem_cache_destroy(rds_iw_incoming_slab);
	kmem_cache_destroy(rds_iw_frag_slab);
}