/*
 * Copyright (c) 2007 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/bitops.h>
#include <linux/export.h>

#include "rds.h"
/*
 * This file implements the receive side of the unconventional congestion
 * management in RDS.
 *
 * Messages waiting in the receive queue on the receiving socket are accounted
 * against the socket's SO_RCVBUF option value.  Only the payload bytes in the
 * message are accounted for.  If the number of bytes queued equals or exceeds
 * rcvbuf then the socket is congested.  All sends attempted to this socket's
 * address should block or return -EWOULDBLOCK.
 *
 * Applications are expected to be reasonably tuned such that this situation
 * very rarely occurs.  An application encountering this "back-pressure" is
 * considered a bug.
 *
 * This is implemented by having each node maintain bitmaps which indicate
 * which ports on bound addresses are congested.  As the bitmap changes it is
 * sent through all the connections which terminate in the local address of the
 * bitmap which changed.
 *
 * The bitmaps are allocated as connections are brought up.  This avoids
 * allocation in the interrupt handling path which queues messages on sockets.
 * The dense bitmaps let transports send the entire bitmap on any bitmap change
 * reasonably efficiently.  This is much easier to implement than some
 * finer-grained communication of per-port congestion.  The sender does a very
 * inexpensive bit test to see whether the port it's about to send to is
 * congested or not.
 *
 * Interaction with poll is a tad tricky.  We want all processes stuck in
 * poll to wake up and check whether a congested destination became uncongested.
 * The really sad thing is we have no idea which destinations the application
 * wants to send to - we don't even know which rds_connections are involved.
 * So until we implement a more flexible rds poll interface, we have to make
 * do with this:
 * We maintain a global counter that is incremented each time a congestion map
 * update is received.  Each rds socket tracks this value, and if rds_poll
 * finds that the saved generation number is smaller than the global generation
 * number, it wakes up the process.
 */
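/*
 * For orientation, a minimal sketch (not part of this file) of how a poll
 * implementation can consume this counter through rds_cong_updated_since(),
 * which is defined below.  The per-socket variable saved_cong_gen and the
 * helper recheck_destination_congestion() are illustrative names only:
 *
 *	poll_wait(file, &rds_poll_waitq, wait);
 *	if (rds_cong_updated_since(&rs->saved_cong_gen))
 *		recheck_destination_congestion(rs);
 *
 * rds_cong_updated_since() stores the current generation in the caller's
 * variable and returns nonzero only when it changed, so a woken poller does
 * at most one recheck per congestion map update.
 */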
static atomic_t rds_cong_generation = ATOMIC_INIT(0);

/*
 * Congestion monitoring
 */
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);
/*
 * Yes, a global lock.  It's used so infrequently that it's worth keeping it
 * global to simplify the locking.  It's only used in the following places:
 *
 *  - on connection buildup to associate a conn with its maps
 *  - on map changes to inform conns of a new map to send
 *
 * It's sadly ordered under the socket callback lock and the connection lock.
 * Receive paths can mark ports congested from interrupt context so the
 * lock masks interrupts.
 */
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;
static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
					       struct rds_cong_map *insert)
{
	struct rb_node **p = &rds_cong_tree.rb_node;
	struct rb_node *parent = NULL;
	struct rds_cong_map *map;

	while (*p) {
		parent = *p;
		map = rb_entry(parent, struct rds_cong_map, m_rb_node);

		if (addr < map->m_addr)
			p = &(*p)->rb_left;
		else if (addr > map->m_addr)
			p = &(*p)->rb_right;
		else
			return map;
	}

	if (insert) {
		rb_link_node(&insert->m_rb_node, parent, p);
		rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
	}
	return NULL;
}
/*
 * There is only ever one bitmap for any address.  Connections try and allocate
 * these bitmaps in the process of getting pointers to them.  The bitmaps are
 * only ever freed as the module is removed after all connections have been
 * freed.
 */
static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
{
	struct rds_cong_map *map;
	struct rds_cong_map *ret = NULL;
	unsigned long zp;
	unsigned long i;
	unsigned long flags;

	map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
	if (!map)
		return NULL;

	map->m_addr = addr;
	init_waitqueue_head(&map->m_waitq);
	INIT_LIST_HEAD(&map->m_conn_list);

	for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
		zp = get_zeroed_page(GFP_KERNEL);
		if (zp == 0)
			goto out;
		map->m_page_addrs[i] = zp;
	}

	spin_lock_irqsave(&rds_cong_lock, flags);
	ret = rds_cong_tree_walk(addr, map);
	spin_unlock_irqrestore(&rds_cong_lock, flags);

	if (!ret) {
		ret = map;
		map = NULL;
	}

out:
	if (map) {
		for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
			free_page(map->m_page_addrs[i]);
		kfree(map);
	}

	rdsdebug("map %p for addr %x\n", ret, be32_to_cpu(addr));

	return ret;
}
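/*
 * Illustrative only (not part of this file): because rds_cong_tree_walk()
 * either finds an existing map or links the caller's candidate while holding
 * rds_cong_lock, repeated lookups of one address resolve to the same bitmap,
 * and the losing allocation is freed on the way out above:
 *
 *	struct rds_cong_map *a = rds_cong_from_addr(addr);
 *	struct rds_cong_map *b = rds_cong_from_addr(addr);
 *
 *	WARN_ON(a && b && a != b);
 */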
/*
 * Put the conn on its local map's list.  This is called when the conn is
 * really added to the hash.  It's nested under the rds_conn_lock, sadly.
 */
void rds_cong_add_conn(struct rds_connection *conn)
{
	unsigned long flags;

	rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
	spin_lock_irqsave(&rds_cong_lock, flags);
	list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
}
void rds_cong_remove_conn(struct rds_connection *conn)
{
	unsigned long flags;

	rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
	spin_lock_irqsave(&rds_cong_lock, flags);
	list_del_init(&conn->c_map_item);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
}
int rds_cong_get_maps(struct rds_connection *conn)
{
	conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
	conn->c_fcong = rds_cong_from_addr(conn->c_faddr);

	if (!(conn->c_lcong && conn->c_fcong))
		return -ENOMEM;

	return 0;
}
void rds_cong_queue_updates(struct rds_cong_map *map)
{
	struct rds_connection *conn;
	unsigned long flags;

	spin_lock_irqsave(&rds_cong_lock, flags);

	list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
		if (!test_and_set_bit(0, &conn->c_map_queued)) {
			rds_stats_inc(s_cong_update_queued);
			/* We cannot inline the call to rds_send_xmit() here
			 * for two reasons (both pertaining to a TCP transport):
			 * 1. When we get here from the receive path, we
			 *    are already holding the sock_lock (held by
			 *    tcp_v4_rcv()).  So inlining calls to
			 *    tcp_setsockopt and/or tcp_sendmsg will deadlock
			 *    when they try to take the sock_lock.
			 * 2. Interrupts are masked so that we can mark the
			 *    port congested from both send and recv paths.
			 *    (See comment around declaration of rds_cong_lock.)
			 *    An attempt to take the sock_lock() here will
			 *    therefore trigger warnings.
			 * Defer the xmit to rds_send_worker() instead.
			 */
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
		}
	}

	spin_unlock_irqrestore(&rds_cong_lock, flags);
}
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
	rdsdebug("waking map %p for %pI4\n",
		 map, &map->m_addr);
	rds_stats_inc(s_cong_update_received);
	atomic_inc(&rds_cong_generation);
	if (waitqueue_active(&map->m_waitq))
		wake_up(&map->m_waitq);
	if (waitqueue_active(&rds_poll_waitq))
		wake_up_all(&rds_poll_waitq);

	if (portmask && !list_empty(&rds_cong_monitor)) {
		unsigned long flags;
		struct rds_sock *rs;

		read_lock_irqsave(&rds_cong_monitor_lock, flags);
		list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
			spin_lock(&rs->rs_lock);
			rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
			rs->rs_cong_mask &= ~portmask;
			spin_unlock(&rs->rs_lock);
			if (rs->rs_cong_notify)
				rds_wake_sk_sleep(rs);
		}
		read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
	}
}
EXPORT_SYMBOL_GPL(rds_cong_map_updated);
int rds_cong_updated_since(unsigned long *recent)
{
	unsigned long gen = atomic_read(&rds_cong_generation);

	if (likely(*recent == gen))
		return 0;

	*recent = gen;
	return 1;
}
/*
 * We're called under the locking that protects the socket's receive buffer
 * consumption.  This makes it a lot easier for the caller to only call us
 * when it knows that an existing set bit needs to be cleared, and vice versa.
 * We can't block and we need to deal with concurrent sockets working against
 * the same per-address map.
 */
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	rdsdebug("setting congestion for %pI4:%u in map %p\n",
		 &map->m_addr, ntohs(port), map);

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	__set_bit_le(off, (void *)map->m_page_addrs[i]);
}
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	rdsdebug("clearing congestion for %pI4:%u in map %p\n",
		 &map->m_addr, ntohs(port), map);

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	__clear_bit_le(off, (void *)map->m_page_addrs[i]);
}
static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	return test_bit_le(off, (void *)map->m_page_addrs[i]);
}
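/*
 * Worked example of the page/bit arithmetic above, assuming 4 KiB pages
 * (so RDS_CONG_MAP_PAGE_BITS is 32768 and the 8 KiB map covering all
 * 65536 ports spans RDS_CONG_MAP_PAGES == 2 pages):
 *
 *	port 5000  -> i = 5000 / 32768 = 0,  off = 5000 % 32768 = 5000
 *	port 40000 -> i = 40000 / 32768 = 1, off = 40000 % 32768 = 7232
 *
 * With a different PAGE_SIZE the split between pages moves, but (i, off)
 * always names the single bit for that port in the per-address map.
 */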
void rds_cong_add_socket(struct rds_sock *rs)
{
	unsigned long flags;

	write_lock_irqsave(&rds_cong_monitor_lock, flags);
	if (list_empty(&rs->rs_cong_list))
		list_add(&rs->rs_cong_list, &rds_cong_monitor);
	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}
void rds_cong_remove_socket(struct rds_sock *rs)
{
	unsigned long flags;
	struct rds_cong_map *map;

	write_lock_irqsave(&rds_cong_monitor_lock, flags);
	list_del_init(&rs->rs_cong_list);
	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);

	/* update congestion map for now-closed port */
	spin_lock_irqsave(&rds_cong_lock, flags);
	map = rds_cong_tree_walk(rs->rs_bound_addr, NULL);
	spin_unlock_irqrestore(&rds_cong_lock, flags);

	if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
		rds_cong_clear_bit(map, rs->rs_bound_port);
		rds_cong_queue_updates(map);
	}
}
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
		  struct rds_sock *rs)
{
	if (!rds_cong_test_bit(map, port))
		return 0;
	if (nonblock) {
		if (rs && rs->rs_cong_monitor) {
			unsigned long flags;

			/* It would have been nice to have an atomic set_bit on
			 * a uint64_t. */
			spin_lock_irqsave(&rs->rs_lock, flags);
			rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
			spin_unlock_irqrestore(&rs->rs_lock, flags);

			/* Test again - a congestion update may have arrived in
			 * the meantime. */
			if (!rds_cong_test_bit(map, port))
				return 0;
		}
		rds_stats_inc(s_cong_send_error);
		return -ENOBUFS;
	}

	rds_stats_inc(s_cong_send_blocked);
	rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));

	return wait_event_interruptible(map->m_waitq,
					!rds_cong_test_bit(map, port));
}
void rds_cong_exit(void)
{
	struct rb_node *node;
	struct rds_cong_map *map;
	unsigned long i;

	while ((node = rb_first(&rds_cong_tree))) {
		map = rb_entry(node, struct rds_cong_map, m_rb_node);
		rdsdebug("freeing map %p\n", map);
		rb_erase(&map->m_rb_node, &rds_cong_tree);
		for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
			free_page(map->m_page_addrs[i]);
		kfree(map);
	}
}
/*
 * Allocate an RDS message containing a congestion update.
 */
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn)
{
	struct rds_cong_map *map = conn->c_lcong;
	struct rds_message *rm;

	rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
	if (!IS_ERR(rm))
		rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP;

	return rm;
}