/*

Copyright (c) 2006, Arvid Norberg
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the distribution.
    * Neither the name of the author nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

*/

#include "libtorrent/pch.hpp"

#include <vector>
#include <deque>
#include <algorithm>
#include <functional>
#include <numeric>
#include <boost/cstdint.hpp>
#include <boost/bind.hpp>

#include "libtorrent/kademlia/routing_table.hpp"
#include "libtorrent/kademlia/node_id.hpp"
#include "libtorrent/session_settings.hpp"

using boost::bind;
using boost::uint8_t;

namespace libtorrent { namespace dht
{

#ifdef TORRENT_DHT_VERBOSE_LOGGING
TORRENT_DEFINE_LOG(table)
#endif

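// the initial bucket refresh times are spread out over the full
// 15 minute refresh interval (160 buckets * 5.625 seconds = 900
// seconds), so that the refreshes don't all come due at once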
routing_table::routing_table(node_id const& id, int bucket_size
	, dht_settings const& settings)
	: m_bucket_size(bucket_size)
	, m_settings(settings)
	, m_id(id)
	, m_lowest_active_bucket(160)
{
	// distribute the refresh times for the buckets in an
	// attempt to even out the network load
	for (int i = 0; i < 160; ++i)
		m_bucket_activity[i] = time_now() - milliseconds(i*5625);
	m_bucket_activity[0] = time_now() - minutes(15);
}

boost::tuple<int, int> routing_table::size() const
{
	int nodes = 0;
	int replacements = 0;
	for (table_t::const_iterator i = m_buckets.begin()
		, end(m_buckets.end()); i != end; ++i)
	{
		nodes += i->first.size();
		replacements += i->second.size();
	}
	return boost::make_tuple(nodes, replacements);
}

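// returns a rough estimate of the total number of nodes in the DHT.
// the buckets nearest our own id sample a known fraction of the node
// id space, so counting the nodes in them and scaling up by the
// inverse of that fraction approximates the global node count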
size_type routing_table::num_global_nodes() const
{
	int first_full = m_lowest_active_bucket;
	int num_nodes = 1; // we are one of the nodes
	for (; first_full < 160
		&& int(m_buckets[first_full].first.size()) < m_bucket_size;
		++first_full)
	{
		num_nodes += m_buckets[first_full].first.size();
	}

	return (2 << (160 - first_full)) * num_nodes;
}

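// prints an ascii overview of the table: one column per bucket, with
// a histogram of the live nodes above the "+" baseline, the
// replacement cache entries below it, and a listing of every node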
#ifdef TORRENT_DHT_VERBOSE_LOGGING

void routing_table::print_state(std::ostream& os) const
{
	os << "kademlia routing table state\n"
		<< "bucket_size: " << m_bucket_size << "\n"
		<< "global node count: " << num_global_nodes() << "\n"
		<< "node_id: " << m_id << "\n\n";

	os << "number of nodes per bucket:\n-- live ";
	for (int i = 8; i < 160; ++i)
		os << "-";
	os << "\n";

	for (int k = 0; k < 8; ++k)
	{
		for (table_t::const_iterator i = m_buckets.begin(), end(m_buckets.end());
			i != end; ++i)
		{
			os << (int(i->first.size()) > (7 - k) ? "|" : " ");
		}
		os << "\n";
	}
	for (table_t::const_iterator i = m_buckets.begin(), end(m_buckets.end());
		i != end; ++i)
	{
		os << "+";
	}
	os << "\n";
	for (int k = 0; k < 8; ++k)
	{
		for (table_t::const_iterator i = m_buckets.begin(), end(m_buckets.end());
			i != end; ++i)
		{
			os << (int(i->second.size()) > k ? "|" : " ");
		}
		os << "\n";
	}
	os << "-- cached ";
	for (int i = 10; i < 160; ++i)
		os << "-";
	os << "\n\n";

	os << "nodes:\n";
	for (table_t::const_iterator i = m_buckets.begin(), end(m_buckets.end());
		i != end; ++i)
	{
		int bucket_index = int(i - m_buckets.begin());
		os << "=== BUCKET = " << bucket_index
			<< " = " << (bucket_index >= m_lowest_active_bucket?"active":"inactive")
			<< " = " << total_seconds(time_now() - m_bucket_activity[bucket_index])
			<< " s ago ===== \n";
		for (bucket_t::const_iterator j = i->first.begin()
			, end(i->first.end()); j != end; ++j)
		{
			os << "ip: " << j->addr << " fails: " << j->fail_count
				<< " id: " << j->id << "\n";
		}
	}
}

#endif

void routing_table::touch_bucket(int bucket)
{
	m_bucket_activity[bucket] = time_now();
}

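// buckets are refreshed on a 15 minute interval. this returns the
// time at which the given bucket is next due; buckets below the
// lowest active one hold no nodes, so they are simply scheduled
// 15 minutes out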
ptime routing_table::next_refresh(int bucket)
{
	TORRENT_ASSERT(bucket < 160);
	TORRENT_ASSERT(bucket >= 0);
	// lower than or equal to since a refresh of bucket 0 will
	// effectively refresh the lowest active bucket as well
	if (bucket < m_lowest_active_bucket && bucket > 0)
		return time_now() + minutes(15);
	return m_bucket_activity[bucket] + minutes(15);
}

void routing_table::replacement_cache(bucket_t& nodes) const
{
	for (table_t::const_iterator i = m_buckets.begin()
		, end(m_buckets.end()); i != end; ++i)
	{
		std::copy(i->second.begin(), i->second.end()
			, std::back_inserter(nodes));
	}
}

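// returns true if the table has any use for a node with the given id,
// i.e. if the replacement cache of its bucket still has room and
// neither the bucket nor the cache already contains the id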
bool routing_table::need_node(node_id const& id)
{
	int bucket_index = distance_exp(m_id, id);
	TORRENT_ASSERT(bucket_index < (int)m_buckets.size());
	TORRENT_ASSERT(bucket_index >= 0);
	bucket_t& b = m_buckets[bucket_index].first;
	bucket_t& rb = m_buckets[bucket_index].second;

	// if the replacement cache is full, we don't
	// need another node. The table is fine the
	// way it is.
	if ((int)rb.size() >= m_bucket_size) return false;

	// if the node already exists, we don't need it
	if (std::find_if(b.begin(), b.end(), bind(&node_entry::id, _1) == id)
		!= b.end()) return false;

	if (std::find_if(rb.begin(), rb.end(), bind(&node_entry::id, _1) == id)
		!= rb.end()) return false;

	return true;
}

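// called when a node fails to respond. if the bucket has entries in
// its replacement cache the failed node is swapped out immediately,
// otherwise its fail counter is bumped and it is only evicted once
// the counter reaches max_fail_count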
void routing_table::node_failed(node_id const& id)
{
	int bucket_index = distance_exp(m_id, id);
	TORRENT_ASSERT(bucket_index < (int)m_buckets.size());
	TORRENT_ASSERT(bucket_index >= 0);
	bucket_t& b = m_buckets[bucket_index].first;
	bucket_t& rb = m_buckets[bucket_index].second;

	bucket_t::iterator i = std::find_if(b.begin(), b.end()
		, bind(&node_entry::id, _1) == id);

	if (i == b.end()) return;

	// if a message to ourself fails, ignore it
	if (bucket_index == 0) return;

	if (rb.empty())
	{
		++i->fail_count;

#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(table) << " NODE FAILED"
			" id: " << id <<
			" ip: " << i->addr <<
			" fails: " << i->fail_count <<
			" up-time: " << total_seconds(time_now() - i->first_seen);
#endif

		if (i->fail_count >= m_settings.max_fail_count)
		{
			b.erase(i);
			TORRENT_ASSERT(m_lowest_active_bucket <= bucket_index);
			while (m_lowest_active_bucket < 160
				&& m_buckets[m_lowest_active_bucket].first.empty())
			{
				++m_lowest_active_bucket;
			}
		}
		return;
	}

	b.erase(i);
	b.push_back(rb.back());
	rb.erase(rb.end() - 1);
}

void routing_table::add_router_node(udp::endpoint router)
{
	m_router_nodes.insert(router);
}

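// the insertion policy below is, in order of precedence: a node we
// already know is moved to the back of its bucket; a node whose
// bucket has free space is appended; a full bucket first evicts its
// most-failed stale node; and as a last resort the node goes into
// the bucket's replacement cache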
// this function is called every time the node sees
// a sign of a node being alive. This node will either
// be inserted in the k-buckets or be moved to the top
// of its bucket.
// the return value indicates if the table needs a refresh.
// if true, the node should refresh the table (i.e. do a find_node
// on its own id)
bool routing_table::node_seen(node_id const& id, udp::endpoint addr)
{
	if (m_router_nodes.find(addr) != m_router_nodes.end()) return false;
	int bucket_index = distance_exp(m_id, id);
	TORRENT_ASSERT(bucket_index < (int)m_buckets.size());
	TORRENT_ASSERT(bucket_index >= 0);
	bucket_t& b = m_buckets[bucket_index].first;

	bucket_t::iterator i = std::find_if(b.begin(), b.end()
		, bind(&node_entry::id, _1) == id);

	bool ret = need_bootstrap();

	//m_bucket_activity[bucket_index] = time_now();

	if (i != b.end())
	{
		// TODO: what do we do if we see a node with
		// the same id as a node at a different address?
		// TORRENT_ASSERT(i->addr == addr);

		// we already have the node in our bucket
		// just move it to the back since it was
		// the last node we had any contact with
		// in this bucket
		b.erase(i);
		b.push_back(node_entry(id, addr));
		// TORRENT_LOG(table) << "replacing node: " << id << " " << addr;
		return ret;
	}

	// if the node was not present in our list
	// we will only insert it if there is room
	// for it, or if some of our nodes have gone
	// offline
	if ((int)b.size() < m_bucket_size)
	{
		if (b.empty()) b.reserve(m_bucket_size);
		b.push_back(node_entry(id, addr));
		// if bucket index is 0, the node is ourselves
		// don't update m_lowest_active_bucket
		if (bucket_index < m_lowest_active_bucket
			&& bucket_index > 0)
			m_lowest_active_bucket = bucket_index;
		// TORRENT_LOG(table) << "inserting node: " << id << " " << addr;
		return ret;
	}

	// if there is no room, we look for nodes marked as stale
	// in the k-bucket. If we find one, we can replace it.
	// A node is considered stale if it has failed at least one
	// time. Here we choose the node that has failed most times.
	// If we don't find one, place this node in the replacement-
	// cache and replace any nodes that will fail in the future
	// with nodes from that cache.

	i = std::max_element(b.begin(), b.end()
		, bind(&node_entry::fail_count, _1)
		< bind(&node_entry::fail_count, _2));

	if (i != b.end() && i->fail_count > 0)
	{
		// i points to a node that has been marked
		// as stale. Replace it with this new one
		b.erase(i);
		b.push_back(node_entry(id, addr));
		// TORRENT_LOG(table) << "replacing stale node: " << id << " " << addr;
		return ret;
	}

	// if we don't have any identified stale nodes in
	// the bucket, and the bucket is full, we have to
	// cache this node and wait until some node fails
	// and then replace it.

	bucket_t& rb = m_buckets[bucket_index].second;

	i = std::find_if(rb.begin(), rb.end()
		, bind(&node_entry::id, _1) == id);

	// if the node is already in the replacement bucket
	// just return.
	if (i != rb.end()) return ret;

	if ((int)rb.size() > m_bucket_size) rb.erase(rb.begin());
	if (rb.empty()) rb.reserve(m_bucket_size);
	rb.push_back(node_entry(id, addr));
	// TORRENT_LOG(table) << "inserting node in replacement cache: " << id << " " << addr;
	return ret;
}

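// the table needs to be bootstrapped if it doesn't contain a single
// node that hasn't failed at least once (this includes an empty table)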
bool routing_table::need_bootstrap() const
{
	for (const_iterator i = begin(); i != end(); ++i)
	{
		if (i->fail_count == 0) return false;
	}
	return true;
}

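// copies at most n elements from [begin, end) that satisfy the
// predicate p into target, returning the new target position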
template <class SrcIter, class DstIter, class Pred>
DstIter copy_if_n(SrcIter begin, SrcIter end, DstIter target, size_t n, Pred p)
{
	for (; n > 0 && begin != end; ++begin)
	{
		if (!p(*begin)) continue;
		*target = *begin;
		--n;
		++target;
	}
	return target;
}

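// the lookup below proceeds in three steps: first the bucket the
// target falls in, then (a random selection from) the buckets closer
// to us, and finally the buckets further away, until count nodes
// with no recorded failures have been collected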
// fills the vector with the k nodes from our buckets that
// are nearest to the given id.
void routing_table::find_node(node_id const& target
	, std::vector<node_entry>& l, bool include_self, int count)
{
	l.clear();
	if (count == 0) count = m_bucket_size;
	l.reserve(count);

	int bucket_index = distance_exp(m_id, target);
	bucket_t& b = m_buckets[bucket_index].first;

	// copy all nodes that haven't failed into the target
	// vector.
	copy_if_n(b.begin(), b.end(), std::back_inserter(l)
		, (std::min)(size_t(count), b.size()), bind(&node_entry::fail_count, _1) == 0);
	TORRENT_ASSERT((int)l.size() <= count);

	if ((int)l.size() == count)
	{
		TORRENT_ASSERT(std::count_if(l.begin(), l.end()
			, boost::bind(&node_entry::fail_count, _1) != 0) == 0);
		return;
	}

	// if we didn't have enough nodes in that bucket
	// we have to reply with nodes from buckets closer
	// to us. i.e. all the buckets in the range
	// [0, bucket_index) if we are to include ourself
	// or [1, bucket_index) if not.
	bucket_t tmpb;
	for (int i = include_self?0:1; i < bucket_index; ++i)
	{
		bucket_t& b = m_buckets[i].first;
		std::remove_copy_if(b.begin(), b.end(), std::back_inserter(tmpb)
			, bind(&node_entry::fail_count, _1));
	}

	if (count - l.size() < tmpb.size())
	{
		std::random_shuffle(tmpb.begin(), tmpb.end());
		size_t to_copy = count - l.size();
		std::copy(tmpb.begin(), tmpb.begin() + to_copy, std::back_inserter(l));
	}
	else
	{
		std::copy(tmpb.begin(), tmpb.end(), std::back_inserter(l));
	}

	TORRENT_ASSERT((int)l.size() <= count);

	// return if we have enough nodes or if the bucket index
	// is the biggest index available (there are no more buckets)
	// to look in.
	if ((int)l.size() == count)
	{
		TORRENT_ASSERT(std::count_if(l.begin(), l.end()
			, boost::bind(&node_entry::fail_count, _1) != 0) == 0);
		return;
	}

	for (size_t i = bucket_index + 1; i < m_buckets.size(); ++i)
	{
		bucket_t& b = m_buckets[i].first;

		size_t to_copy = (std::min)(count - l.size(), b.size());
		copy_if_n(b.begin(), b.end(), std::back_inserter(l)
			, to_copy, bind(&node_entry::fail_count, _1) == 0);
		TORRENT_ASSERT((int)l.size() <= count);
		if ((int)l.size() == count)
		{
			TORRENT_ASSERT(std::count_if(l.begin(), l.end()
				, boost::bind(&node_entry::fail_count, _1) != 0) == 0);
			return;
		}
	}
	TORRENT_ASSERT((int)l.size() <= count);

	TORRENT_ASSERT(std::count_if(l.begin(), l.end()
		, boost::bind(&node_entry::fail_count, _1) != 0) == 0);
}

routing_table::iterator routing_table::begin() const
{
	// +1 to avoid ourself
	return iterator(m_buckets.begin() + 1, m_buckets.end());
}

routing_table::iterator routing_table::end() const
{
	return iterator(m_buckets.end(), m_buckets.end());
}

} } // namespace libtorrent::dht