/*

Copyright (c) 2003, Arvid Norberg, Daniel Wallin
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the distribution.
    * Neither the name of the author nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

*/
#include "libtorrent/pch.hpp"

#include <ctime>
#include <iterator>
#include <algorithm>
#include <set>
#include <functional>
#include <sstream> // for std::stringstream, used in error messages and debug logging below

#ifdef _MSC_VER
#pragma warning(push, 1)
#endif

#include <boost/lexical_cast.hpp>
#include <boost/filesystem/convenience.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/ref.hpp>
#include <boost/bind.hpp>
#include <boost/version.hpp>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#include "libtorrent/storage.hpp"
#include "libtorrent/torrent.hpp"
#include "libtorrent/hasher.hpp"
#include "libtorrent/session.hpp"
#include "libtorrent/peer_id.hpp"
#include "libtorrent/file.hpp"
#include "libtorrent/invariant_check.hpp"
#include "libtorrent/file_pool.hpp"
#include "libtorrent/aux_/session_impl.hpp"

#ifndef NDEBUG
#include <ios>
#include <iostream>
#include <iomanip>
#include <cstdio>
#endif

#if defined(__APPLE__)
// for getattrlist()
#include <sys/attr.h>
#include <unistd.h>
// for statfs()
#include <sys/param.h>
#include <sys/mount.h>
#endif

#if defined(__linux__)
#include <sys/statfs.h>
#endif

#if defined(_WIN32) && defined(UNICODE)

#include <windows.h>
#include <boost/filesystem/exception.hpp>
#include "libtorrent/utf8.hpp"
namespace libtorrent
{
	std::wstring safe_convert(std::string const& s)
	{
		try
		{
			return libtorrent::utf8_wchar(s);
		}
		catch (std::exception)
		{
			std::wstring ret;
			const char* end = &s[0] + s.size();
			for (const char* i = &s[0]; i < end;)
			{
				wchar_t c = '.';
				int result = std::mbtowc(&c, i, end - i);
				if (result > 0) i += result;
				else ++i;
				ret += c;
			}
			return ret;
		}
	}
}
#endif
#if defined(_WIN32) && defined(UNICODE) && BOOST_VERSION < 103400
namespace
{
	using libtorrent::safe_convert;
	using namespace boost::filesystem;

	// based on code from Boost.Filesystem
	bool create_directories_win(const path& ph)
	{
		if (ph.empty() || exists(ph))
		{
			if ( !ph.empty() && !is_directory(ph) )
				boost::throw_exception( filesystem_error(
					"boost::filesystem::create_directories",
					ph, "path exists and is not a directory",
					not_directory_error ) );
			return false;
		}

		// First create branch, by calling ourself recursively
		create_directories_win(ph.branch_path());
		// Now that parent's path exists, create the directory
		std::wstring wph(safe_convert(ph.native_directory_string()));
		CreateDirectory(wph.c_str(), 0);
		return true;
	}

	bool exists_win( const path & ph )
	{
		std::wstring wpath(safe_convert(ph.string()));
		if(::GetFileAttributes( wpath.c_str() ) == 0xFFFFFFFF)
		{
			UINT err = ::GetLastError();
			if((err == ERROR_FILE_NOT_FOUND)
				|| (err == ERROR_INVALID_PARAMETER)
				|| (err == ERROR_NOT_READY)
				|| (err == ERROR_PATH_NOT_FOUND)
				|| (err == ERROR_INVALID_NAME)
				|| (err == ERROR_BAD_NETPATH ))
				return false; // GetFileAttributes failed because the path does not exist
			// for any other error we assume the file does exist and fall through,
			// this may not be the best policy though... (JM 20040330)
			return true;
		}
		return true;
	}

	boost::intmax_t file_size_win( const path & ph )
	{
		std::wstring wpath(safe_convert(ph.string()));
		// by now, intmax_t is 64-bits on all Windows compilers
		WIN32_FILE_ATTRIBUTE_DATA fad;
		if ( !::GetFileAttributesExW( wpath.c_str(),
			::GetFileExInfoStandard, &fad ) )
			boost::throw_exception( filesystem_error(
				"boost::filesystem::file_size",
				ph, detail::system_error_code() ) );
		if ( (fad.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) !=0 )
			boost::throw_exception( filesystem_error(
				"boost::filesystem::file_size",
				ph, "invalid: is a directory",
				is_directory_error ) );
		return (static_cast<boost::intmax_t>(fad.nFileSizeHigh)
			<< (sizeof(fad.nFileSizeLow)*8))
			+ fad.nFileSizeLow;
	}

	std::time_t last_write_time_win( const path & ph )
	{
		struct _stat path_stat;
		std::wstring wph(safe_convert(ph.native_file_string()));
		if ( ::_wstat( wph.c_str(), &path_stat ) != 0 )
			boost::throw_exception( filesystem_error(
				"boost::filesystem::last_write_time",
				ph, detail::system_error_code() ) );
		return path_stat.st_mtime;
	}

	void rename_win( const path & old_path,
		const path & new_path )
	{
		std::wstring wold_path(safe_convert(old_path.string()));
		std::wstring wnew_path(safe_convert(new_path.string()));
		if ( !::MoveFile( wold_path.c_str(), wnew_path.c_str() ) )
			boost::throw_exception( filesystem_error(
				"boost::filesystem::rename",
				old_path, new_path, detail::system_error_code() ) );
	}

} // anonymous namespace

#endif
#if BOOST_VERSION < 103200
bool operator<(boost::filesystem::path const& lhs
	, boost::filesystem::path const& rhs)
{
	return lhs.string() < rhs.string();
}
#endif

using namespace boost::filesystem;
using boost::bind;
using namespace ::boost::multi_index;
using boost::multi_index::multi_index_container;

#if !defined(NDEBUG) && defined(TORRENT_STORAGE_DEBUG)
namespace
{
	using namespace libtorrent;

	void print_to_log(const std::string& s)
	{
		static std::ofstream log("log.txt");
		log << s;
		log.flush();
	}
}
#endif
namespace libtorrent
{

	std::vector<std::pair<size_type, std::time_t> > get_filesizes(
		torrent_info const& t, path p)
	{
		p = complete(p);
		std::vector<std::pair<size_type, std::time_t> > sizes;
		for (torrent_info::file_iterator i = t.begin_files();
			i != t.end_files(); ++i)
		{
			size_type size = 0;
			std::time_t time = 0;
			try
			{
				path f = p / i->path;
#if defined(_WIN32) && defined(UNICODE) && BOOST_VERSION < 103400
				size = file_size_win(f);
				time = last_write_time_win(f);
#else
				size = file_size(f);
				time = last_write_time(f);
#endif
			}
			catch (std::exception&) {}
			sizes.push_back(std::make_pair(size, time));
		}
		return sizes;
	}
	// matches the sizes and timestamps of the files passed in
	// in non-compact mode, actual file sizes and timestamps
	// are allowed to be bigger and more recent than the fast
	// resume data. This is because full allocation will not move
	// pieces, so any older version of the resume data will
	// still be a correct subset of the actual data on disk.
	bool match_filesizes(
		torrent_info const& t
		, path p
		, std::vector<std::pair<size_type, std::time_t> > const& sizes
		, bool compact_mode
		, std::string* error)
	{
		if ((int)sizes.size() != t.num_files())
		{
			if (error) *error = "mismatching number of files";
			return false;
		}
		p = complete(p);

		std::vector<std::pair<size_type, std::time_t> >::const_iterator s
			= sizes.begin();
		for (torrent_info::file_iterator i = t.begin_files();
			i != t.end_files(); ++i, ++s)
		{
			size_type size = 0;
			std::time_t time = 0;
			try
			{
				path f = p / i->path;
#if defined(_WIN32) && defined(UNICODE) && BOOST_VERSION < 103400
				size = file_size_win(f);
				time = last_write_time_win(f);
#else
				size = file_size(f);
				time = last_write_time(f);
#endif
			}
			catch (std::exception&) {}

			// in compact mode the size has to match exactly. In
			// full allocation mode it may be bigger (see the
			// comment above), but never smaller
			if ((compact_mode && size != s->first)
				|| (!compact_mode && size < s->first))
			{
				if (error) *error = "filesize mismatch for file '"
					+ i->path.native_file_string()
					+ "', size: " + boost::lexical_cast<std::string>(size)
					+ ", expected to be " + boost::lexical_cast<std::string>(s->first)
					+ " bytes";
				return false;
			}
			if ((compact_mode && time != s->second)
				|| (!compact_mode && time < s->second))
			{
				if (error) *error = "timestamp mismatch for file '"
					+ i->path.native_file_string()
					+ "', modification date: " + boost::lexical_cast<std::string>(time)
					+ ", expected to have modification date "
					+ boost::lexical_cast<std::string>(s->second);
				return false;
			}
		}
		return true;
	}
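
	// thread_safe_storage and slot_lock implement per-slot mutual
	// exclusion: constructing a slot_lock blocks until the requested
	// slot is free, marks it busy for the lifetime of the lock, and
	// wakes up any waiting threads when it goes out of scope (RAII).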
	struct thread_safe_storage
	{
		thread_safe_storage(std::size_t n)
			: slots(n, false)
		{}

		boost::mutex mutex;
		boost::condition condition;
		std::vector<bool> slots;
	};

	struct slot_lock
	{
		slot_lock(thread_safe_storage& s, int slot_)
			: storage_(s)
			, slot(slot_)
		{
			assert(slot_ >= 0 && slot_ < (int)s.slots.size());
			boost::mutex::scoped_lock lock(storage_.mutex);

			while (storage_.slots[slot])
				storage_.condition.wait(lock);
			storage_.slots[slot] = true;
		}

		~slot_lock()
		{
			storage_.slots[slot] = false;
			storage_.condition.notify_all();
		}

		thread_safe_storage& storage_;
		int slot;
	};
	class storage : public storage_interface, thread_safe_storage, boost::noncopyable
	{
	public:
		storage(torrent_info const& info, path const& path, file_pool& fp)
			: thread_safe_storage(info.num_pieces())
			, m_info(info)
			, m_files(fp)
		{
			assert(info.begin_files() != info.end_files());
			m_save_path = complete(path);
			assert(m_save_path.is_complete());
		}

		void release_files();
		void initialize(bool allocate_files);
		bool move_storage(path save_path);
		size_type read(char* buf, int slot, int offset, int size);
		void write(const char* buf, int slot, int offset, int size);
		void move_slot(int src_slot, int dst_slot);
		void swap_slots(int slot1, int slot2);
		void swap_slots3(int slot1, int slot2, int slot3);
		bool verify_resume_data(entry& rd, std::string& error);
		void write_resume_data(entry& rd) const;
#ifndef NDEBUG
		// declaration for the debug-only helper defined further down
		void shuffle();
#endif

		size_type read_impl(char* buf, int slot, int offset, int size, bool fill_zero);

		~storage()
		{
			m_files.release(this);
		}

		torrent_info const& m_info;
		path m_save_path;
		// the file pool is typically stored in
		// the session, to make all storage
		// instances use the same pool
		file_pool& m_files;

		// temporary storage for moving pieces
		std::vector<char> m_scratch_buffer;
	};
	void storage::initialize(bool allocate_files)
	{
		// first, create all missing directories
		path last_path;
		for (torrent_info::file_iterator file_iter = m_info.begin_files(),
			end_iter = m_info.end_files(); file_iter != end_iter; ++file_iter)
		{
			path dir = (m_save_path / file_iter->path).branch_path();

			if (dir != last_path)
			{
				last_path = dir;

#if defined(_WIN32) && defined(UNICODE) && BOOST_VERSION < 103400
				if (!exists_win(last_path))
					create_directories_win(last_path);
#else
				if (!exists(last_path))
					create_directories(last_path);
#endif
			}

			// if the file is empty, just create it. But also make sure
			// the directory exists.
			if (file_iter->size == 0)
			{
				file(m_save_path / file_iter->path, file::out);
				continue;
			}

			if (allocate_files)
			{
				m_files.open_file(this, m_save_path / file_iter->path, file::in | file::out)
					->set_size(file_iter->size);
			}
		}
	}
	void storage::release_files()
	{
		m_files.release(this);
		std::vector<char>().swap(m_scratch_buffer);
	}
	void storage::write_resume_data(entry& rd) const
	{
		std::vector<std::pair<size_type, std::time_t> > file_sizes
			= get_filesizes(m_info, m_save_path);

		rd["file sizes"] = entry::list_type();
		entry::list_type& fl = rd["file sizes"].list();
		for (std::vector<std::pair<size_type, std::time_t> >::iterator i
			= file_sizes.begin(), end(file_sizes.end()); i != end; ++i)
		{
			entry::list_type p;
			p.push_back(entry(i->first));
			p.push_back(entry(i->second));
			fl.push_back(entry(p));
		}
	}
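
	// the "file sizes" entry written above (and read back in
	// verify_resume_data below) is a bencoded list with one
	// [size, modification time] pair per file in the torrent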
	bool storage::verify_resume_data(entry& rd, std::string& error)
	{
		std::vector<std::pair<size_type, std::time_t> > file_sizes;
		entry::list_type& l = rd["file sizes"].list();

		for (entry::list_type::iterator i = l.begin();
			i != l.end(); ++i)
		{
			file_sizes.push_back(std::pair<size_type, std::time_t>(
				i->list().front().integer()
				, i->list().back().integer()));
		}

		if (file_sizes.empty())
		{
			error = "the number of files in resume data is 0";
			return false;
		}

		entry::list_type& slots = rd["slots"].list();
		bool seed = int(slots.size()) == m_info.num_pieces()
			&& std::find_if(slots.begin(), slots.end()
				, boost::bind<bool>(std::less<int>()
					, boost::bind((size_type const& (entry::*)() const)
						&entry::integer, _1), 0)) == slots.end();

		bool full_allocation_mode = false;
		try
		{
			full_allocation_mode = rd["allocation"].string() == "full";
		}
		catch (std::exception&) {}

		if (seed)
		{
			if (m_info.num_files() != (int)file_sizes.size())
			{
				error = "the number of files does not match the torrent (num: "
					+ boost::lexical_cast<std::string>(file_sizes.size()) + " actual: "
					+ boost::lexical_cast<std::string>(m_info.num_files()) + ")";
				return false;
			}

			std::vector<std::pair<size_type, std::time_t> >::iterator
				fs = file_sizes.begin();
			// the resume data says we have the entire torrent
			// make sure the file sizes are the right ones
			for (torrent_info::file_iterator i = m_info.begin_files()
				, end(m_info.end_files()); i != end; ++i, ++fs)
			{
				if (i->size != fs->first)
				{
					error = "file size for '" + i->path.native_file_string()
						+ "' was expected to be "
						+ boost::lexical_cast<std::string>(i->size) + " bytes";
					return false;
				}
			}
			return true;
		}

		return match_filesizes(m_info, m_save_path, file_sizes
			, !full_allocation_mode, &error);
	}
	// returns true on success
	bool storage::move_storage(path save_path)
	{
		path old_path;
		path new_path;

		save_path = complete(save_path);

#if defined(_WIN32) && defined(UNICODE) && BOOST_VERSION < 103400
		std::wstring wsave_path(safe_convert(save_path.native_file_string()));
		if (!exists_win(save_path))
		{
			CreateDirectory(wsave_path.c_str(), 0);
		}
		else if ((GetFileAttributes(wsave_path.c_str()) & FILE_ATTRIBUTE_DIRECTORY) == 0)
		{
			return false;
		}
#else
		if (!exists(save_path))
			create_directory(save_path);
		else if (!is_directory(save_path))
			return false;
#endif

		m_files.release(this);

		old_path = m_save_path / m_info.name();
		new_path = save_path / m_info.name();

		try
		{
#if defined(_WIN32) && defined(UNICODE) && BOOST_VERSION < 103400
			rename_win(old_path, new_path);
#else
			rename(old_path, new_path);
#endif
			m_save_path = save_path;
			return true;
		}
		catch (std::exception&) {}
		return false;
	}
#ifndef NDEBUG

	void storage::shuffle()
	{
		int num_pieces = m_info.num_pieces();

		std::vector<int> pieces(num_pieces);
		for (std::vector<int>::iterator i = pieces.begin();
			i != pieces.end(); ++i)
		{
			*i = static_cast<int>(i - pieces.begin());
		}
		std::srand((unsigned int)std::time(0));
		std::vector<int> targets(pieces);
		std::random_shuffle(pieces.begin(), pieces.end());
		std::random_shuffle(targets.begin(), targets.end());

		for (int i = 0; i < (std::max)(num_pieces / 50, 1); ++i)
		{
			const int slot_index = targets[i];
			const int piece_index = pieces[i];
			const int slot_size = static_cast<int>(m_info.piece_size(slot_index));
			std::vector<char> buf(slot_size);
			read(&buf[0], piece_index, 0, slot_size);
			write(&buf[0], slot_index, 0, slot_size);
		}
	}

#endif
	void storage::move_slot(int src_slot, int dst_slot)
	{
		int piece_size = m_info.piece_size(dst_slot);
		m_scratch_buffer.resize(piece_size);
		read_impl(&m_scratch_buffer[0], src_slot, 0, piece_size, true);
		write(&m_scratch_buffer[0], dst_slot, 0, piece_size);
	}
	void storage::swap_slots(int slot1, int slot2)
	{
		// the size of the target slot is the size of the piece
		int piece_size = m_info.piece_length();
		int piece1_size = m_info.piece_size(slot2);
		int piece2_size = m_info.piece_size(slot1);
		m_scratch_buffer.resize(piece_size * 2);
		read_impl(&m_scratch_buffer[0], slot1, 0, piece1_size, true);
		read_impl(&m_scratch_buffer[piece_size], slot2, 0, piece2_size, true);
		write(&m_scratch_buffer[0], slot2, 0, piece1_size);
		write(&m_scratch_buffer[piece_size], slot1, 0, piece2_size);
	}
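
	// swap_slots3 rotates the data of three slots using the scratch
	// buffer: slot1's data ends up in slot2, slot2's data in slot3,
	// and slot3's data in slot1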
	void storage::swap_slots3(int slot1, int slot2, int slot3)
	{
		// the size of the target slot is the size of the piece
		int piece_size = m_info.piece_length();
		int piece1_size = m_info.piece_size(slot2);
		int piece2_size = m_info.piece_size(slot3);
		int piece3_size = m_info.piece_size(slot1);
		m_scratch_buffer.resize(piece_size * 2);
		read_impl(&m_scratch_buffer[0], slot1, 0, piece1_size, true);
		read_impl(&m_scratch_buffer[piece_size], slot2, 0, piece2_size, true);
		write(&m_scratch_buffer[0], slot2, 0, piece1_size);
		read_impl(&m_scratch_buffer[0], slot3, 0, piece3_size, true);
		write(&m_scratch_buffer[piece_size], slot3, 0, piece2_size);
		write(&m_scratch_buffer[0], slot1, 0, piece3_size);
	}
	size_type storage::read(
		char* buf
		, int slot
		, int offset
		, int size)
	{
		return read_impl(buf, slot, offset, size, false);
	}
	size_type storage::read_impl(
		char* buf
		, int slot
		, int offset
		, int size
		, bool fill_zero)
	{
		assert(buf != 0);
		assert(slot >= 0 && slot < m_info.num_pieces());
		assert(offset >= 0);
		assert(offset < m_info.piece_size(slot));
		assert(size > 0);

		slot_lock lock(*this, slot);

#ifndef NDEBUG
		std::vector<file_slice> slices
			= m_info.map_block(slot, offset, size);
		assert(!slices.empty());
#endif

		size_type start = slot * (size_type)m_info.piece_length() + offset;
		assert(start + size <= m_info.total_size());

		// find the file iterator and file offset
		size_type file_offset = start;
		std::vector<file_entry>::const_iterator file_iter;

		for (file_iter = m_info.begin_files();;)
		{
			if (file_offset < file_iter->size)
				break;

			file_offset -= file_iter->size;
			++file_iter;
		}

		int buf_pos = 0;
		boost::shared_ptr<file> in(m_files.open_file(
			this, m_save_path / file_iter->path, file::in));

		assert(file_offset < file_iter->size);

		assert(slices[0].offset == file_offset);

		size_type new_pos = in->seek(file_offset);
		if (new_pos != file_offset)
		{
			// the file was not big enough
			if (!fill_zero)
				throw file_error("slot has no storage");
			std::memset(buf + buf_pos, 0, size - buf_pos);
			return size;
		}

#ifndef NDEBUG
		size_type in_tell = in->tell();
		assert(in_tell == file_offset);
#endif

		int left_to_read = size;
		int slot_size = static_cast<int>(m_info.piece_size(slot));

		if (offset + left_to_read > slot_size)
			left_to_read = slot_size - offset;

		assert(left_to_read >= 0);

		size_type result = left_to_read;

#ifndef NDEBUG
		int counter = 0;
#endif

		while (left_to_read > 0)
		{
			int read_bytes = left_to_read;
			if (file_offset + read_bytes > file_iter->size)
				read_bytes = static_cast<int>(file_iter->size - file_offset);

			if (read_bytes > 0)
			{
#ifndef NDEBUG
				assert(int(slices.size()) > counter);
				size_type slice_size = slices[counter].size;
				assert(slice_size == read_bytes);
				assert(m_info.file_at(slices[counter].file_index).path
					== file_iter->path);
#endif

				size_type actual_read = in->read(buf + buf_pos, read_bytes);

				if (read_bytes != actual_read)
				{
					// the file was not big enough
					if (actual_read > 0) buf_pos += actual_read;
					if (!fill_zero)
						throw file_error("slot has no storage");
					std::memset(buf + buf_pos, 0, size - buf_pos);
					return size;
				}

				left_to_read -= read_bytes;
				buf_pos += read_bytes;
				assert(buf_pos >= 0);
				file_offset += read_bytes;
			}

			if (left_to_read > 0)
			{
				++file_iter;
#ifndef NDEBUG
				// empty files are not returned by map_block, so if
				// this file was empty, don't increment the slice counter
				if (read_bytes > 0) ++counter;
#endif
				path path = m_save_path / file_iter->path;

				file_offset = 0;
				in = m_files.open_file(
					this, path, file::in);
				in->seek(0);
			}
		}
		return result;
	}
	// throws file_error if it fails to write
	void storage::write(
		const char* buf
		, int slot
		, int offset
		, int size)
	{
		assert(buf != 0);
		assert(slot >= 0);
		assert(slot < m_info.num_pieces());
		assert(offset >= 0);
		assert(size > 0);

		slot_lock lock(*this, slot);

#ifndef NDEBUG
		std::vector<file_slice> slices
			= m_info.map_block(slot, offset, size);
		assert(!slices.empty());
#endif

		size_type start = slot * (size_type)m_info.piece_length() + offset;

		// find the file iterator and file offset
		size_type file_offset = start;
		std::vector<file_entry>::const_iterator file_iter;

		for (file_iter = m_info.begin_files();;)
		{
			if (file_offset < file_iter->size)
				break;

			file_offset -= file_iter->size;
			++file_iter;
			assert(file_iter != m_info.end_files());
		}

		path p(m_save_path / file_iter->path);
		boost::shared_ptr<file> out = m_files.open_file(
			this, p, file::out | file::in);

		assert(file_offset < file_iter->size);
		assert(slices[0].offset == file_offset);

		size_type pos = out->seek(file_offset);

		if (pos != file_offset)
		{
			std::stringstream s;
			s << "no storage for slot " << slot;
			throw file_error(s.str());
		}

		int left_to_write = size;
		int slot_size = static_cast<int>(m_info.piece_size(slot));

		if (offset + left_to_write > slot_size)
			left_to_write = slot_size - offset;

		assert(left_to_write >= 0);

		int buf_pos = 0;
#ifndef NDEBUG
		int counter = 0;
#endif
		while (left_to_write > 0)
		{
			int write_bytes = left_to_write;
			if (file_offset + write_bytes > file_iter->size)
			{
				assert(file_iter->size >= file_offset);
				write_bytes = static_cast<int>(file_iter->size - file_offset);
			}

			if (write_bytes > 0)
			{
				assert(int(slices.size()) > counter);
				assert(slices[counter].size == write_bytes);
				assert(m_info.file_at(slices[counter].file_index).path
					== file_iter->path);

				assert(buf_pos >= 0);
				assert(write_bytes >= 0);
				size_type written = out->write(buf + buf_pos, write_bytes);

				if (written != write_bytes)
				{
					std::stringstream s;
					s << "no storage for slot " << slot;
					throw file_error(s.str());
				}

				left_to_write -= write_bytes;
				buf_pos += write_bytes;
				assert(buf_pos >= 0);
				file_offset += write_bytes;
				assert(file_offset <= file_iter->size);
			}

			if (left_to_write > 0)
			{
#ifndef NDEBUG
				if (write_bytes > 0) ++counter;
#endif
				++file_iter;

				assert(file_iter != m_info.end_files());
				path p = m_save_path / file_iter->path;
				file_offset = 0;
				out = m_files.open_file(
					this, p, file::out | file::in);

				out->seek(0);
			}
		}
	}
	storage_interface* default_storage_constructor(torrent_info const& ti
		, boost::filesystem::path const& path, file_pool& fp)
	{
		return new storage(ti, path, fp);
	}
	bool supports_sparse_files(path const& p)
	{
		assert(p.is_complete());
#if defined(_WIN32)
		// assume windows API is available
		DWORD max_component_len = 0;
		DWORD volume_flags = 0;
		std::string root_device = p.root_name() + "\\";
#if defined(UNICODE)
		std::wstring wph(safe_convert(root_device));
		bool ret = ::GetVolumeInformation(wph.c_str(), 0
			, 0, 0, &max_component_len, &volume_flags, 0, 0);
#else
		bool ret = ::GetVolumeInformation(root_device.c_str(), 0
			, 0, 0, &max_component_len, &volume_flags, 0, 0);
#endif

		if (!ret) return false;
		if (volume_flags & FILE_SUPPORTS_SPARSE_FILES)
			return true;
#endif

#if defined(__APPLE__) || defined(__linux__)
		// find the last existing directory of the save path
		path query_path = p;
		while (!query_path.empty() && !exists(query_path))
			query_path = query_path.branch_path();
#endif

#if defined(__APPLE__)

		struct statfs fsinfo;
		int ret = statfs(query_path.native_directory_string().c_str(), &fsinfo);
		if (ret != 0) return false;

		attrlist request;
		request.bitmapcount = ATTR_BIT_MAP_COUNT;
		request.reserved = 0;
		request.commonattr = 0;
		request.volattr = ATTR_VOL_CAPABILITIES;
		request.dirattr = 0;
		request.fileattr = 0;
		request.forkattr = 0;

		struct vol_capabilities_attr_buf
		{
			unsigned long length;
			vol_capabilities_attr_t info;
		} vol_cap;

		ret = getattrlist(fsinfo.f_mntonname, &request, &vol_cap
			, sizeof(vol_cap), 0);
		if (ret != 0) return false;

		if (vol_cap.info.capabilities[VOL_CAPABILITIES_FORMAT]
			& (VOL_CAP_FMT_SPARSE_FILES | VOL_CAP_FMT_ZERO_RUNS))
		{
			return true;
		}

		return true;
#endif

#if defined(__linux__)
		struct statfs buf;
		int err = statfs(query_path.native_directory_string().c_str(), &buf);
		if (err == 0)
		{
#ifndef NDEBUG
			std::cerr << "buf.f_type " << std::hex << buf.f_type << std::endl;
#endif
			switch (buf.f_type)
			{
				case 0x5346544e: // NTFS
				case 0xEF51: // EXT2 OLD
				case 0xEF53: // EXT2 and EXT3
				case 0x00011954: // UFS
				case 0x52654973: // ReiserFS
				case 0x58465342: // XFS
					return true;
			}
		}
#ifndef NDEBUG
		else
		{
			std::cerr << "statfs returned " << err << std::endl;
			std::cerr << "errno: " << errno << std::endl;
			std::cerr << "path: " << query_path.native_directory_string() << std::endl;
		}
#endif
#endif

		// TODO: POSIX implementation
		return false;
	}
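
	// piece_manager uses the result of supports_sparse_files() to
	// decide whether pieces have to be explicitly zero-filled when
	// slots are allocated (see m_fill_mode below)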
	// -- piece_manager -----------------------------------------------------

	class piece_manager::impl
	{
	friend class invariant_access;
	public:

		impl(
			torrent_info const& info
			, path const& path
			, file_pool& fp
			, storage_constructor_type sc);

		bool check_fastresume(
			aux::piece_checker_data& d
			, std::vector<bool>& pieces
			, int& num_pieces
			, bool compact_mode);

		std::pair<bool, float> check_files(
			std::vector<bool>& pieces
			, int& num_pieces, boost::recursive_mutex& mutex);

		void release_files();

		bool allocate_slots(int num_slots, bool abort_on_disk = false);
		void mark_failed(int index);
		unsigned long piece_crc(
			int slot_index
			, int block_size
			, piece_picker::block_info const* bi);

		int slot_for_piece(int piece_index) const;

		size_type read(
			char* buf
			, int piece_index
			, int offset
			, int size);

		void write(
			const char* buf
			, int piece_index
			, int offset
			, int size);

		path const& save_path() const
		{ return m_save_path; }

		bool move_storage(path save_path)
		{
			if (m_storage->move_storage(save_path))
			{
				m_save_path = complete(save_path);
				return true;
			}
			return false;
		}

		void export_piece_map(std::vector<int>& p) const;

		// returns the slot currently associated with the given
		// piece or assigns the given piece_index to a free slot

		int identify_data(
			const std::vector<char>& piece_data
			, int current_slot
			, std::vector<bool>& have_pieces
			, int& num_pieces
			, const std::multimap<sha1_hash, int>& hash_to_piece
			, boost::recursive_mutex& mutex);

		int allocate_slot_for_piece(int piece_index);
#ifndef NDEBUG
		void check_invariant() const;
#ifdef TORRENT_STORAGE_DEBUG
		void debug_log() const;
#endif
#endif
		boost::scoped_ptr<storage_interface> m_storage;

		// if this is true, pieces are always allocated at the
		// lowest possible slot index. If it is false, pieces
		// are always written to their final place immediately
		bool m_compact_mode;

		// if this is true, pieces that haven't been downloaded
		// will be filled with zeroes. Not filling with zeroes
		// will not work in some cases (where a seek cannot pass
		// the end of the file).
		bool m_fill_mode;

		// a bitmask representing the pieces we have
		std::vector<bool> m_have_piece;

		torrent_info const& m_info;

		// slots that haven't had any file storage allocated
		std::vector<int> m_unallocated_slots;
		// slots that have file storage, but aren't assigned to a piece
		std::vector<int> m_free_slots;

		enum
		{
			has_no_slot = -3 // the piece has no storage
		};

		// maps piece indices to slots. If a piece doesn't
		// have any storage, it is set to 'has_no_slot'
		std::vector<int> m_piece_to_slot;

		enum
		{
			unallocated = -1, // the slot is unallocated
			unassigned = -2   // the slot is allocated but not assigned to a piece
		};

		// maps slots to piece indices, if a slot doesn't have a piece
		// it can either be 'unassigned' or 'unallocated'
		std::vector<int> m_slot_to_piece;

		path m_save_path;

		mutable boost::recursive_mutex m_mutex;

		bool m_allocating;
		boost::mutex m_allocating_monitor;
		boost::condition m_allocating_condition;

		// these states are used while checking/allocating the torrent

		enum {
			// the default initial state
			state_none,
			// the file checking is complete
			state_finished,
			// creating the directories
			state_create_files,
			// checking the files
			state_full_check,
			// allocating files (in non-compact mode)
			state_allocating
		} m_state;
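		// the typical sequence of states is:
		// state_create_files -> state_full_check -> state_allocating
		// -> state_finished. check_fastresume() may jump straight to
		// state_allocating or state_finished when valid resume data
		// is available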
		int m_current_slot;

		std::vector<char> m_piece_data;

		// this maps a piece hash to piece index. It will be
		// built the first time it is used (to save time if it
		// isn't needed)
		std::multimap<sha1_hash, int> m_hash_to_piece;
	};
	piece_manager::impl::impl(
		torrent_info const& info
		, path const& save_path
		, file_pool& fp
		, storage_constructor_type sc)
		: m_storage(sc(info, save_path, fp))
		, m_compact_mode(false)
		, m_fill_mode(true)
		, m_info(info)
		, m_save_path(complete(save_path))
		, m_allocating(false)
		, m_state(state_none) // start in the default state
	{
		m_fill_mode = !supports_sparse_files(save_path);
	}
	piece_manager::piece_manager(
		torrent_info const& info
		, path const& save_path
		, file_pool& fp
		, storage_constructor_type sc)
		: m_pimpl(new impl(info, save_path, fp, sc))
	{
	}

	piece_manager::~piece_manager()
	{
	}

	void piece_manager::write_resume_data(entry& rd) const
	{
		m_pimpl->m_storage->write_resume_data(rd);
	}

	bool piece_manager::verify_resume_data(entry& rd, std::string& error)
	{
		return m_pimpl->m_storage->verify_resume_data(rd, error);
	}

	void piece_manager::release_files()
	{
		m_pimpl->release_files();
	}
	void piece_manager::impl::release_files()
	{
		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------

		m_storage->release_files();
	}

	void piece_manager::impl::export_piece_map(
		std::vector<int>& p) const
	{
		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------

		INVARIANT_CHECK;

		p.clear();
		std::vector<int>::const_reverse_iterator last;
		for (last = m_slot_to_piece.rbegin();
			last != m_slot_to_piece.rend(); ++last)
		{
			if (*last != unallocated) break;
		}

		for (std::vector<int>::const_iterator i =
			m_slot_to_piece.begin();
			i != last.base(); ++i)
		{
			p.push_back(*i);
		}
	}
	bool piece_manager::compact_allocation() const
	{ return m_pimpl->m_compact_mode; }

	void piece_manager::export_piece_map(
		std::vector<int>& p) const
	{
		m_pimpl->export_piece_map(p);
	}
	void piece_manager::impl::mark_failed(int piece_index)
	{
		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------

		INVARIANT_CHECK;

		assert(piece_index >= 0 && piece_index < (int)m_piece_to_slot.size());
		assert(m_piece_to_slot[piece_index] >= 0);

		int slot_index = m_piece_to_slot[piece_index];

		assert(slot_index >= 0);

		m_slot_to_piece[slot_index] = unassigned;
		m_piece_to_slot[piece_index] = has_no_slot;
		m_free_slots.push_back(slot_index);
	}

	void piece_manager::mark_failed(int index)
	{
		m_pimpl->mark_failed(index);
	}
	bool piece_manager::is_allocating() const
	{
		return m_pimpl->m_state
			== impl::state_allocating;
	}

	int piece_manager::slot_for_piece(int piece_index) const
	{
		return m_pimpl->slot_for_piece(piece_index);
	}

	int piece_manager::impl::slot_for_piece(int piece_index) const
	{
		assert(piece_index >= 0 && piece_index < m_info.num_pieces());
		return m_piece_to_slot[piece_index];
	}
	unsigned long piece_manager::piece_crc(
		int index
		, int block_size
		, piece_picker::block_info const* bi)
	{
		return m_pimpl->piece_crc(index, block_size, bi);
	}

	unsigned long piece_manager::impl::piece_crc(
		int slot_index
		, int block_size
		, piece_picker::block_info const* bi)
	try
	{
		assert(slot_index >= 0);
		assert(slot_index < m_info.num_pieces());
		assert(block_size > 0);

		adler32_crc crc;
		std::vector<char> buf(block_size);
		int num_blocks = static_cast<int>(m_info.piece_size(slot_index)) / block_size;
		int last_block_size = static_cast<int>(m_info.piece_size(slot_index)) % block_size;
		if (last_block_size == 0) last_block_size = block_size;

		for (int i = 0; i < num_blocks-1; ++i)
		{
			if (!bi[i].finished) continue;
			m_storage->read(
				&buf[0]
				, slot_index
				, i * block_size
				, block_size);
			crc.update(&buf[0], block_size);
		}
		if (bi[num_blocks - 1].finished)
		{
			m_storage->read(
				&buf[0]
				, slot_index
				, block_size * (num_blocks - 1)
				, last_block_size);
			crc.update(&buf[0], last_block_size);
		}
		return crc.final();
	}
	catch (std::exception&)
	{
		return 0;
	}
	size_type piece_manager::impl::read(
		char* buf
		, int piece_index
		, int offset
		, int size)
	{
		assert(buf);
		assert(offset >= 0);
		assert(size > 0);
		assert(piece_index >= 0 && piece_index < (int)m_piece_to_slot.size());
		assert(m_piece_to_slot[piece_index] >= 0
			&& m_piece_to_slot[piece_index] < (int)m_slot_to_piece.size());
		int slot = m_piece_to_slot[piece_index];
		assert(slot >= 0 && slot < (int)m_slot_to_piece.size());
		return m_storage->read(buf, slot, offset, size);
	}

	size_type piece_manager::read(
		char* buf
		, int piece_index
		, int offset
		, int size)
	{
		return m_pimpl->read(buf, piece_index, offset, size);
	}
	void piece_manager::impl::write(
		const char* buf
		, int piece_index
		, int offset
		, int size)
	{
		assert(buf);
		assert(offset >= 0);
		assert(size > 0);
		assert(piece_index >= 0 && piece_index < (int)m_piece_to_slot.size());
		int slot = allocate_slot_for_piece(piece_index);
		assert(slot >= 0 && slot < (int)m_slot_to_piece.size());
		m_storage->write(buf, slot, offset, size);
	}

	void piece_manager::write(
		const char* buf
		, int piece_index
		, int offset
		, int size)
	{
		m_pimpl->write(buf, piece_index, offset, size);
	}
	int piece_manager::impl::identify_data(
		const std::vector<char>& piece_data
		, int current_slot
		, std::vector<bool>& have_pieces
		, int& num_pieces
		, const std::multimap<sha1_hash, int>& hash_to_piece
		, boost::recursive_mutex& mutex)
	{
//		INVARIANT_CHECK;

		assert((int)have_pieces.size() == m_info.num_pieces());

		const int piece_size = static_cast<int>(m_info.piece_length());
		const int last_piece_size = static_cast<int>(m_info.piece_size(
			m_info.num_pieces() - 1));

		assert((int)piece_data.size() >= last_piece_size);

		// calculate a small digest, with the same
		// size as the last piece. And a large digest
		// which has the same size as a normal piece
		hasher small_digest;
		small_digest.update(&piece_data[0], last_piece_size);
		hasher large_digest(small_digest);
		assert(piece_size - last_piece_size >= 0);
		if (piece_size - last_piece_size > 0)
		{
			large_digest.update(
				&piece_data[last_piece_size]
				, piece_size - last_piece_size);
		}
		sha1_hash large_hash = large_digest.final();
		sha1_hash small_hash = small_digest.final();

		typedef std::multimap<sha1_hash, int>::const_iterator map_iter;
		map_iter begin1;
		map_iter end1;
		map_iter begin2;
		map_iter end2;

		// makes the lookups for the small digest and the large digest
		boost::tie(begin1, end1) = hash_to_piece.equal_range(small_hash);
		boost::tie(begin2, end2) = hash_to_piece.equal_range(large_hash);

		// copy all potential piece indices into this vector
		std::vector<int> matching_pieces;
		for (map_iter i = begin1; i != end1; ++i)
			matching_pieces.push_back(i->second);
		for (map_iter i = begin2; i != end2; ++i)
			matching_pieces.push_back(i->second);

		// no piece matched the data in the slot
		if (matching_pieces.empty())
			return unassigned;

		// ------------------------------------------
		// CHECK IF THE PIECE IS IN ITS CORRECT PLACE
		// ------------------------------------------

		if (std::find(
			matching_pieces.begin()
			, matching_pieces.end()
			, current_slot) != matching_pieces.end())
		{
			const int piece_index = current_slot;

			// lock because we're writing to have_pieces
			boost::recursive_mutex::scoped_lock l(mutex);

			if (have_pieces[piece_index])
			{
				// we have already found a piece with
				// this index.
				int other_slot = m_piece_to_slot[piece_index];
				assert(other_slot >= 0);

				// take one of the other matching pieces
				// that hasn't already been assigned
				int other_piece = -1;
				for (std::vector<int>::iterator i = matching_pieces.begin();
					i != matching_pieces.end(); ++i)
				{
					if (have_pieces[*i] || *i == piece_index) continue;
					other_piece = *i;
					break;
				}
				if (other_piece >= 0)
				{
					// replace the old slot with 'other_piece'
					assert(have_pieces[other_piece] == false);
					have_pieces[other_piece] = true;
					m_slot_to_piece[other_slot] = other_piece;
					m_piece_to_slot[other_piece] = other_slot;
					++num_pieces;
				}
				else
				{
					// this index is the only piece with this
					// hash. The previous slot we found with
					// this hash must be the same piece. Mark
					// that piece as unassigned, since this slot
					// is the correct place for the piece.
					m_slot_to_piece[other_slot] = unassigned;
					m_free_slots.push_back(other_slot);
				}
				assert(m_piece_to_slot[piece_index] != current_slot);
				assert(m_piece_to_slot[piece_index] >= 0);
				m_piece_to_slot[piece_index] = has_no_slot;
#ifndef NDEBUG
				// to make the assert happy, a few lines down
				have_pieces[piece_index] = false;
#endif
			}
			else
			{
				++num_pieces;
			}

			assert(have_pieces[piece_index] == false);
			assert(m_piece_to_slot[piece_index] == has_no_slot);
			have_pieces[piece_index] = true;

			return piece_index;
		}

		// find a matching piece that hasn't
		// already been assigned
		int free_piece = unassigned;
		for (std::vector<int>::iterator i = matching_pieces.begin();
			i != matching_pieces.end(); ++i)
		{
			if (have_pieces[*i]) continue;
			free_piece = *i;
			break;
		}

		if (free_piece >= 0)
		{
			// lock because we're writing to have_pieces
			boost::recursive_mutex::scoped_lock l(mutex);

			assert(have_pieces[free_piece] == false);
			assert(m_piece_to_slot[free_piece] == has_no_slot);
			have_pieces[free_piece] = true;
			++num_pieces;

			return free_piece;
		}
		else
		{
			assert(free_piece == unassigned);
			return unassigned;
		}
	}
	// check if the fastresume data is up to date
	// if it is, use it and return true. If it
	// isn't return false and the full check
	// will be run
	bool piece_manager::impl::check_fastresume(
		aux::piece_checker_data& data
		, std::vector<bool>& pieces
		, int& num_pieces, bool compact_mode)
	{
		assert(m_info.piece_length() > 0);
		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------

		INVARIANT_CHECK;

		m_compact_mode = compact_mode;

		// This will corrupt the storage
		// use while debugging to find
		// states that cannot be scanned
		// by check_pieces.
		// m_storage->shuffle();

		m_piece_to_slot.resize(m_info.num_pieces(), has_no_slot);
		m_slot_to_piece.resize(m_info.num_pieces(), unallocated);
		m_free_slots.clear();
		m_unallocated_slots.clear();

		pieces.clear();
		pieces.resize(m_info.num_pieces(), false);
		num_pieces = 0;

		// if we have fast-resume info
		// use it instead of doing the actual checking
		if (!data.piece_map.empty()
			&& data.piece_map.size() <= m_slot_to_piece.size())
		{
			for (int i = 0; i < (int)data.piece_map.size(); ++i)
			{
				m_slot_to_piece[i] = data.piece_map[i];
				if (data.piece_map[i] >= 0)
				{
					m_piece_to_slot[data.piece_map[i]] = i;
					int found_piece = data.piece_map[i];

					// if the piece is not in the unfinished list
					// we have all of it
					if (std::find_if(
						data.unfinished_pieces.begin()
						, data.unfinished_pieces.end()
						, piece_picker::has_index(found_piece))
						== data.unfinished_pieces.end())
					{
						++num_pieces;
						pieces[found_piece] = true;
					}
				}
				else if (data.piece_map[i] == unassigned)
				{
					m_free_slots.push_back(i);
				}
				else
				{
					assert(data.piece_map[i] == unallocated);
					m_unallocated_slots.push_back(i);
				}
			}

			m_unallocated_slots.reserve(int(pieces.size() - data.piece_map.size()));
			for (int i = (int)data.piece_map.size(); i < (int)pieces.size(); ++i)
			{
				m_unallocated_slots.push_back(i);
			}

			if (!m_compact_mode && !m_unallocated_slots.empty())
			{
				m_state = state_allocating;
				return false;
			}
			else
			{
				m_state = state_finished;
				return true;
			}
		}

		m_state = state_create_files;
		return false;
	}
	// performs the full check and full allocation
	// (if necessary). returns true if finished and
	// false if it should be called again
	// the second return value is the progress the
	// file check is at. 0 is nothing done, and 1
	// is finished
	std::pair<bool, float> piece_manager::impl::check_files(
		std::vector<bool>& pieces, int& num_pieces, boost::recursive_mutex& mutex)
	{
		assert(num_pieces == std::count(pieces.begin(), pieces.end(), true));

		if (m_state == state_allocating)
		{
			if (m_compact_mode)
			{
				m_state = state_finished;
				return std::make_pair(true, 1.f);
			}

			if (m_unallocated_slots.empty())
			{
				m_state = state_finished;
				return std::make_pair(true, 1.f);
			}

			// if we're not in compact mode, make sure the
			// pieces are spread out and placed at their
			// final position.
			assert(!m_unallocated_slots.empty());

			if (!m_fill_mode)
			{
				// if we're not filling the allocation
				// just make sure we move the current pieces
				// into place, and just skip all other
				// allocation
				// allocate_slots returns true if it had to
				// move any data
				allocate_slots(m_unallocated_slots.size(), true);
			}
			else
			{
				allocate_slots(1);
			}

			return std::make_pair(false, 1.f - (float)m_unallocated_slots.size()
				/ (float)m_slot_to_piece.size());
		}

		if (m_state == state_create_files)
		{
			m_storage->initialize(!m_fill_mode && !m_compact_mode);

			m_current_slot = 0;
			m_state = state_full_check;
			m_piece_data.resize(int(m_info.piece_length()));
			return std::make_pair(false, 0.f);
		}

		assert(m_state == state_full_check);

		// ------------------------
		//    DO THE FULL CHECK
		// ------------------------

		try
		{
			int piece_size = int(m_info.piece_size(m_current_slot));
			int num_read = m_storage->read(&m_piece_data[0]
				, m_current_slot, 0, piece_size);

			// if the file is incomplete, skip the rest of it
			if (num_read != piece_size)
				throw file_error("");

			if (m_hash_to_piece.empty())
			{
				for (int i = 0; i < m_info.num_pieces(); ++i)
				{
					m_hash_to_piece.insert(std::make_pair(m_info.hash_for_piece(i), i));
				}
			}

			int piece_index = identify_data(m_piece_data, m_current_slot
				, pieces, num_pieces, m_hash_to_piece, mutex);

			assert(num_pieces == std::count(pieces.begin(), pieces.end(), true));
			assert(piece_index == unassigned || piece_index >= 0);

			const bool this_should_move = piece_index >= 0 && m_slot_to_piece[piece_index] != unallocated;
			const bool other_should_move = m_piece_to_slot[m_current_slot] != has_no_slot;

			// check if this piece should be swapped with any other slot
			// this section will ensure that the storage is correctly sorted
			// libtorrent will never leave the storage in a state that
			// requires this sorting, but other clients may.

			// example of worst case:
			//                          | m_current_slot = 5
			//                          V
			//  +---+- - - +---+- - - +---+- -
			//  | x |      | 5 |      | 3 |     <- piece data in slots
			//  +---+- - - +---+- - - +---+- -
			//    3          y          5       <- slot index

			// in this example, the data in the m_current_slot (5)
			// is piece 3. It has to be moved into slot 3. The data
			// in slot y (piece 5) should be moved into the m_current_slot.
			// and the data in slot 3 (piece x) should be moved to slot y.

			// there are three possible cases.
			// 1. There's another piece that should be placed into this slot
			// 2. This piece should be placed into another slot.
			// 3. There's another piece that should be placed into this slot
			//    and this piece should be placed into another slot

			// swap piece_index with this slot

			// case 1
			if (this_should_move && !other_should_move)
			{
				assert(piece_index != m_current_slot);

				const int other_slot = piece_index;
				assert(other_slot >= 0);
				int other_piece = m_slot_to_piece[other_slot];

				m_slot_to_piece[other_slot] = piece_index;
				m_slot_to_piece[m_current_slot] = other_piece;
				m_piece_to_slot[piece_index] = piece_index;
				if (other_piece >= 0) m_piece_to_slot[other_piece] = m_current_slot;

				if (other_piece == unassigned)
				{
					std::vector<int>::iterator i =
						std::find(m_free_slots.begin(), m_free_slots.end(), other_slot);
					assert(i != m_free_slots.end());
					m_free_slots.erase(i);
					m_free_slots.push_back(m_current_slot);
				}

				if (other_piece >= 0)
					m_storage->swap_slots(other_slot, m_current_slot);
				else
					m_storage->move_slot(m_current_slot, other_slot);

				assert(m_slot_to_piece[m_current_slot] == unassigned
					|| m_piece_to_slot[m_slot_to_piece[m_current_slot]] == m_current_slot);
			}
			// case 2
			else if (!this_should_move && other_should_move)
			{
				assert(piece_index != m_current_slot);

				const int other_piece = m_current_slot;
				const int other_slot = m_piece_to_slot[other_piece];
				assert(other_slot >= 0);

				m_slot_to_piece[m_current_slot] = other_piece;
				m_slot_to_piece[other_slot] = piece_index;
				m_piece_to_slot[other_piece] = m_current_slot;

				if (piece_index == unassigned)
					m_free_slots.push_back(other_slot);

				if (piece_index >= 0)
				{
					m_piece_to_slot[piece_index] = other_slot;
					m_storage->swap_slots(other_slot, m_current_slot);
				}
				else
				{
					m_storage->move_slot(other_slot, m_current_slot);
				}
				assert(m_slot_to_piece[m_current_slot] == unassigned
					|| m_piece_to_slot[m_slot_to_piece[m_current_slot]] == m_current_slot);
			}
			else if (this_should_move && other_should_move)
			{
				assert(piece_index != m_current_slot);
				assert(piece_index >= 0);

				const int piece1 = m_slot_to_piece[piece_index];
				const int piece2 = m_current_slot;
				const int slot1 = piece_index;
				const int slot2 = m_piece_to_slot[piece2];

				assert(slot1 >= 0);
				assert(slot2 >= 0);
				assert(piece2 >= 0);

				if (slot1 == slot2)
				{
					// this means there are only two pieces involved in the swap
					assert(piece1 >= 0);

					// movement diagram:
					// +-------------------------------+
					// |                               |
					// +--> slot1 --> m_current_slot --+

					m_slot_to_piece[slot1] = piece_index;
					m_slot_to_piece[m_current_slot] = piece1;

					m_piece_to_slot[piece_index] = slot1;
					m_piece_to_slot[piece1] = m_current_slot;

					assert(piece1 == m_current_slot);
					assert(piece_index == slot1);

					m_storage->swap_slots(m_current_slot, slot1);

					assert(m_slot_to_piece[m_current_slot] == unassigned
						|| m_piece_to_slot[m_slot_to_piece[m_current_slot]] == m_current_slot);
				}
				else
				{
					assert(slot1 != slot2);
					assert(piece1 != piece2);

					// movement diagram:
					// +-----------------------------------------+
					// |                                         |
					// +--> slot1 --> slot2 --> m_current_slot --+

					m_slot_to_piece[slot1] = piece_index;
					m_slot_to_piece[slot2] = piece1;
					m_slot_to_piece[m_current_slot] = piece2;

					m_piece_to_slot[piece_index] = slot1;
					m_piece_to_slot[m_current_slot] = piece2;

					if (piece1 == unassigned)
					{
						std::vector<int>::iterator i =
							std::find(m_free_slots.begin(), m_free_slots.end(), slot1);
						assert(i != m_free_slots.end());
						m_free_slots.erase(i);
						m_free_slots.push_back(slot2);
					}

					if (piece1 >= 0)
					{
						m_piece_to_slot[piece1] = slot2;
						m_storage->swap_slots3(m_current_slot, slot1, slot2);
					}
					else
					{
						m_storage->move_slot(m_current_slot, slot1);
						m_storage->move_slot(slot2, m_current_slot);
					}

					assert(m_slot_to_piece[m_current_slot] == unassigned
						|| m_piece_to_slot[m_slot_to_piece[m_current_slot]] == m_current_slot);
				}
			}
			else
			{
				assert(m_piece_to_slot[m_current_slot] == has_no_slot || piece_index != m_current_slot);
				assert(m_slot_to_piece[m_current_slot] == unallocated);
				assert(piece_index == unassigned || m_piece_to_slot[piece_index] == has_no_slot);

				// the slot was identified as piece 'piece_index'
				if (piece_index != unassigned)
					m_piece_to_slot[piece_index] = m_current_slot;
				else
					m_free_slots.push_back(m_current_slot);

				m_slot_to_piece[m_current_slot] = piece_index;

				assert(m_slot_to_piece[m_current_slot] == unassigned
					|| m_piece_to_slot[m_slot_to_piece[m_current_slot]] == m_current_slot);
			}
		}
		catch (file_error&)
		{
			// find the file that failed, and skip all the blocks in that file
			size_type file_offset = 0;
			size_type current_offset = m_current_slot * m_info.piece_length();
			for (torrent_info::file_iterator i = m_info.begin_files();
				i != m_info.end_files(); ++i)
			{
				file_offset += i->size;
				if (file_offset > current_offset) break;
			}

			assert(file_offset > current_offset);
			int skip_blocks = static_cast<int>(
				(file_offset - current_offset + m_info.piece_length() - 1)
				/ m_info.piece_length());

			for (int i = m_current_slot; i < m_current_slot + skip_blocks; ++i)
			{
				assert(m_slot_to_piece[i] == unallocated);
				m_unallocated_slots.push_back(i);
			}

			// current slot will increase by one at the end of the for-loop too
			m_current_slot += skip_blocks - 1;
		}
		++m_current_slot;

		if (m_current_slot >= m_info.num_pieces())
		{
			assert(m_current_slot == m_info.num_pieces());

			// clear the memory we've been using
			std::vector<char>().swap(m_piece_data);
			std::multimap<sha1_hash, int>().swap(m_hash_to_piece);
			m_state = state_allocating;
			assert(num_pieces == std::count(pieces.begin(), pieces.end(), true));
			return std::make_pair(false, 1.f);
		}

		assert(num_pieces == std::count(pieces.begin(), pieces.end(), true));

		return std::make_pair(false, (float)m_current_slot / m_info.num_pieces());
	}
	bool piece_manager::check_fastresume(
		aux::piece_checker_data& d, std::vector<bool>& pieces
		, int& num_pieces, bool compact_mode)
	{
		return m_pimpl->check_fastresume(d, pieces, num_pieces, compact_mode);
	}

	std::pair<bool, float> piece_manager::check_files(
		std::vector<bool>& pieces
		, int& num_pieces
		, boost::recursive_mutex& mutex)
	{
		return m_pimpl->check_files(pieces, num_pieces, mutex);
	}
	int piece_manager::impl::allocate_slot_for_piece(int piece_index)
	{
		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------

//		INVARIANT_CHECK;

		assert(piece_index >= 0);
		assert(piece_index < (int)m_piece_to_slot.size());
		assert(m_piece_to_slot.size() == m_slot_to_piece.size());

		int slot_index = m_piece_to_slot[piece_index];

		if (slot_index != has_no_slot)
		{
			assert(slot_index >= 0);
			assert(slot_index < (int)m_slot_to_piece.size());
			return slot_index;
		}

		if (m_free_slots.empty())
		{
			allocate_slots(1);
			assert(!m_free_slots.empty());
		}

		std::vector<int>::iterator iter(
			std::find(
				m_free_slots.begin()
				, m_free_slots.end()
				, piece_index));

		if (iter == m_free_slots.end())
		{
			assert(m_slot_to_piece[piece_index] != unassigned);
			assert(!m_free_slots.empty());
			iter = m_free_slots.end() - 1;

			// special case to make sure we don't use the last slot
			// when we shouldn't, since it's smaller than ordinary slots
			if (*iter == m_info.num_pieces() - 1 && piece_index != *iter)
			{
				if (m_free_slots.size() == 1)
					allocate_slots(1);
				assert(m_free_slots.size() > 1);
				// assumes that all allocated slots
				// are put at the end of the free_slots vector
				iter = m_free_slots.end() - 1;
			}
		}

		slot_index = *iter;
		m_free_slots.erase(iter);

		assert(m_slot_to_piece[slot_index] == unassigned);

		m_slot_to_piece[slot_index] = piece_index;
		m_piece_to_slot[piece_index] = slot_index;

		// there is another piece already assigned to
		// the slot we are interested in, swap positions
		if (slot_index != piece_index
			&& m_slot_to_piece[piece_index] >= 0)
		{

#if !defined(NDEBUG) && defined(TORRENT_STORAGE_DEBUG)
			std::stringstream s;

			s << "there is another piece at our slot, swapping..";

			s << "\n   piece_index: ";
			s << piece_index;
			s << "\n   slot_index: ";
			s << slot_index;
			s << "\n   piece at our slot: ";
			s << m_slot_to_piece[piece_index];
			s << "\n";

			print_to_log(s.str());
			debug_log();
#endif

			int piece_at_our_slot = m_slot_to_piece[piece_index];
			assert(m_piece_to_slot[piece_at_our_slot] == piece_index);

			std::swap(
				m_slot_to_piece[piece_index]
				, m_slot_to_piece[slot_index]);

			std::swap(
				m_piece_to_slot[piece_index]
				, m_piece_to_slot[piece_at_our_slot]);

			m_storage->move_slot(piece_index, slot_index);

			assert(m_slot_to_piece[piece_index] == piece_index);
			assert(m_piece_to_slot[piece_index] == piece_index);

			slot_index = piece_index;

#if !defined(NDEBUG) && defined(TORRENT_STORAGE_DEBUG)
			debug_log();
#endif
		}

		assert(slot_index >= 0);
		assert(slot_index < (int)m_slot_to_piece.size());
		return slot_index;
	}
	namespace
	{
		// this is used to notify potential other
		// threads that the allocation-function has exited
		struct allocation_syncronization
		{
			allocation_syncronization(
				bool& flag
				, boost::condition& cond
				, boost::mutex& monitor)
				: m_flag(flag)
				, m_cond(cond)
				, m_monitor(monitor)
			{
				boost::mutex::scoped_lock lock(m_monitor);

				while (m_flag)
					m_cond.wait(lock);

				m_flag = true;
			}

			~allocation_syncronization()
			{
				boost::mutex::scoped_lock lock(m_monitor);
				m_flag = false;
				m_cond.notify_one();
			}

			bool& m_flag;
			boost::condition& m_cond;
			boost::mutex& m_monitor;
		};
	}
	bool piece_manager::impl::allocate_slots(int num_slots, bool abort_on_disk)
	{
		assert(num_slots > 0);

		// this object will synchronize the allocation with
		// potential other threads
		allocation_syncronization sync_obj(
			m_allocating
			, m_allocating_condition
			, m_allocating_monitor);

		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------

//		INVARIANT_CHECK;

		assert(!m_unallocated_slots.empty());

		const int stack_buffer_size = 16*1024;
		char zeroes[stack_buffer_size];
		memset(zeroes, 0, stack_buffer_size);

		bool written = false;

		for (int i = 0; i < num_slots && !m_unallocated_slots.empty(); ++i)
		{
//			INVARIANT_CHECK;

			int pos = m_unallocated_slots.front();
			assert(m_slot_to_piece[pos] == unallocated);
			assert(m_piece_to_slot[pos] != pos);

			int new_free_slot = pos;
			if (m_piece_to_slot[pos] != has_no_slot)
			{
				new_free_slot = m_piece_to_slot[pos];
				m_storage->move_slot(new_free_slot, pos);
				m_slot_to_piece[pos] = pos;
				m_piece_to_slot[pos] = pos;
				written = true;
			}
			else if (m_fill_mode)
			{
				int piece_size = int(m_info.piece_size(pos));
				int offset = 0;
				for (; piece_size > 0; piece_size -= stack_buffer_size
					, offset += stack_buffer_size)
				{
					m_storage->write(zeroes, pos, offset
						, std::min(piece_size, stack_buffer_size));
				}
				written = true;
			}
			m_unallocated_slots.erase(m_unallocated_slots.begin());
			m_slot_to_piece[new_free_slot] = unassigned;
			m_free_slots.push_back(new_free_slot);
			if (abort_on_disk && written) return true;
		}

		assert(m_free_slots.size() > 0);
		return written;
	}
	bool piece_manager::allocate_slots(int num_slots, bool abort_on_disk)
	{
		return m_pimpl->allocate_slots(num_slots, abort_on_disk);
	}

	path const& piece_manager::save_path() const
	{
		return m_pimpl->save_path();
	}

	bool piece_manager::move_storage(path const& save_path)
	{
		return m_pimpl->move_storage(save_path);
	}
#ifndef NDEBUG
	void piece_manager::impl::check_invariant() const
	{
		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------
		if (m_piece_to_slot.empty()) return;

		assert((int)m_piece_to_slot.size() == m_info.num_pieces());
		assert((int)m_slot_to_piece.size() == m_info.num_pieces());

		for (std::vector<int>::const_iterator i = m_free_slots.begin();
			i != m_free_slots.end(); ++i)
		{
			assert(*i < (int)m_slot_to_piece.size());
			assert(*i >= 0);
			assert(m_slot_to_piece[*i] == unassigned);
			assert(std::find(i+1, m_free_slots.end(), *i)
				== m_free_slots.end());
		}

		for (std::vector<int>::const_iterator i = m_unallocated_slots.begin();
			i != m_unallocated_slots.end(); ++i)
		{
			assert(*i < (int)m_slot_to_piece.size());
			assert(*i >= 0);
			assert(m_slot_to_piece[*i] == unallocated);
			assert(std::find(i+1, m_unallocated_slots.end(), *i)
				== m_unallocated_slots.end());
		}

		for (int i = 0; i < m_info.num_pieces(); ++i)
		{
			// Check domain of piece_to_slot's elements
			if (m_piece_to_slot[i] != has_no_slot)
			{
				assert(m_piece_to_slot[i] >= 0);
				assert(m_piece_to_slot[i] < (int)m_slot_to_piece.size());
			}

			// Check domain of slot_to_piece's elements
			if (m_slot_to_piece[i] != unallocated
				&& m_slot_to_piece[i] != unassigned)
			{
				assert(m_slot_to_piece[i] >= 0);
				assert(m_slot_to_piece[i] < (int)m_piece_to_slot.size());
			}

			// do more detailed checks on piece_to_slot
			if (m_piece_to_slot[i] >= 0)
			{
				assert(m_slot_to_piece[m_piece_to_slot[i]] == i);
				if (m_piece_to_slot[i] != i)
				{
					assert(m_slot_to_piece[i] == unallocated);
				}
			}
			else
			{
				assert(m_piece_to_slot[i] == has_no_slot);
			}

			// do more detailed checks on slot_to_piece

			if (m_slot_to_piece[i] >= 0)
			{
				assert(m_slot_to_piece[i] < (int)m_piece_to_slot.size());
				assert(m_piece_to_slot[m_slot_to_piece[i]] == i);
#ifdef TORRENT_STORAGE_DEBUG
				assert(
					std::find(
						m_unallocated_slots.begin()
						, m_unallocated_slots.end()
						, i) == m_unallocated_slots.end()
				);
				assert(
					std::find(
						m_free_slots.begin()
						, m_free_slots.end()
						, i) == m_free_slots.end()
				);
#endif
			}
			else if (m_slot_to_piece[i] == unallocated)
			{
#ifdef TORRENT_STORAGE_DEBUG
				assert(m_unallocated_slots.empty()
					|| (std::find(
						m_unallocated_slots.begin()
						, m_unallocated_slots.end()
						, i) != m_unallocated_slots.end())
				);
#endif
			}
			else if (m_slot_to_piece[i] == unassigned)
			{
#ifdef TORRENT_STORAGE_DEBUG
				assert(
					std::find(
						m_free_slots.begin()
						, m_free_slots.end()
						, i) != m_free_slots.end()
				);
#endif
			}
			else
			{
				assert(false && "m_slot_to_piece[i] is invalid");
			}
		}
	}
#ifdef TORRENT_STORAGE_DEBUG
	void piece_manager::impl::debug_log() const
	{
		std::stringstream s;

		s << "index\tslot\tpiece\n";

		for (int i = 0; i < m_info.num_pieces(); ++i)
		{
			s << i << "\t" << m_slot_to_piece[i] << "\t";
			s << m_piece_to_slot[i] << "\n";
		}

		s << "---------------------------------\n";

		print_to_log(s.str());
	}
#endif
#endif
} // namespace libtorrent