/*

Copyright (c) 2003, Arvid Norberg, Daniel Wallin
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the distribution.
    * Neither the name of the author nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

*/
33 #include "libtorrent/pch.hpp"
42 #pragma warning(push, 1)
45 #include <boost/lexical_cast.hpp>
46 #include <boost/filesystem/convenience.hpp>
47 #include <boost/filesystem/operations.hpp>
48 #include <boost/filesystem/fstream.hpp>
49 #include <boost/thread/mutex.hpp>
50 #include <boost/ref.hpp>
51 #include <boost/bind.hpp>
52 #include <boost/version.hpp>
53 #include <boost/multi_index_container.hpp>
54 #include <boost/multi_index/member.hpp>
55 #include <boost/multi_index/ordered_index.hpp>
61 #include "libtorrent/storage.hpp"
62 #include "libtorrent/torrent.hpp"
63 #include "libtorrent/hasher.hpp"
64 #include "libtorrent/session.hpp"
65 #include "libtorrent/peer_id.hpp"
66 #include "libtorrent/file.hpp"
67 #include "libtorrent/invariant_check.hpp"
68 #include "libtorrent/file_pool.hpp"
69 #include "libtorrent/aux_/session_impl.hpp"
#if defined(__APPLE__)
#include <sys/attr.h>
#include <sys/param.h>
#include <sys/mount.h>
#endif

#if defined(__linux__)
#include <sys/statfs.h>
#endif

#if defined(_WIN32) && defined(UNICODE)
#include <boost/filesystem/exception.hpp>
#include "libtorrent/utf8.hpp"

namespace libtorrent
{
	std::wstring safe_convert(std::string const& s)
	{
		try
		{
			return libtorrent::utf8_wchar(s);
		}
		catch (std::exception)
		{
			std::wstring ret;
			const char* end = &s[0] + s.size();
			for (const char* i = &s[0]; i < end;)
			{
				wchar_t c = '.';
				int result = std::mbtowc(&c, i, end - i);
				if (result > 0) i += result;
				else ++i;
				ret += c;
			}
			return ret;
		}
	}
}
#endif
#if defined(_WIN32) && defined(UNICODE) && BOOST_VERSION < 103400
namespace
{
	using libtorrent::safe_convert;
	using namespace boost::filesystem;

	// based on code from Boost.Filesystem
	bool create_directories_win(const path& ph)
	{
		if (ph.empty() || exists(ph))
		{
			if (!ph.empty() && !is_directory(ph))
				boost::throw_exception(filesystem_error(
					"boost::filesystem::create_directories",
					ph, "path exists and is not a directory",
					not_directory_error));
			return false;
		}

		// First create branch, by calling ourself recursively
		create_directories_win(ph.branch_path());
		// Now that parent's path exists, create the directory
		std::wstring wph(safe_convert(ph.native_directory_string()));
		CreateDirectory(wph.c_str(), 0);
		return true;
	}

	bool exists_win(const path& ph)
	{
		std::wstring wpath(safe_convert(ph.string()));
		if (::GetFileAttributes(wpath.c_str()) == 0xFFFFFFFF)
		{
			UINT err = ::GetLastError();
			if ((err == ERROR_FILE_NOT_FOUND)
				|| (err == ERROR_INVALID_PARAMETER)
				|| (err == ERROR_NOT_READY)
				|| (err == ERROR_PATH_NOT_FOUND)
				|| (err == ERROR_INVALID_NAME)
				|| (err == ERROR_BAD_NETPATH))
				return false; // GetFileAttributes failed because the path does not exist
			// for any other error we assume the file does exist and fall through,
			// this may not be the best policy though... (JM 20040330)
		}
		return true;
	}

	boost::intmax_t file_size_win(const path& ph)
	{
		std::wstring wpath(safe_convert(ph.string()));
		// by now, intmax_t is 64-bits on all Windows compilers
		WIN32_FILE_ATTRIBUTE_DATA fad;
		if (!::GetFileAttributesExW(wpath.c_str()
			, ::GetFileExInfoStandard, &fad))
			boost::throw_exception(filesystem_error(
				"boost::filesystem::file_size",
				ph, detail::system_error_code()));
		if ((fad.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0)
			boost::throw_exception(filesystem_error(
				"boost::filesystem::file_size",
				ph, "invalid: is a directory",
				is_directory_error));
		return (static_cast<boost::intmax_t>(fad.nFileSizeHigh)
			<< (sizeof(fad.nFileSizeLow) * 8))
			+ fad.nFileSizeLow;
	}

	std::time_t last_write_time_win(const path& ph)
	{
		struct _stat path_stat;
		std::wstring wph(safe_convert(ph.native_file_string()));
		if (::_wstat(wph.c_str(), &path_stat) != 0)
			boost::throw_exception(filesystem_error(
				"boost::filesystem::last_write_time",
				ph, detail::system_error_code()));
		return path_stat.st_mtime;
	}

	void rename_win(const path& old_path
		, const path& new_path)
	{
		std::wstring wold_path(safe_convert(old_path.string()));
		std::wstring wnew_path(safe_convert(new_path.string()));
		if (!::MoveFile(wold_path.c_str(), wnew_path.c_str()))
			boost::throw_exception(filesystem_error(
				"boost::filesystem::rename",
				old_path, new_path, detail::system_error_code()));
	}
} // anonymous namespace
#endif
#if BOOST_VERSION < 103200
bool operator<(boost::filesystem::path const& lhs
	, boost::filesystem::path const& rhs)
{
	return lhs.string() < rhs.string();
}
#endif

using namespace boost::filesystem;
using namespace ::boost::multi_index;
using boost::multi_index::multi_index_container;

#if !defined(NDEBUG) && defined(TORRENT_STORAGE_DEBUG)
namespace
{
	using namespace libtorrent;

	void print_to_log(const std::string& s)
	{
		static std::ofstream log("log.txt");
		log << s;
		log.flush();
	}
}
#endif
namespace libtorrent
{
	std::vector<std::pair<size_type, std::time_t> > get_filesizes(
		torrent_info const& t, path p)
	{
		std::vector<std::pair<size_type, std::time_t> > sizes;
		for (torrent_info::file_iterator i = t.begin_files();
			i != t.end_files(); ++i)
		{
			size_type size = 0;
			std::time_t time = 0;
			try
			{
				path f = p / i->path;
#if defined(_WIN32) && defined(UNICODE) && BOOST_VERSION < 103400
				size = file_size_win(f);
				time = last_write_time_win(f);
#else
				size = file_size(f);
				time = last_write_time(f);
#endif
			}
			catch (std::exception&) {}
			sizes.push_back(std::make_pair(size, time));
		}
		return sizes;
	}
	// Matches the sizes and timestamps of the files passed in.
	// In non-compact mode, actual file sizes and timestamps
	// are allowed to be bigger and more recent than the fast
	// resume data. This is because full allocation will not move
	// pieces, so any older version of the resume data will
	// still be a correct subset of the actual data on disk.
	bool match_filesizes(
		torrent_info const& t
		, path p
		, std::vector<std::pair<size_type, std::time_t> > const& sizes
		, bool compact_mode
		, std::string* error)
	{
		if ((int)sizes.size() != t.num_files())
		{
			if (error) *error = "mismatching number of files";
			return false;
		}

		std::vector<std::pair<size_type, std::time_t> >::const_iterator s
			= sizes.begin();
		for (torrent_info::file_iterator i = t.begin_files();
			i != t.end_files(); ++i, ++s)
		{
			size_type size = 0;
			std::time_t time = 0;
			try
			{
				path f = p / i->path;
#if defined(_WIN32) && defined(UNICODE) && BOOST_VERSION < 103400
				size = file_size_win(f);
				time = last_write_time_win(f);
#else
				size = file_size(f);
				time = last_write_time(f);
#endif
			}
			catch (std::exception&) {}

			if ((compact_mode && size != s->first)
				|| (!compact_mode && size < s->first))
			{
				if (error) *error = "filesize mismatch for file '"
					+ i->path.native_file_string()
					+ "', size: " + boost::lexical_cast<std::string>(size)
					+ ", expected to be " + boost::lexical_cast<std::string>(s->first);
				return false;
			}
			if ((compact_mode && time != s->second)
				|| (!compact_mode && time < s->second))
			{
				if (error) *error = "timestamp mismatch for file '"
					+ i->path.native_file_string()
					+ "', modification date: " + boost::lexical_cast<std::string>(time)
					+ ", expected to have modification date "
					+ boost::lexical_cast<std::string>(s->second);
				return false;
			}
		}
		return true;
	}
	struct thread_safe_storage
	{
		thread_safe_storage(std::size_t n)
			: slots(n, false)
		{}

		boost::mutex mutex;
		boost::condition condition;
		std::vector<bool> slots;
	};

	struct slot_lock
	{
		slot_lock(thread_safe_storage& s, int slot_)
			: storage_(s)
			, slot(slot_)
		{
			assert(slot_ >= 0 && slot_ < (int)s.slots.size());
			boost::mutex::scoped_lock lock(storage_.mutex);

			while (storage_.slots[slot])
				storage_.condition.wait(lock);
			storage_.slots[slot] = true;
		}

		~slot_lock()
		{
			boost::mutex::scoped_lock lock(storage_.mutex);
			storage_.slots[slot] = false;
			storage_.condition.notify_all();
		}

		thread_safe_storage& storage_;
		int slot;
	};
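
	// Note on usage (descriptive only): slot_lock is an RAII guard over the
	// per-slot bitmap in thread_safe_storage. Constructing it blocks until the
	// given slot is free, and the destructor releases the slot and wakes any
	// waiting threads. A rough sketch of the pattern used by read_impl() and
	// write() further down in this file:
	//
	//   {
	//       slot_lock lock(*this, slot);   // waits until 'slot' is free
	//       // ... read or write the data belonging to this slot ...
	//   }                                  // slot released, waiters notified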
	class storage : public storage_interface, thread_safe_storage, boost::noncopyable
	{
	public:
		storage(torrent_info const& info, path const& path, file_pool& fp)
			: thread_safe_storage(info.num_pieces())
			, m_info(info)
			, m_files(fp)
		{
			assert(info.begin_files() != info.end_files());
			m_save_path = complete(path);
			assert(m_save_path.is_complete());
		}

		void release_files();
		void initialize(bool allocate_files);
		bool move_storage(path save_path);
		size_type read(char* buf, int slot, int offset, int size);
		void write(const char* buf, int slot, int offset, int size);
		void move_slot(int src_slot, int dst_slot);
		void swap_slots(int slot1, int slot2);
		void swap_slots3(int slot1, int slot2, int slot3);
		bool verify_resume_data(entry& rd, std::string& error);
		void write_resume_data(entry& rd) const;
		void shuffle();

		size_type read_impl(char* buf, int slot, int offset, int size, bool fill_zero);

		~storage()
		{
			m_files.release(this);
		}

		torrent_info const& m_info;
		path m_save_path;

		// the file pool is typically stored in
		// the session, to make all storage
		// instances use the same pool
		file_pool& m_files;

		// temporary storage for moving pieces
		std::vector<char> m_scratch_buffer;
	};
	void storage::initialize(bool allocate_files)
	{
		// first, create all missing directories
		path last_path;
		for (torrent_info::file_iterator file_iter = m_info.begin_files(),
			end_iter = m_info.end_files(); file_iter != end_iter; ++file_iter)
		{
			path dir = (m_save_path / file_iter->path).branch_path();

			if (dir != last_path)
			{
				last_path = dir;

#if defined(_WIN32) && defined(UNICODE) && BOOST_VERSION < 103400
				if (!exists_win(last_path))
					create_directories_win(last_path);
#else
				if (!exists(last_path))
					create_directories(last_path);
#endif
			}

			// if the file is empty, just create it. But also make sure
			// the directory exists.
			if (file_iter->size == 0)
			{
				file(m_save_path / file_iter->path, file::out);
				continue;
			}

			if (allocate_files)
			{
				m_files.open_file(this, m_save_path / file_iter->path, file::in | file::out)
					->set_size(file_iter->size);
			}
		}
	}
	void storage::release_files()
	{
		m_files.release(this);
		std::vector<char>().swap(m_scratch_buffer);
	}
	void storage::write_resume_data(entry& rd) const
	{
		std::vector<std::pair<size_type, std::time_t> > file_sizes
			= get_filesizes(m_info, m_save_path);

		rd["file sizes"] = entry::list_type();
		entry::list_type& fl = rd["file sizes"].list();
		for (std::vector<std::pair<size_type, std::time_t> >::iterator i
			= file_sizes.begin(), end(file_sizes.end()); i != end; ++i)
		{
			entry::list_type p;
			p.push_back(entry(i->first));
			p.push_back(entry(i->second));
			fl.push_back(entry(p));
		}
	}
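
	// The resume data written above stores one [size, mtime] pair per file.
	// For example (values illustrative, not from a real torrent), for a
	// two-file torrent the "file sizes" entry is a list shaped like
	//
	//   "file sizes": [[734003200, 1183295762], [524288, 1183295762]]
	//
	// with sizes in bytes and timestamps as posix time. verify_resume_data()
	// below reads the same structure back and hands it to match_filesizes().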
	bool storage::verify_resume_data(entry& rd, std::string& error)
	{
		std::vector<std::pair<size_type, std::time_t> > file_sizes;
		entry::list_type& l = rd["file sizes"].list();

		for (entry::list_type::iterator i = l.begin();
			i != l.end(); ++i)
		{
			file_sizes.push_back(std::pair<size_type, std::time_t>(
				i->list().front().integer()
				, i->list().back().integer()));
		}

		if (file_sizes.empty())
		{
			error = "the number of files in resume data is 0";
			return false;
		}

		entry::list_type& slots = rd["slots"].list();
		bool seed = int(slots.size()) == m_info.num_pieces()
			&& std::find_if(slots.begin(), slots.end()
				, boost::bind<bool>(std::less<int>()
					, boost::bind((size_type const& (entry::*)() const)
						&entry::integer, _1), 0)) == slots.end();

		bool full_allocation_mode = false;
		try
		{
			full_allocation_mode = rd["allocation"].string() == "full";
		}
		catch (std::exception&) {}

		if (seed)
		{
			if (m_info.num_files() != (int)file_sizes.size())
			{
				error = "the number of files does not match the torrent (num: "
					+ boost::lexical_cast<std::string>(file_sizes.size()) + " actual: "
					+ boost::lexical_cast<std::string>(m_info.num_files()) + ")";
				return false;
			}

			std::vector<std::pair<size_type, std::time_t> >::iterator
				fs = file_sizes.begin();
			// the resume data says we have the entire torrent
			// make sure the file sizes are the right ones
			for (torrent_info::file_iterator i = m_info.begin_files()
				, end(m_info.end_files()); i != end; ++i, ++fs)
			{
				if (i->size != fs->first)
				{
					error = "file size for '" + i->path.native_file_string()
						+ "' was expected to be "
						+ boost::lexical_cast<std::string>(i->size) + " bytes";
					return false;
				}
			}
			return true;
		}

		return match_filesizes(m_info, m_save_path, file_sizes
			, !full_allocation_mode, &error);
	}
	// returns true on success
	bool storage::move_storage(path save_path)
	{
		path old_path;
		path new_path;

		save_path = complete(save_path);

#if defined(_WIN32) && defined(UNICODE) && BOOST_VERSION < 103400
		std::wstring wsave_path(safe_convert(save_path.native_file_string()));
		if (!exists_win(save_path))
		{
			CreateDirectory(wsave_path.c_str(), 0);
		}
		else if ((GetFileAttributes(wsave_path.c_str()) & FILE_ATTRIBUTE_DIRECTORY) == 0)
		{
			return false;
		}
#else
		if (!exists(save_path))
			create_directory(save_path);
		else if (!is_directory(save_path))
			return false;
#endif

		m_files.release(this);

		old_path = m_save_path / m_info.name();
		new_path = save_path / m_info.name();

		try
		{
#if defined(_WIN32) && defined(UNICODE) && BOOST_VERSION < 103400
			rename_win(old_path, new_path);
#else
			rename(old_path, new_path);
#endif
			m_save_path = save_path;
			return true;
		}
		catch (std::exception&) {}
		return false;
	}
	void storage::shuffle()
	{
		int num_pieces = m_info.num_pieces();

		std::vector<int> pieces(num_pieces);
		for (std::vector<int>::iterator i = pieces.begin();
			i != pieces.end(); ++i)
		{
			*i = static_cast<int>(i - pieces.begin());
		}
		std::srand((unsigned int)std::time(0));
		std::vector<int> targets(pieces);
		std::random_shuffle(pieces.begin(), pieces.end());
		std::random_shuffle(targets.begin(), targets.end());

		for (int i = 0; i < (std::max)(num_pieces / 50, 1); ++i)
		{
			const int slot_index = targets[i];
			const int piece_index = pieces[i];
			const int slot_size = static_cast<int>(m_info.piece_size(slot_index));
			std::vector<char> buf(slot_size);
			read(&buf[0], piece_index, 0, slot_size);
			write(&buf[0], slot_index, 0, slot_size);
		}
	}
	void storage::move_slot(int src_slot, int dst_slot)
	{
		int piece_size = m_info.piece_size(dst_slot);
		m_scratch_buffer.resize(piece_size);
		read_impl(&m_scratch_buffer[0], src_slot, 0, piece_size, true);
		write(&m_scratch_buffer[0], dst_slot, 0, piece_size);
	}
	void storage::swap_slots(int slot1, int slot2)
	{
		// the size of the target slot is the size of the piece
		int piece_size = m_info.piece_length();
		int piece1_size = m_info.piece_size(slot2);
		int piece2_size = m_info.piece_size(slot1);
		m_scratch_buffer.resize(piece_size * 2);
		read_impl(&m_scratch_buffer[0], slot1, 0, piece1_size, true);
		read_impl(&m_scratch_buffer[piece_size], slot2, 0, piece2_size, true);
		write(&m_scratch_buffer[0], slot2, 0, piece1_size);
		write(&m_scratch_buffer[piece_size], slot1, 0, piece2_size);
	}
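
	// swap_slots3() below performs a three-way rotation with the same scratch
	// buffer: the data that starts in slot1 ends up in slot2, the data from
	// slot2 ends up in slot3, and the data from slot3 ends up in slot1. Only
	// two piece-sized halves of m_scratch_buffer are needed because the third
	// piece can be staged in the first half once slot2 has been overwritten:
	//
	//   slot1 -> slot2 -> slot3 -> slot1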
	void storage::swap_slots3(int slot1, int slot2, int slot3)
	{
		// the size of the target slot is the size of the piece
		int piece_size = m_info.piece_length();
		int piece1_size = m_info.piece_size(slot2);
		int piece2_size = m_info.piece_size(slot3);
		int piece3_size = m_info.piece_size(slot1);
		m_scratch_buffer.resize(piece_size * 2);
		read_impl(&m_scratch_buffer[0], slot1, 0, piece1_size, true);
		read_impl(&m_scratch_buffer[piece_size], slot2, 0, piece2_size, true);
		write(&m_scratch_buffer[0], slot2, 0, piece1_size);
		read_impl(&m_scratch_buffer[0], slot3, 0, piece3_size, true);
		write(&m_scratch_buffer[piece_size], slot3, 0, piece2_size);
		write(&m_scratch_buffer[0], slot1, 0, piece3_size);
	}
	size_type storage::read(
		char* buf
		, int slot
		, int offset
		, int size)
	{
		return read_impl(buf, slot, offset, size, false);
	}

	size_type storage::read_impl(
		char* buf
		, int slot
		, int offset
		, int size
		, bool fill_zero)
	{
		assert(slot >= 0 && slot < m_info.num_pieces());
		assert(offset >= 0);
		assert(offset < m_info.piece_size(slot));
		assert(size > 0);

		slot_lock lock(*this, slot);

		std::vector<file_slice> slices
			= m_info.map_block(slot, offset, size);
		assert(!slices.empty());

		size_type start = slot * (size_type)m_info.piece_length() + offset;
		assert(start + size <= m_info.total_size());

		// find the file iterator and file offset
		size_type file_offset = start;
		std::vector<file_entry>::const_iterator file_iter;

		for (file_iter = m_info.begin_files();;)
		{
			if (file_offset < file_iter->size)
				break;

			file_offset -= file_iter->size;
			++file_iter;
		}

		boost::shared_ptr<file> in(m_files.open_file(
			this, m_save_path / file_iter->path, file::in));

		assert(file_offset < file_iter->size);
		assert(slices[0].offset == file_offset);

		int buf_pos = 0;

		size_type new_pos = in->seek(file_offset);
		if (new_pos != file_offset)
		{
			// the file was not big enough
			if (!fill_zero)
				throw file_error("slot has no storage");
			std::memset(buf + buf_pos, 0, size - buf_pos);
			return size;
		}

		size_type in_tell = in->tell();
		assert(in_tell == file_offset);

		int left_to_read = size;
		int slot_size = static_cast<int>(m_info.piece_size(slot));

		if (offset + left_to_read > slot_size)
			left_to_read = slot_size - offset;

		assert(left_to_read >= 0);

		size_type result = left_to_read;
		int counter = 0;

		while (left_to_read > 0)
		{
			int read_bytes = left_to_read;
			if (file_offset + read_bytes > file_iter->size)
				read_bytes = static_cast<int>(file_iter->size - file_offset);

			assert(int(slices.size()) > counter);
			size_type slice_size = slices[counter].size;
			assert(slice_size == read_bytes);
			assert(m_info.file_at(slices[counter].file_index).path
				== file_iter->path);

			size_type actual_read = in->read(buf + buf_pos, read_bytes);

			if (read_bytes != actual_read)
			{
				// the file was not big enough
				if (actual_read > 0) buf_pos += actual_read;
				if (!fill_zero)
					throw file_error("slot has no storage");
				std::memset(buf + buf_pos, 0, size - buf_pos);
				return size;
			}

			left_to_read -= read_bytes;
			buf_pos += read_bytes;
			assert(buf_pos >= 0);
			file_offset += read_bytes;

			if (left_to_read > 0)
			{
				++file_iter;
				// empty files are not returned by map_block, so if
				// this file was empty, don't increment the slice counter
				if (read_bytes > 0) ++counter;

				path path = m_save_path / file_iter->path;
				file_offset = 0;
				in = m_files.open_file(
					this, path, file::in);
			}
		}
		return result;
	}
	// throws file_error if it fails to write
	void storage::write(
		const char* buf
		, int slot
		, int offset
		, int size)
	{
		assert(slot < m_info.num_pieces());

		slot_lock lock(*this, slot);

		std::vector<file_slice> slices
			= m_info.map_block(slot, offset, size);
		assert(!slices.empty());

		size_type start = slot * (size_type)m_info.piece_length() + offset;

		// find the file iterator and file offset
		size_type file_offset = start;
		std::vector<file_entry>::const_iterator file_iter;

		for (file_iter = m_info.begin_files();;)
		{
			if (file_offset < file_iter->size)
				break;

			file_offset -= file_iter->size;
			++file_iter;
			assert(file_iter != m_info.end_files());
		}

		path p(m_save_path / file_iter->path);
		boost::shared_ptr<file> out = m_files.open_file(
			this, p, file::out | file::in);

		assert(file_offset < file_iter->size);
		assert(slices[0].offset == file_offset);

		size_type pos = out->seek(file_offset);

		if (pos != file_offset)
		{
			std::stringstream s;
			s << "no storage for slot " << slot;
			throw file_error(s.str());
		}

		int left_to_write = size;
		int slot_size = static_cast<int>(m_info.piece_size(slot));

		if (offset + left_to_write > slot_size)
			left_to_write = slot_size - offset;

		assert(left_to_write >= 0);

		int buf_pos = 0;
		int counter = 0;

		while (left_to_write > 0)
		{
			int write_bytes = left_to_write;
			if (file_offset + write_bytes > file_iter->size)
			{
				assert(file_iter->size >= file_offset);
				write_bytes = static_cast<int>(file_iter->size - file_offset);
			}

			assert(int(slices.size()) > counter);
			assert(slices[counter].size == write_bytes);
			assert(m_info.file_at(slices[counter].file_index).path
				== file_iter->path);

			assert(buf_pos >= 0);
			assert(write_bytes >= 0);
			size_type written = out->write(buf + buf_pos, write_bytes);

			if (written != write_bytes)
			{
				std::stringstream s;
				s << "no storage for slot " << slot;
				throw file_error(s.str());
			}

			left_to_write -= write_bytes;
			buf_pos += write_bytes;
			assert(buf_pos >= 0);
			file_offset += write_bytes;
			assert(file_offset <= file_iter->size);

			if (left_to_write > 0)
			{
				++file_iter;
				if (write_bytes > 0) ++counter;

				assert(file_iter != m_info.end_files());
				path p = m_save_path / file_iter->path;
				file_offset = 0;
				out = m_files.open_file(
					this, p, file::out | file::in);
			}
		}
	}
	storage_interface* default_storage_constructor(torrent_info const& ti
		, boost::filesystem::path const& path, file_pool& fp)
	{
		return new storage(ti, path, fp);
	}
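
	// default_storage_constructor() matches the storage_constructor_type
	// signature that piece_manager::impl takes further down, so an alternative
	// storage_interface implementation can be plugged in the same way. A rough
	// sketch only (the 'ram_storage' name is hypothetical and it would need to
	// implement the storage_interface members exercised in this file: read(),
	// write(), initialize(), move_slot(), etc.):
	//
	//   storage_interface* ram_storage_constructor(torrent_info const& ti
	//       , boost::filesystem::path const& p, file_pool& fp)
	//   {
	//       return new ram_storage(ti);
	//   }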
	bool supports_sparse_files(path const& p)
	{
		assert(p.is_complete());
#if defined(_WIN32)
		// assume windows API is available
		DWORD max_component_len = 0;
		DWORD volume_flags = 0;
		std::string root_device = p.root_name() + "\\";
#if defined(UNICODE)
		std::wstring wph(safe_convert(root_device));
		bool ret = ::GetVolumeInformation(wph.c_str(), 0
			, 0, 0, &max_component_len, &volume_flags, 0, 0);
#else
		bool ret = ::GetVolumeInformation(root_device.c_str(), 0
			, 0, 0, &max_component_len, &volume_flags, 0, 0);
#endif

		if (!ret) return false;
		if (volume_flags & FILE_SUPPORTS_SPARSE_FILES)
			return true;
#endif

#if defined(__APPLE__) || defined(__linux__)
		// find the last existing directory of the save path
		path query_path = p;
		while (!query_path.empty() && !exists(query_path))
			query_path = query_path.branch_path();
#endif

#if defined(__APPLE__)
		struct statfs fsinfo;
		int ret = statfs(query_path.native_directory_string().c_str(), &fsinfo);
		if (ret != 0) return false;

		struct attrlist request;
		request.bitmapcount = ATTR_BIT_MAP_COUNT;
		request.reserved = 0;
		request.commonattr = 0;
		request.volattr = ATTR_VOL_CAPABILITIES;
		request.dirattr = 0;
		request.fileattr = 0;
		request.forkattr = 0;

		struct vol_capabilities_attr_buf
		{
			unsigned long length;
			vol_capabilities_attr_t info;
		} vol_cap;

		ret = getattrlist(fsinfo.f_mntonname, &request, &vol_cap
			, sizeof(vol_cap), 0);
		if (ret != 0) return false;

		if (vol_cap.info.capabilities[VOL_CAPABILITIES_FORMAT]
			& (VOL_CAP_FMT_SPARSE_FILES | VOL_CAP_FMT_ZERO_RUNS))
		{
			return true;
		}
		return false;
#endif

#if defined(__linux__)
		struct statfs buf;
		int err = statfs(query_path.native_directory_string().c_str(), &buf);
		if (err == 0)
		{
#ifndef NDEBUG
			std::cerr << "buf.f_type " << std::hex << buf.f_type << std::endl;
#endif
			switch (buf.f_type)
			{
				case 0x5346544e: // NTFS
				case 0xEF51: // EXT2 OLD
				case 0xEF53: // EXT2 and EXT3
				case 0x00011954: // UFS
				case 0x52654973: // ReiserFS
				case 0x58465342: // XFS
					return true;
			}
		}
		else
		{
			std::cerr << "statfs returned " << err << std::endl;
			std::cerr << "errno: " << errno << std::endl;
			std::cerr << "path: " << query_path.native_directory_string() << std::endl;
		}
#endif

		// TODO: POSIX implementation
		return false;
	}
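
	// The result of supports_sparse_files() feeds piece_manager::impl's
	// m_fill_mode flag (set in its constructor below): on filesystems without
	// sparse-file support the storage falls back to explicitly writing zeroes
	// when allocating slots, see allocate_slots() further down.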
	// -- piece_manager -----------------------------------------------------

	class piece_manager::impl
	{
	friend class invariant_access;
	public:

		impl(
			torrent_info const& info
			, path const& save_path
			, file_pool& fp
			, storage_constructor_type sc);

		bool check_fastresume(
			aux::piece_checker_data& d
			, std::vector<bool>& pieces
			, int& num_pieces
			, bool compact_mode);

		std::pair<bool, float> check_files(
			std::vector<bool>& pieces
			, int& num_pieces, boost::recursive_mutex& mutex);

		void release_files();

		bool allocate_slots(int num_slots, bool abort_on_disk = false);
		void mark_failed(int index);
		unsigned long piece_crc(
			int slot_index
			, int block_size
			, piece_picker::block_info const* bi);

		int slot_for_piece(int piece_index) const;

		size_type read(
			char* buf
			, int piece_index
			, int offset
			, int size);

		void write(
			const char* buf
			, int piece_index
			, int offset
			, int size);

		path const& save_path() const
		{ return m_save_path; }

		bool move_storage(path save_path)
		{
			if (m_storage->move_storage(save_path))
			{
				m_save_path = complete(save_path);
				return true;
			}
			return false;
		}

		void export_piece_map(std::vector<int>& p) const;

		// returns the slot currently associated with the given
		// piece or assigns the given piece_index to a free slot
		int identify_data(
			const std::vector<char>& piece_data
			, int current_slot
			, std::vector<bool>& have_pieces
			, int& num_pieces
			, const std::multimap<sha1_hash, int>& hash_to_piece
			, boost::recursive_mutex& mutex);

		int allocate_slot_for_piece(int piece_index);

		void check_invariant() const;
#ifdef TORRENT_STORAGE_DEBUG
		void debug_log() const;
#endif

		boost::scoped_ptr<storage_interface> m_storage;

		// if this is true, pieces are always allocated at the
		// lowest possible slot index. If it is false, pieces
		// are always written to their final place immediately
		bool m_compact_mode;

		// if this is true, pieces that haven't been downloaded
		// will be filled with zeroes. Not filling with zeroes
		// will not work in some cases (where a seek cannot pass
		// the end of the file).
		bool m_fill_mode;

		// a bitmask representing the pieces we have
		std::vector<bool> m_have_piece;

		torrent_info const& m_info;

		// slots that haven't had any file storage allocated
		std::vector<int> m_unallocated_slots;
		// slots that have file storage, but aren't assigned to a piece
		std::vector<int> m_free_slots;

		enum
		{
			has_no_slot = -3 // the piece has no storage
		};

		// maps piece indices to slots. If a piece doesn't
		// have any storage, it is set to 'has_no_slot'
		std::vector<int> m_piece_to_slot;

		enum
		{
			unallocated = -1, // the slot is unallocated
			unassigned = -2   // the slot is allocated but not assigned to a piece
		};

		// maps slots to piece indices, if a slot doesn't have a piece
		// it can either be 'unassigned' or 'unallocated'
		std::vector<int> m_slot_to_piece;

		path m_save_path;

		mutable boost::recursive_mutex m_mutex;

		bool m_allocating;
		boost::mutex m_allocating_monitor;
		boost::condition m_allocating_condition;

		// these states are used while checking/allocating the torrent
		enum state_t
		{
			// the default initial state
			state_none,
			// the file checking is complete
			state_finished,
			// creating the directories
			state_create_files,
			// checking the files
			state_full_check,
			// allocating files (in non-compact mode)
			state_allocating
		} m_state;
		int m_current_slot;

		std::vector<char> m_piece_data;

		// this maps a piece hash to piece index. It will be
		// built the first time it is used (to save time if it
		// isn't needed)
		std::multimap<sha1_hash, int> m_hash_to_piece;
	};
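
	// m_piece_to_slot and m_slot_to_piece are maintained as inverse mappings
	// (check_invariant() below verifies this): whenever m_piece_to_slot[p] == s
	// with s >= 0, then m_slot_to_piece[s] == p. As a worked example, a
	// partially checked 4-piece torrent could look like this:
	//
	//   slot/piece index:   0    1            2    3
	//   m_slot_to_piece:  [ 2,   unassigned,  0,   unallocated ]
	//   m_piece_to_slot:  [ 2,   has_no_slot, 0,   has_no_slot ]
	//
	// i.e. piece 0 currently sits in slot 2, piece 2 sits in slot 0, slot 1 is
	// allocated but free (so it is also listed in m_free_slots), and slot 3 has
	// no storage yet (listed in m_unallocated_slots).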
	piece_manager::impl::impl(
		torrent_info const& info
		, path const& save_path
		, file_pool& fp
		, storage_constructor_type sc)
		: m_storage(sc(info, save_path, fp))
		, m_compact_mode(false)
		, m_info(info)
		, m_save_path(complete(save_path))
		, m_allocating(false)
	{
		m_fill_mode = !supports_sparse_files(save_path);
	}

	piece_manager::piece_manager(
		torrent_info const& info
		, path const& save_path
		, file_pool& fp
		, storage_constructor_type sc)
		: m_pimpl(new impl(info, save_path, fp, sc))
	{
	}
	piece_manager::~piece_manager()
	{
	}

	void piece_manager::write_resume_data(entry& rd) const
	{
		m_pimpl->m_storage->write_resume_data(rd);
	}

	bool piece_manager::verify_resume_data(entry& rd, std::string& error)
	{
		return m_pimpl->m_storage->verify_resume_data(rd, error);
	}

	void piece_manager::release_files()
	{
		m_pimpl->release_files();
	}

	void piece_manager::impl::release_files()
	{
		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------

		m_storage->release_files();
	}
	void piece_manager::impl::export_piece_map(
			std::vector<int>& p) const
	{
		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------

		p.clear();
		std::vector<int>::const_reverse_iterator last;
		for (last = m_slot_to_piece.rbegin();
			last != m_slot_to_piece.rend(); ++last)
		{
			if (*last != unallocated) break;
		}

		for (std::vector<int>::const_iterator i =
			m_slot_to_piece.begin();
			i != last.base(); ++i)
		{
			p.push_back(*i);
		}
	}

	bool piece_manager::compact_allocation() const
	{ return m_pimpl->m_compact_mode; }

	void piece_manager::export_piece_map(
			std::vector<int>& p) const
	{
		m_pimpl->export_piece_map(p);
	}
	void piece_manager::impl::mark_failed(int piece_index)
	{
		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------

		assert(piece_index >= 0 && piece_index < (int)m_piece_to_slot.size());
		assert(m_piece_to_slot[piece_index] >= 0);

		int slot_index = m_piece_to_slot[piece_index];

		assert(slot_index >= 0);

		m_slot_to_piece[slot_index] = unassigned;
		m_piece_to_slot[piece_index] = has_no_slot;
		m_free_slots.push_back(slot_index);
	}

	void piece_manager::mark_failed(int index)
	{
		m_pimpl->mark_failed(index);
	}

	bool piece_manager::is_allocating() const
	{
		return m_pimpl->m_state
			== impl::state_allocating;
	}

	int piece_manager::slot_for_piece(int piece_index) const
	{
		return m_pimpl->slot_for_piece(piece_index);
	}

	int piece_manager::impl::slot_for_piece(int piece_index) const
	{
		assert(piece_index >= 0 && piece_index < m_info.num_pieces());
		return m_piece_to_slot[piece_index];
	}
	unsigned long piece_manager::piece_crc(
		int index
		, int block_size
		, piece_picker::block_info const* bi)
	{
		return m_pimpl->piece_crc(index, block_size, bi);
	}

	unsigned long piece_manager::impl::piece_crc(
		int slot_index
		, int block_size
		, piece_picker::block_info const* bi)
	try
	{
		assert(slot_index >= 0);
		assert(slot_index < m_info.num_pieces());
		assert(block_size > 0);

		adler32_crc crc;
		std::vector<char> buf(block_size);
		int num_blocks = static_cast<int>(m_info.piece_size(slot_index)) / block_size;
		int last_block_size = static_cast<int>(m_info.piece_size(slot_index)) % block_size;
		if (last_block_size == 0) last_block_size = block_size;

		for (int i = 0; i < num_blocks - 1; ++i)
		{
			if (!bi[i].finished) continue;
			read(
				&buf[0]
				, slot_index
				, i * block_size
				, block_size);
			crc.update(&buf[0], block_size);
		}
		if (bi[num_blocks - 1].finished)
		{
			read(
				&buf[0]
				, slot_index
				, block_size * (num_blocks - 1)
				, last_block_size);
			crc.update(&buf[0], last_block_size);
		}
		return crc.final();
	}
	catch (std::exception&)
	{
		return 0;
	}
piece_manager::impl::read(
1343 assert(offset
>= 0);
1345 assert(piece_index
>= 0 && piece_index
< (int)m_piece_to_slot
.size());
1346 assert(m_piece_to_slot
[piece_index
] >= 0
1347 && m_piece_to_slot
[piece_index
] < (int)m_slot_to_piece
.size());
1348 int slot
= m_piece_to_slot
[piece_index
];
1349 assert(slot
>= 0 && slot
< (int)m_slot_to_piece
.size());
1350 return m_storage
->read(buf
, slot
, offset
, size
);
1353 size_type
piece_manager::read(
1359 return m_pimpl
->read(buf
, piece_index
, offset
, size
);
1362 void piece_manager::impl::write(
1369 assert(offset
>= 0);
1371 assert(piece_index
>= 0 && piece_index
< (int)m_piece_to_slot
.size());
1372 int slot
= allocate_slot_for_piece(piece_index
);
1373 assert(slot
>= 0 && slot
< (int)m_slot_to_piece
.size());
1374 m_storage
->write(buf
, slot
, offset
, size
);
1377 void piece_manager::write(
1383 m_pimpl
->write(buf
, piece_index
, offset
, size
);
	int piece_manager::impl::identify_data(
		const std::vector<char>& piece_data
		, int current_slot
		, std::vector<bool>& have_pieces
		, int& num_pieces
		, const std::multimap<sha1_hash, int>& hash_to_piece
		, boost::recursive_mutex& mutex)
	{
		assert((int)have_pieces.size() == m_info.num_pieces());

		const int piece_size = static_cast<int>(m_info.piece_length());
		const int last_piece_size = static_cast<int>(m_info.piece_size(
			m_info.num_pieces() - 1));

		assert((int)piece_data.size() >= last_piece_size);

		// calculate a small digest, with the same
		// size as the last piece. And a large digest
		// which has the same size as a normal piece
		hasher small_digest;
		small_digest.update(&piece_data[0], last_piece_size);
		hasher large_digest(small_digest);
		assert(piece_size - last_piece_size >= 0);
		if (piece_size - last_piece_size > 0)
		{
			large_digest.update(
				&piece_data[last_piece_size]
				, piece_size - last_piece_size);
		}
		sha1_hash large_hash = large_digest.final();
		sha1_hash small_hash = small_digest.final();

		typedef std::multimap<sha1_hash, int>::const_iterator map_iter;
		map_iter begin1;
		map_iter end1;
		map_iter begin2;
		map_iter end2;

		// makes the lookups for the small digest and the large digest
		boost::tie(begin1, end1) = hash_to_piece.equal_range(small_hash);
		boost::tie(begin2, end2) = hash_to_piece.equal_range(large_hash);

		// copy all potential piece indices into this vector
		std::vector<int> matching_pieces;
		for (map_iter i = begin1; i != end1; ++i)
			matching_pieces.push_back(i->second);
		for (map_iter i = begin2; i != end2; ++i)
			matching_pieces.push_back(i->second);

		// no piece matched the data in the slot
		if (matching_pieces.empty())
			return unassigned;

		// ------------------------------------------
		// CHECK IF THE PIECE IS IN ITS CORRECT PLACE
		// ------------------------------------------

		if (std::find(
			matching_pieces.begin()
			, matching_pieces.end()
			, current_slot) != matching_pieces.end())
		{
			const int piece_index = current_slot;

			// lock because we're writing to have_pieces
			boost::recursive_mutex::scoped_lock l(mutex);

			if (have_pieces[piece_index])
			{
				// we have already found a piece with
				// this index
				int other_slot = m_piece_to_slot[piece_index];
				assert(other_slot >= 0);

				// take one of the other matching pieces
				// that hasn't already been assigned
				int other_piece = -1;
				for (std::vector<int>::iterator i = matching_pieces.begin();
					i != matching_pieces.end(); ++i)
				{
					if (have_pieces[*i] || *i == piece_index) continue;
					other_piece = *i;
					break;
				}
				if (other_piece >= 0)
				{
					// replace the old slot with 'other_piece'
					assert(have_pieces[other_piece] == false);
					have_pieces[other_piece] = true;
					m_slot_to_piece[other_slot] = other_piece;
					m_piece_to_slot[other_piece] = other_slot;
				}
				else
				{
					// this index is the only piece with this
					// hash. The previous slot we found with
					// this hash must be the same piece. Mark
					// that piece as unassigned, since this slot
					// is the correct place for the piece.
					m_slot_to_piece[other_slot] = unassigned;
					m_free_slots.push_back(other_slot);
				}
				assert(m_piece_to_slot[piece_index] != current_slot);
				assert(m_piece_to_slot[piece_index] >= 0);
				m_piece_to_slot[piece_index] = has_no_slot;

				// to make the assert happy, a few lines down
				have_pieces[piece_index] = false;
			}
			else
			{
				++num_pieces;
			}

			assert(have_pieces[piece_index] == false);
			assert(m_piece_to_slot[piece_index] == has_no_slot);
			have_pieces[piece_index] = true;

			return piece_index;
		}

		// find a matching piece that hasn't
		// already been assigned
		int free_piece = unassigned;
		for (std::vector<int>::iterator i = matching_pieces.begin();
			i != matching_pieces.end(); ++i)
		{
			if (have_pieces[*i]) continue;
			free_piece = *i;
			break;
		}

		if (free_piece >= 0)
		{
			// lock because we're writing to have_pieces
			boost::recursive_mutex::scoped_lock l(mutex);

			assert(have_pieces[free_piece] == false);
			assert(m_piece_to_slot[free_piece] == has_no_slot);
			have_pieces[free_piece] = true;
			++num_pieces;

			return free_piece;
		}
		else
		{
			assert(free_piece == unassigned);
			return unassigned;
		}
	}
	// check if the fastresume data is up to date.
	// if it is, use it and return true. If it
	// isn't, return false and the full check
	// will be run.
	bool piece_manager::impl::check_fastresume(
		aux::piece_checker_data& data
		, std::vector<bool>& pieces
		, int& num_pieces, bool compact_mode)
	{
		assert(m_info.piece_length() > 0);
		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------

		m_compact_mode = compact_mode;

		// This will corrupt the storage
		// use while debugging to find
		// states that cannot be scanned
		// by the full check
		// m_storage->shuffle();

		m_piece_to_slot.resize(m_info.num_pieces(), has_no_slot);
		m_slot_to_piece.resize(m_info.num_pieces(), unallocated);
		m_free_slots.clear();
		m_unallocated_slots.clear();

		pieces.resize(m_info.num_pieces(), false);
		num_pieces = 0;

		// if we have fast-resume info
		// use it instead of doing the actual checking
		if (!data.piece_map.empty()
			&& data.piece_map.size() <= m_slot_to_piece.size())
		{
			for (int i = 0; i < (int)data.piece_map.size(); ++i)
			{
				m_slot_to_piece[i] = data.piece_map[i];
				if (data.piece_map[i] >= 0)
				{
					m_piece_to_slot[data.piece_map[i]] = i;
					int found_piece = data.piece_map[i];

					// if the piece is not in the unfinished list
					// we have all of it
					if (std::find_if(
						data.unfinished_pieces.begin()
						, data.unfinished_pieces.end()
						, piece_picker::has_index(found_piece))
						== data.unfinished_pieces.end())
					{
						++num_pieces;
						pieces[found_piece] = true;
					}
				}
				else if (data.piece_map[i] == unassigned)
				{
					m_free_slots.push_back(i);
				}
				else
				{
					assert(data.piece_map[i] == unallocated);
					m_unallocated_slots.push_back(i);
				}
			}

			m_unallocated_slots.reserve(int(pieces.size() - data.piece_map.size()));
			for (int i = (int)data.piece_map.size(); i < (int)pieces.size(); ++i)
			{
				m_unallocated_slots.push_back(i);
			}

			if (!m_compact_mode && !m_unallocated_slots.empty())
			{
				m_state = state_allocating;
				return false;
			}
			else
			{
				m_state = state_finished;
				return true;
			}
		}

		m_state = state_create_files;
		return false;
	}
	// performs the full check and full allocation
	// (if necessary). returns true if finished and
	// false if it should be called again
	// the second return value is the progress the
	// file check is at. 0 is nothing done, and 1
	// is finished
	std::pair<bool, float> piece_manager::impl::check_files(
		std::vector<bool>& pieces, int& num_pieces, boost::recursive_mutex& mutex)
	{
		assert(num_pieces == std::count(pieces.begin(), pieces.end(), true));

		if (m_state == state_allocating)
		{
			if (m_compact_mode)
			{
				m_state = state_finished;
				return std::make_pair(true, 1.f);
			}

			if (m_unallocated_slots.empty())
			{
				m_state = state_finished;
				return std::make_pair(true, 1.f);
			}

			// if we're not in compact mode, make sure the
			// pieces are spread out and placed at their
			// final position
			assert(!m_unallocated_slots.empty());

			if (!m_fill_mode)
			{
				// if we're not filling the allocation
				// just make sure we move the current pieces
				// into place, and just skip all other
				// allocation
				// allocate_slots returns true if it had to
				// move any data
				allocate_slots(m_unallocated_slots.size(), true);
			}
			else
			{
				allocate_slots(1);
			}

			return std::make_pair(false, 1.f - (float)m_unallocated_slots.size()
				/ (float)m_slot_to_piece.size());
		}

		if (m_state == state_create_files)
		{
			m_storage->initialize(!m_fill_mode && !m_compact_mode);

			m_current_slot = 0;
			m_state = state_full_check;
			m_piece_data.resize(int(m_info.piece_length()));
			return std::make_pair(false, 0.f);
		}

		assert(m_state == state_full_check);

		// ------------------------
		//   DO THE FULL CHECK
		// ------------------------

		try
		{
			int piece_size = int(m_info.piece_size(m_current_slot));
			int num_read = m_storage->read(&m_piece_data[0]
				, m_current_slot, 0, piece_size);

			// if the file is incomplete, skip the rest of it
			if (num_read != piece_size)
				throw file_error("");

			if (m_hash_to_piece.empty())
			{
				for (int i = 0; i < m_info.num_pieces(); ++i)
				{
					m_hash_to_piece.insert(std::make_pair(m_info.hash_for_piece(i), i));
				}
			}

			int piece_index = identify_data(m_piece_data, m_current_slot
				, pieces, num_pieces, m_hash_to_piece, mutex);

			assert(num_pieces == std::count(pieces.begin(), pieces.end(), true));
			assert(piece_index == unassigned || piece_index >= 0);

			const bool this_should_move = piece_index >= 0 && m_slot_to_piece[piece_index] != unallocated;
			const bool other_should_move = m_piece_to_slot[m_current_slot] != has_no_slot;

			// check if this piece should be swapped with any other slot
			// this section will ensure that the storage is correctly sorted
			// libtorrent will never leave the storage in a state that
			// requires this sorting, but other clients may.

			// example of worst case:
			//                          | m_current_slot = 5
			//                          V
			//  +---+- - - +---+- - - +---+- -
			//  | x |      | 5 |      | 3 |     <- piece data in slots
			//  +---+- - - +---+- - - +---+- -
			//    3          y          5       <- slot index

			// in this example, the data in the m_current_slot (5)
			// is piece 3. It has to be moved into slot 3. The data
			// in slot y (piece 5) should be moved into the m_current_slot.
			// and the data in slot 3 (piece x) should be moved to slot y.

			// there are three possible cases.
			// 1. There's another piece that should be placed into this slot
			// 2. This piece should be placed into another slot.
			// 3. There's another piece that should be placed into this slot
			//    and this piece should be placed into another slot

			// swap piece_index with this slot
			if (this_should_move && !other_should_move)
			{
				assert(piece_index != m_current_slot);

				const int other_slot = piece_index;
				assert(other_slot >= 0);
				int other_piece = m_slot_to_piece[other_slot];

				m_slot_to_piece[other_slot] = piece_index;
				m_slot_to_piece[m_current_slot] = other_piece;
				m_piece_to_slot[piece_index] = piece_index;
				if (other_piece >= 0) m_piece_to_slot[other_piece] = m_current_slot;

				if (other_piece == unassigned)
				{
					std::vector<int>::iterator i =
						std::find(m_free_slots.begin(), m_free_slots.end(), other_slot);
					assert(i != m_free_slots.end());
					m_free_slots.erase(i);
					m_free_slots.push_back(m_current_slot);
				}

				if (other_piece >= 0)
					m_storage->swap_slots(other_slot, m_current_slot);
				else
					m_storage->move_slot(m_current_slot, other_slot);

				assert(m_slot_to_piece[m_current_slot] == unassigned
					|| m_piece_to_slot[m_slot_to_piece[m_current_slot]] == m_current_slot);
			}
			else if (!this_should_move && other_should_move)
			{
				assert(piece_index != m_current_slot);

				const int other_piece = m_current_slot;
				const int other_slot = m_piece_to_slot[other_piece];
				assert(other_slot >= 0);

				m_slot_to_piece[m_current_slot] = other_piece;
				m_slot_to_piece[other_slot] = piece_index;
				m_piece_to_slot[other_piece] = m_current_slot;

				if (piece_index == unassigned)
					m_free_slots.push_back(other_slot);

				if (piece_index >= 0)
				{
					m_piece_to_slot[piece_index] = other_slot;
					m_storage->swap_slots(other_slot, m_current_slot);
				}
				else
				{
					m_storage->move_slot(other_slot, m_current_slot);
				}
				assert(m_slot_to_piece[m_current_slot] == unassigned
					|| m_piece_to_slot[m_slot_to_piece[m_current_slot]] == m_current_slot);
			}
			else if (this_should_move && other_should_move)
			{
				assert(piece_index != m_current_slot);
				assert(piece_index >= 0);

				const int piece1 = m_slot_to_piece[piece_index];
				const int piece2 = m_current_slot;
				const int slot1 = piece_index;
				const int slot2 = m_piece_to_slot[piece2];

				assert(slot2 >= 0);
				assert(piece2 >= 0);

				if (slot1 == slot2)
				{
					// this means there are only two pieces involved in the swap
					assert(piece1 >= 0);

					// movement diagram:
					// +-------------------------------+
					// |                               |
					// +--> slot1 --> m_current_slot --+

					m_slot_to_piece[slot1] = piece_index;
					m_slot_to_piece[m_current_slot] = piece1;

					m_piece_to_slot[piece_index] = slot1;
					m_piece_to_slot[piece1] = m_current_slot;

					assert(piece1 == m_current_slot);
					assert(piece_index == slot1);

					m_storage->swap_slots(m_current_slot, slot1);

					assert(m_slot_to_piece[m_current_slot] == unassigned
						|| m_piece_to_slot[m_slot_to_piece[m_current_slot]] == m_current_slot);
				}
				else
				{
					assert(slot1 != slot2);
					assert(piece1 != piece2);

					// movement diagram:
					// +-----------------------------------------+
					// |                                         |
					// +--> slot1 --> slot2 --> m_current_slot --+

					m_slot_to_piece[slot1] = piece_index;
					m_slot_to_piece[slot2] = piece1;
					m_slot_to_piece[m_current_slot] = piece2;

					m_piece_to_slot[piece_index] = slot1;
					m_piece_to_slot[m_current_slot] = piece2;

					if (piece1 == unassigned)
					{
						std::vector<int>::iterator i =
							std::find(m_free_slots.begin(), m_free_slots.end(), slot1);
						assert(i != m_free_slots.end());
						m_free_slots.erase(i);
						m_free_slots.push_back(slot2);
					}

					if (piece1 >= 0)
					{
						m_piece_to_slot[piece1] = slot2;
						m_storage->swap_slots3(m_current_slot, slot1, slot2);
					}
					else
					{
						m_storage->move_slot(m_current_slot, slot1);
						m_storage->move_slot(slot2, m_current_slot);
					}

					assert(m_slot_to_piece[m_current_slot] == unassigned
						|| m_piece_to_slot[m_slot_to_piece[m_current_slot]] == m_current_slot);
				}
			}
			else
			{
				assert(m_piece_to_slot[m_current_slot] == has_no_slot || piece_index != m_current_slot);
				assert(m_slot_to_piece[m_current_slot] == unallocated);
				assert(piece_index == unassigned || m_piece_to_slot[piece_index] == has_no_slot);

				// the slot was identified as piece 'piece_index'
				if (piece_index != unassigned)
					m_piece_to_slot[piece_index] = m_current_slot;
				else
					m_free_slots.push_back(m_current_slot);

				m_slot_to_piece[m_current_slot] = piece_index;

				assert(m_slot_to_piece[m_current_slot] == unassigned
					|| m_piece_to_slot[m_slot_to_piece[m_current_slot]] == m_current_slot);
			}
		}
		catch (file_error&)
		{
			// find the file that failed, and skip all the blocks in that file
			size_type file_offset = 0;
			size_type current_offset = m_current_slot * m_info.piece_length();
			for (torrent_info::file_iterator i = m_info.begin_files();
				i != m_info.end_files(); ++i)
			{
				file_offset += i->size;
				if (file_offset > current_offset) break;
			}

			assert(file_offset > current_offset);
			int skip_blocks = static_cast<int>(
				(file_offset - current_offset + m_info.piece_length() - 1)
				/ m_info.piece_length());

			for (int i = m_current_slot; i < m_current_slot + skip_blocks; ++i)
			{
				assert(m_slot_to_piece[i] == unallocated);
				m_unallocated_slots.push_back(i);
			}

			// current slot will increase by one at the end of the for-loop too
			m_current_slot += skip_blocks - 1;
		}

		++m_current_slot;

		if (m_current_slot >= m_info.num_pieces())
		{
			assert(m_current_slot == m_info.num_pieces());

			// clear the memory we've been using
			std::vector<char>().swap(m_piece_data);
			std::multimap<sha1_hash, int>().swap(m_hash_to_piece);
			m_state = state_allocating;
			assert(num_pieces == std::count(pieces.begin(), pieces.end(), true));
			return std::make_pair(false, 1.f);
		}

		assert(num_pieces == std::count(pieces.begin(), pieces.end(), true));

		return std::make_pair(false, (float)m_current_slot / m_info.num_pieces());
	}
	bool piece_manager::check_fastresume(
		aux::piece_checker_data& d, std::vector<bool>& pieces
		, int& num_pieces, bool compact_mode)
	{
		return m_pimpl->check_fastresume(d, pieces, num_pieces, compact_mode);
	}

	std::pair<bool, float> piece_manager::check_files(
		std::vector<bool>& pieces
		, int& num_pieces
		, boost::recursive_mutex& mutex)
	{
		return m_pimpl->check_files(pieces, num_pieces, mutex);
	}
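
	// The two wrappers above are intended to be driven as a loop by the
	// checker thread: check_fastresume() first, and only if it returns false,
	// check_files() repeatedly until its first return value is true. A rough
	// sketch (the local names 'pm', 'data', 'compact' and 'mutex' are
	// illustrative, not part of this file):
	//
	//   std::vector<bool> pieces;
	//   int num_pieces = 0;
	//   if (!pm.check_fastresume(data, pieces, num_pieces, compact))
	//   {
	//       std::pair<bool, float> progress(false, 0.f);
	//       while (!progress.first)
	//           progress = pm.check_files(pieces, num_pieces, mutex);
	//   }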
	int piece_manager::impl::allocate_slot_for_piece(int piece_index)
	{
		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------

		assert(piece_index >= 0);
		assert(piece_index < (int)m_piece_to_slot.size());
		assert(m_piece_to_slot.size() == m_slot_to_piece.size());

		int slot_index = m_piece_to_slot[piece_index];

		if (slot_index != has_no_slot)
		{
			assert(slot_index >= 0);
			assert(slot_index < (int)m_slot_to_piece.size());
			return slot_index;
		}

		if (m_free_slots.empty())
		{
			allocate_slots(1);
			assert(!m_free_slots.empty());
		}

		std::vector<int>::iterator iter(
			std::find(
				m_free_slots.begin()
				, m_free_slots.end()
				, piece_index));

		if (iter == m_free_slots.end())
		{
			assert(m_slot_to_piece[piece_index] != unassigned);
			assert(!m_free_slots.empty());
			iter = m_free_slots.end() - 1;

			// special case to make sure we don't use the last slot
			// when we shouldn't, since it's smaller than ordinary slots
			if (*iter == m_info.num_pieces() - 1 && piece_index != *iter)
			{
				if (m_free_slots.size() == 1)
					allocate_slots(1);
				assert(m_free_slots.size() > 1);
				// assumes that all allocated slots
				// are put at the end of the free_slots vector
				iter = m_free_slots.end() - 1;
			}
		}

		slot_index = *iter;
		m_free_slots.erase(iter);

		assert(m_slot_to_piece[slot_index] == unassigned);

		m_slot_to_piece[slot_index] = piece_index;
		m_piece_to_slot[piece_index] = slot_index;

		// there is another piece already assigned to
		// the slot we are interested in, swap positions
		if (slot_index != piece_index
			&& m_slot_to_piece[piece_index] >= 0)
		{
#if !defined(NDEBUG) && defined(TORRENT_STORAGE_DEBUG)
			std::stringstream s;

			s << "there is another piece at our slot, swapping..";

			s << "\n piece_index: ";
			s << piece_index;
			s << "\n slot_index: ";
			s << slot_index;
			s << "\n piece at our slot: ";
			s << m_slot_to_piece[piece_index];
			s << "\n";

			print_to_log(s.str());
			debug_log();
#endif

			int piece_at_our_slot = m_slot_to_piece[piece_index];
			assert(m_piece_to_slot[piece_at_our_slot] == piece_index);

			std::swap(
				m_slot_to_piece[piece_index]
				, m_slot_to_piece[slot_index]);

			std::swap(
				m_piece_to_slot[piece_index]
				, m_piece_to_slot[piece_at_our_slot]);

			m_storage->move_slot(piece_index, slot_index);

			assert(m_slot_to_piece[piece_index] == piece_index);
			assert(m_piece_to_slot[piece_index] == piece_index);

			slot_index = piece_index;

#if !defined(NDEBUG) && defined(TORRENT_STORAGE_DEBUG)
			debug_log();
#endif
		}

		assert(slot_index >= 0);
		assert(slot_index < (int)m_slot_to_piece.size());
		return slot_index;
	}
	// this is used to notify potential other
	// threads that the allocation-function has exited
	struct allocation_syncronization
	{
		allocation_syncronization(
			bool& flag
			, boost::condition& cond
			, boost::mutex& monitor)
			: m_flag(flag)
			, m_cond(cond)
			, m_monitor(monitor)
		{
			boost::mutex::scoped_lock lock(m_monitor);

			while (m_flag)
				m_cond.wait(lock);

			m_flag = true;
		}

		~allocation_syncronization()
		{
			boost::mutex::scoped_lock lock(m_monitor);
			m_flag = false;
			m_cond.notify_one();
		}

		bool& m_flag;
		boost::condition& m_cond;
		boost::mutex& m_monitor;
	};
	bool piece_manager::impl::allocate_slots(int num_slots, bool abort_on_disk)
	{
		assert(num_slots > 0);

		// this object will synchronize the allocation with
		// potential other threads
		allocation_syncronization sync_obj(
			m_allocating
			, m_allocating_condition
			, m_allocating_monitor);

		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------

		assert(!m_unallocated_slots.empty());

		const int stack_buffer_size = 16*1024;
		char zeroes[stack_buffer_size];
		memset(zeroes, 0, stack_buffer_size);

		bool written = false;

		for (int i = 0; i < num_slots && !m_unallocated_slots.empty(); ++i)
		{
			int pos = m_unallocated_slots.front();
			assert(m_slot_to_piece[pos] == unallocated);
			assert(m_piece_to_slot[pos] != pos);

			int new_free_slot = pos;
			if (m_piece_to_slot[pos] != has_no_slot)
			{
				new_free_slot = m_piece_to_slot[pos];
				m_storage->move_slot(new_free_slot, pos);
				m_slot_to_piece[pos] = pos;
				m_piece_to_slot[pos] = pos;
				written = true;
			}
			else if (m_fill_mode)
			{
				int piece_size = int(m_info.piece_size(pos));
				int offset = 0;
				for (; piece_size > 0; piece_size -= stack_buffer_size
					, offset += stack_buffer_size)
				{
					m_storage->write(zeroes, pos, offset
						, std::min(piece_size, stack_buffer_size));
				}
				written = true;
			}
			m_unallocated_slots.erase(m_unallocated_slots.begin());
			m_slot_to_piece[new_free_slot] = unassigned;
			m_free_slots.push_back(new_free_slot);
			if (abort_on_disk && written) return true;
		}

		assert(m_free_slots.size() > 0);
		return written;
	}

	bool piece_manager::allocate_slots(int num_slots, bool abort_on_disk)
	{
		return m_pimpl->allocate_slots(num_slots, abort_on_disk);
	}

	path const& piece_manager::save_path() const
	{
		return m_pimpl->save_path();
	}

	bool piece_manager::move_storage(path const& save_path)
	{
		return m_pimpl->move_storage(save_path);
	}
	void piece_manager::impl::check_invariant() const
	{
		// synchronization ------------------------------------------------------
		boost::recursive_mutex::scoped_lock lock(m_mutex);
		// ----------------------------------------------------------------------
		if (m_piece_to_slot.empty()) return;

		assert((int)m_piece_to_slot.size() == m_info.num_pieces());
		assert((int)m_slot_to_piece.size() == m_info.num_pieces());

		for (std::vector<int>::const_iterator i = m_free_slots.begin();
			i != m_free_slots.end(); ++i)
		{
			assert(*i < (int)m_slot_to_piece.size());
			assert(*i >= 0);
			assert(m_slot_to_piece[*i] == unassigned);
			assert(std::find(i+1, m_free_slots.end(), *i)
				== m_free_slots.end());
		}

		for (std::vector<int>::const_iterator i = m_unallocated_slots.begin();
			i != m_unallocated_slots.end(); ++i)
		{
			assert(*i < (int)m_slot_to_piece.size());
			assert(*i >= 0);
			assert(m_slot_to_piece[*i] == unallocated);
			assert(std::find(i+1, m_unallocated_slots.end(), *i)
				== m_unallocated_slots.end());
		}

		for (int i = 0; i < m_info.num_pieces(); ++i)
		{
			// Check domain of piece_to_slot's elements
			if (m_piece_to_slot[i] != has_no_slot)
			{
				assert(m_piece_to_slot[i] >= 0);
				assert(m_piece_to_slot[i] < (int)m_slot_to_piece.size());
			}

			// Check domain of slot_to_piece's elements
			if (m_slot_to_piece[i] != unallocated
				&& m_slot_to_piece[i] != unassigned)
			{
				assert(m_slot_to_piece[i] >= 0);
				assert(m_slot_to_piece[i] < (int)m_piece_to_slot.size());
			}

			// do more detailed checks on piece_to_slot
			if (m_piece_to_slot[i] >= 0)
			{
				assert(m_slot_to_piece[m_piece_to_slot[i]] == i);
				if (m_piece_to_slot[i] != i)
				{
					assert(m_slot_to_piece[i] == unallocated);
				}
			}
			else
			{
				assert(m_piece_to_slot[i] == has_no_slot);
			}

			// do more detailed checks on slot_to_piece

			if (m_slot_to_piece[i] >= 0)
			{
				assert(m_slot_to_piece[i] < (int)m_piece_to_slot.size());
				assert(m_piece_to_slot[m_slot_to_piece[i]] == i);
#ifdef TORRENT_STORAGE_DEBUG
				assert(std::find(
					m_unallocated_slots.begin()
					, m_unallocated_slots.end()
					, i) == m_unallocated_slots.end()
				);
				assert(std::find(
					m_free_slots.begin()
					, m_free_slots.end()
					, i) == m_free_slots.end()
				);
#endif
			}
			else if (m_slot_to_piece[i] == unallocated)
			{
#ifdef TORRENT_STORAGE_DEBUG
				assert(m_unallocated_slots.empty()
					|| (std::find(
						m_unallocated_slots.begin()
						, m_unallocated_slots.end()
						, i) != m_unallocated_slots.end())
				);
#endif
			}
			else if (m_slot_to_piece[i] == unassigned)
			{
#ifdef TORRENT_STORAGE_DEBUG
				assert(std::find(
					m_free_slots.begin()
					, m_free_slots.end()
					, i) != m_free_slots.end()
				);
#endif
			}
			else
			{
				assert(false && "m_slot_to_piece[i] is invalid");
			}
		}
	}
#ifdef TORRENT_STORAGE_DEBUG
	void piece_manager::impl::debug_log() const
	{
		std::stringstream s;

		s << "index\tslot\tpiece\n";

		for (int i = 0; i < m_info.num_pieces(); ++i)
		{
			s << i << "\t" << m_slot_to_piece[i] << "\t";
			s << m_piece_to_slot[i] << "\n";
		}

		s << "---------------------------------\n";

		print_to_log(s.str());
	}
#endif
} // namespace libtorrent