//
// This file is part of the aMule Project.
//
// Copyright (c) 2003-2008 aMule Team ( admin@amule.org / http://www.amule.org )
// Copyright (c) 2002-2008 Merkur ( devs@emule-project.net / http://www.emule-project.net )
//
// Any parts of this program derived from the xMule, lMule or eMule project,
// or contributed by third-party developers are copyrighted by their
// respective authors.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
//
#include "PartFile.h"		// Interface declarations.

#include "config.h"		// Needed for VERSION

#include <protocol/kad/Constants.h>
#include <protocol/ed2k/Client2Client/TCP.h>
#include <protocol/Protocols.h>
#include <common/DataFileVersion.h>
#include <common/Constants.h>
#include <tags/FileTags.h>

#include <wx/tokenzr.h>		// Needed for wxStringTokenizer

#include "KnownFileList.h"	// Needed for CKnownFileList
#include "UploadQueue.h"	// Needed for CFileHash
#include "IPFilter.h"		// Needed for CIPFilter
#include "Server.h"		// Needed for CServer
#include "ServerConnect.h"	// Needed for CServerConnect
#include "updownclient.h"	// Needed for CUpDownClient
#include "MemFile.h"		// Needed for CMemFile
#include "Preferences.h"	// Needed for CPreferences
#include "DownloadQueue.h"	// Needed for CDownloadQueue
#include "amule.h"		// Needed for theApp
#include "ED2KLink.h"		// Needed for CED2KLink
#include "Packet.h"		// Needed for CTag
#include "SearchList.h"		// Needed for CSearchFile
#include "ClientList.h"		// Needed for clientlist
#include "Statistics.h"		// Needed for theStats

#include <common/Format.h>	// Needed for CFormat
#include <common/FileFunctions.h>	// Needed for GetLastModificationTime
#include "ThreadTasks.h"	// Needed for CHashingTask/CCompletionTask/CAllocateFileTask
#include "GuiEvents.h"		// Needed for Notify_*
#include "DataToText.h"		// Needed for OriginToText()
#include "PlatformSpecific.h"	// Needed for CreateSparseFile()
#include "FileArea.h"		// Needed for CFileArea
#include "ScopedPtr.h"		// Needed for CScopedArray
#include "CorruptionBlackBox.h"

#include "kademlia/kademlia/Kademlia.h"
#include "kademlia/kademlia/Search.h"
SFileRating::SFileRating(const wxString &u, const wxString &f, sint16 r, const wxString &c)
:
	UserName(u),
	FileName(f),
	Rating(r),
	Comment(c)
{
}


SFileRating::SFileRating(const SFileRating &fr)
:
	UserName(fr.UserName),
	FileName(fr.FileName),
	Rating(fr.Rating),
	Comment(fr.Comment)
{
}


SFileRating::SFileRating(const CUpDownClient &client)
:
	UserName(client.GetUserName()),
	FileName(client.GetClientFilename()),
	Rating(client.GetFileRating()),
	Comment(client.GetFileComment())
{
}


SFileRating::~SFileRating()
{
}
class PartFileBufferedData
{
public:
	CScopedArray<byte> data;	// This is the data to be written
	uint64 start;			// This is the start offset of the data
	uint64 end;			// This is the end offset of the data
	Requested_Block_Struct *block;	// This is the requested block that this data relates to

	PartFileBufferedData(byte* _data, uint64 _start, uint64 _end, Requested_Block_Struct* _block)
		: data(_data), start(_start), end(_end), block(_block)
	{}
};
typedef std::list<Chunk> ChunkList;
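// Chunk (not defined in this excerpt) pairs a part number with its source
// availability ('frequency') and a computed priority ('rank');
// GetNextRequestedBlock() below builds a ChunkList of candidate parts and
// picks the entry with the lowest rank.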
CPartFile::CPartFile()


CPartFile::CPartFile(CSearchFile* searchresult)
{
	m_abyFileHash = searchresult->GetFileHash();
	SetFileName(searchresult->GetFileName());
	SetFileSize(searchresult->GetFileSize());

	for (unsigned int i = 0; i < searchresult->m_taglist.size(); ++i){
		const CTag& pTag = searchresult->m_taglist[i];

		bool bTagAdded = false;
		if (pTag.GetNameID() == 0 && !pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {
				wxChar*	pszName;
				uint8	nType;
			} _aMetaTags[] =
			{
				{ wxT(FT_ED2K_MEDIA_ARTIST),	2 },
				{ wxT(FT_ED2K_MEDIA_ALBUM),	2 },
				{ wxT(FT_ED2K_MEDIA_TITLE),	2 },
				{ wxT(FT_ED2K_MEDIA_LENGTH),	2 },
				{ wxT(FT_ED2K_MEDIA_BITRATE),	3 },
				{ wxT(FT_ED2K_MEDIA_CODEC),	2 }
			};

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if ( pTag.GetType() == _aMetaTags[t].nType &&
					(pTag.GetName() == _aMetaTags[t].pszName)) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {
						break;
					}

					// skip "length" tags with "0: 0" values
					if (pTag.GetName() == wxT(FT_ED2K_MEDIA_LENGTH)) {
						if (pTag.GetStr().IsSameAs(wxT("0: 0")) ||
							pTag.GetStr().IsSameAs(wxT("0:0"))) {
							break;
						}
					}

					// skip "bitrate" tags with '0' values
					if ((pTag.GetName() == wxT(FT_ED2K_MEDIA_BITRATE)) && !pTag.GetInt()) {
						break;
					}

					AddDebugLogLineM( false, logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);
					bTagAdded = true;
					break;
				}
			}
		} else if (pTag.GetNameID() != 0 && pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {
				uint8	nID;
				uint8	nType;
			} _aMetaTags[] =

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if (pTag.GetType() == _aMetaTags[t].nType && pTag.GetNameID() == _aMetaTags[t].nID) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {
						break;
					}

					AddDebugLogLineM( false, logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);
					bTagAdded = true;
					break;
				}
			}
		}

		if (!bTagAdded) {
			AddDebugLogLineM( false, logPartFile,
				wxT("CPartFile::CPartFile(CSearchFile*): ignored tag ") +
				pTag.GetFullInfo() );
		}
	}
CPartFile::CPartFile(const CED2KFileLink* fileLink)
{
	SetFileName(CPath(fileLink->GetName()));
	SetFileSize(fileLink->GetSize());
	m_abyFileHash = fileLink->GetHashKey();

	if (fileLink->m_hashset) {
		if (!LoadHashsetFromFile(fileLink->m_hashset, true)) {
			AddDebugLogLineM(true, logPartFile, wxT("eD2K link contained invalid hashset: ") + fileLink->GetLink());
		}
	}
CPartFile::~CPartFile()
{
	// if it's not opened, it was completed or deleted
	if (m_hpartfile.IsOpened()) {

		// Update met file (with current directory entry)

	DeleteContents(m_BufferedData_list);
	delete m_CorruptionBlackBox;

	wxASSERT(m_SrcList.empty());
	wxASSERT(m_A4AFsrclist.empty());
void CPartFile::CreatePartFile()
{
	// use lowest free partfilenumber for free file (InterCeptor)
		m_partmetfilename = CPath(wxString::Format(wxT("%03i.part.met"), i));
		m_fullname = thePrefs::GetTempDir().JoinPaths(m_partmetfilename);
	} while (m_fullname.FileExists());

	m_CorruptionBlackBox->SetPartNumber(m_partmetfilename.RemoveAllExt().GetPrintable());

	wxString strPartName = m_partmetfilename.RemoveExt().GetRaw();
	m_taglist.push_back(CTagString(FT_PARTFILENAME, strPartName));

	m_gaplist.Init(GetFileSize(), true);	// Init empty

	m_PartPath = m_fullname.RemoveExt();
	if (thePrefs::GetAllocFullFile()) {
		fileCreated = m_hpartfile.Create(m_PartPath, true);
	} else {
		fileCreated = PlatformSpecific::CreateSparseFile(m_PartPath, GetFileSize());
	}
	if (!fileCreated) {
		AddLogLineM(false,_("ERROR: Failed to create partfile)"));
		SetPartFileStatus(PS_ERROR);
	}

	SetFilePath(thePrefs::GetTempDir());

	if (thePrefs::GetAllocFullFile()) {
		SetPartFileStatus(PS_ALLOCATING);
		CThreadScheduler::AddTask(new CAllocateFileTask(this, thePrefs::AddNewFilesPaused()));
	} else {
		AllocationFinished();
	}

	m_hashsetneeded = (GetED2KPartHashCount() > 0);

	SetActive(theApp->IsConnected());
uint8 CPartFile::LoadPartFile(const CPath& in_directory, const CPath& filename, bool from_backup, bool getsizeonly)
{
	bool isnewstyle = false;
	uint8 version, partmettype = PMT_UNKNOWN;

	std::map<uint16, Gap_Struct*> gap_map; // Slugfiller

	m_partmetfilename = filename;
	m_CorruptionBlackBox->SetPartNumber(m_partmetfilename.RemoveAllExt().GetPrintable());
	m_filePath = in_directory;
	m_fullname = m_filePath.JoinPaths(m_partmetfilename);
	m_PartPath = m_fullname.RemoveExt();

	// read the file data from the part.met file
	CPath curMetFilename = m_fullname;
		curMetFilename = curMetFilename.AppendExt(PARTMET_BAK_EXT);
		AddLogLineM(false, CFormat( _("Trying to load backup of met-file from %s") )

	CFile metFile(curMetFilename, CFile::read);
	if (!metFile.IsOpened()) {
		AddLogLineM(false, CFormat( _("ERROR: Failed to open part.met file: %s ==> %s") )

	} else if (metFile.GetLength() == 0) {
		AddLogLineM(false, CFormat( _("ERROR: part.met file is 0 size: %s ==> %s") )

	version = metFile.ReadUInt8();
	if (version != PARTFILE_VERSION && version != PARTFILE_SPLITTEDVERSION && version != PARTFILE_VERSION_LARGEFILE){
		//if (version == 83) return ImportShareazaTempFile(...)
		AddLogLineM(false, CFormat( _("ERROR: Invalid part.met file version: %s ==> %s") )

	isnewstyle = (version == PARTFILE_SPLITTEDVERSION);
	partmettype = isnewstyle ? PMT_SPLITTED : PMT_DEFAULTOLD;

	if (version == PARTFILE_VERSION) {	// Do we still need this check ?
		uint8 test[4];			// It will fail for certain files.
		metFile.Seek(24, wxFromStart);
		metFile.Read(test,4);

		metFile.Seek(1, wxFromStart);
		if (test[0]==0 && test[1]==0 && test[2]==2 && test[3]==1) {
			isnewstyle=true;	// eDonkey's so-called "old part style"
			partmettype=PMT_NEWOLD;
		}
	}

	if (isnewstyle) {
		uint32 temp = metFile.ReadUInt32();

		if (temp==0) {	// 0.48 partmets - different again
			LoadHashsetFromFile(&metFile, false);
		} else {
			metFile.Seek(2, wxFromStart);
			LoadDateFromFile(&metFile);
			m_abyFileHash = metFile.ReadHash();
		}
	} else {
		LoadDateFromFile(&metFile);
		LoadHashsetFromFile(&metFile, false);
	}

	uint32 tagcount = metFile.ReadUInt32();

	for (uint32 j = 0; j < tagcount; ++j) {
		CTag newtag(metFile,true);
			(newtag.GetNameID() == FT_FILESIZE ||
			 newtag.GetNameID() == FT_FILENAME))) {
			switch(newtag.GetNameID()) {
					if (!GetFileName().IsOk()) {
						// If it's not empty, we already loaded the unicoded one
						SetFileName(CPath(newtag.GetStr()));
				case FT_LASTSEENCOMPLETE: {
					lastseencomplete = newtag.GetInt();
					SetFileSize(newtag.GetInt());
				case FT_TRANSFERRED: {
					transferred = newtag.GetInt();
					//#warning needs setfiletype string
					//SetFileType(newtag.GetStr());
					m_category = newtag.GetInt();
					if (m_category > theApp->glob_prefs->GetCatCount() - 1 ) {
				case FT_OLDDLPRIORITY:
				case FT_DLPRIORITY: {
						m_iDownPriority = newtag.GetInt();
						if( m_iDownPriority == PR_AUTO ){
							m_iDownPriority = PR_HIGH;
							SetAutoDownPriority(true);
							if ( m_iDownPriority != PR_LOW &&
							     m_iDownPriority != PR_NORMAL &&
							     m_iDownPriority != PR_HIGH )
								m_iDownPriority = PR_NORMAL;
							SetAutoDownPriority(false);
					m_paused = (newtag.GetInt() == 1);
					m_stopped = m_paused;
				case FT_OLDULPRIORITY:
				case FT_ULPRIORITY: {
						SetUpPriority(newtag.GetInt(), false);
						if( GetUpPriority() == PR_AUTO ){
							SetUpPriority(PR_HIGH, false);
							SetAutoUpPriority(true);
							SetAutoUpPriority(false);
				case FT_KADLASTPUBLISHSRC:{
					SetLastPublishTimeKadSrc(newtag.GetInt(), 0);
					if(GetLastPublishTimeKadSrc() > (uint32)time(NULL)+KADEMLIAREPUBLISHTIMES) {
						// There may be a possibility of an older client that saved a random number here.. This will check for that..
						SetLastPublishTimeKadSrc(0,0);
				case FT_KADLASTPUBLISHNOTES:{
					SetLastPublishTimeKadNotes(newtag.GetInt());
				// old tags: as long as they are not needed, take the chance to purge them
				case FT_KADLASTPUBLISHKEY:
				case FT_DL_ACTIVE_TIME:
					if (newtag.IsInt()) {
						m_nDlActiveTime = newtag.GetInt();
				case FT_CORRUPTEDPARTS: {
					wxASSERT(m_corrupted_list.empty());
					wxString strCorruptedParts(newtag.GetStr());
					wxStringTokenizer tokenizer(strCorruptedParts, wxT(","));
					while ( tokenizer.HasMoreTokens() ) {
						wxString token = tokenizer.GetNextToken();
						if (token.ToULong(&uPart)) {
							if (uPart < GetPartCount() && !IsCorruptedPart(uPart)) {
								m_corrupted_list.push_back(uPart);
						hash.DecodeBase32(newtag.GetStr()) == CAICHHash::GetHashSize();
					wxASSERT(hashSizeOk);
						m_pAICHHashSet->SetMasterHash(hash, AICH_VERIFIED);
				case FT_ATTRANSFERRED:{
					statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (uint64)newtag.GetInt());
				case FT_ATTRANSFERREDHI:{
					statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (((uint64)newtag.GetInt()) << 32));
				case FT_ATREQUESTED:{
					statistic.SetAllTimeRequests(newtag.GetInt());
					statistic.SetAllTimeAccepts(newtag.GetInt());
					// Start Changes by Slugfiller for better exception handling
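					// Gap tags come in pairs of integer tags named "<FT_GAPSTART><n>" and
					// "<FT_GAPEND><n>", where <n> is the key used for gap_map below.  The
					// end tag stores the first byte *after* the gap (matching what
					// SavePartFile() writes), which is why 1 is subtracted when reading it.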
					wxCharBuffer tag_ansi_name = newtag.GetName().ToAscii();
					char gap_mark = tag_ansi_name ? tag_ansi_name[0u] : 0;
					if ( newtag.IsInt() && (newtag.GetName().Length() > 1) &&
						((gap_mark == FT_GAPSTART) ||
						 (gap_mark == FT_GAPEND))) {
						Gap_Struct *gap = NULL;
						unsigned long int gapkey;
						if (newtag.GetName().Mid(1).ToULong(&gapkey)) {
							if ( gap_map.find( gapkey ) == gap_map.end() ) {
								gap = new Gap_Struct;
								gap_map[gapkey] = gap;
								gap->start = (uint64)-1;
								gap->end = (uint64)-1;
								gap = gap_map[ gapkey ];
							if (gap_mark == FT_GAPSTART) {
								gap->start = newtag.GetInt();
							if (gap_mark == FT_GAPEND) {
								gap->end = newtag.GetInt()-1;
							AddDebugLogLineN(logPartFile, wxT("Wrong gap map key while reading met file!"));
						// End Changes by Slugfiller for better exception handling
						m_taglist.push_back(newtag);
					// Nothing. Else, nothing.

	// load the hashsets from the hybridstylepartmet
	if (isnewstyle && !getsizeonly && (metFile.GetPosition()<metFile.GetLength()) ) {
		metFile.Seek(1, wxFromCurrent);

		uint16 parts=GetPartCount();	// assuming we will get all hashsets

		for (uint16 i = 0; i < parts && (metFile.GetPosition()+16<metFile.GetLength()); ++i){
			CMD4Hash cur_hash = metFile.ReadHash();
			m_hashlist.push_back(cur_hash);

		if (!m_hashlist.empty()) {
			CreateHashFromHashlist(m_hashlist, &checkhash);

		if (m_abyFileHash == checkhash) {

	} catch (const CInvalidPacket& e) {
		AddLogLineM(true, CFormat(wxT("Error: %s (%s) is corrupt (bad tags: %s), unable to load file."))

	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logPartFile, CFormat( wxT("IO failure while loading '%s': %s") )

	} catch (const CEOFException& WXUNUSED(e)) {
		AddLogLineM(true, CFormat( _("ERROR: %s (%s) is corrupt (wrong tagcount), unable to load file.") )

		AddLogLineM(true, _("Trying to recover file info..."));

		// Safe file is that who have

			// We have filesize, try other needed info

			// Do we need to check gaps? I think not,
			// because they are checked below. Worst
			// scenario will only mark file as 0 bytes downloaded.

			if (!GetFileName().IsOk()) {
				// Not critical, let's put a random filename.
					"Recovering no-named file - will try to recover it as RecoveredFile.dat"));
				SetFileName(CPath(wxT("RecoveredFile.dat")));

				_("Recovered all available file info :D - Trying to use it..."));

			AddLogLineM(true, _("Unable to recover file info :("));

	m_gaplist.Init(GetFileSize(), false);	// Init full, then add gaps
	// Now to flush the map into the list (Slugfiller)
	std::map<uint16, Gap_Struct*>::iterator it = gap_map.begin();
	for ( ; it != gap_map.end(); ++it ) {
		Gap_Struct* gap = it->second;
		// SLUGFILLER: SafeHash - revised code, and extra safety
		if ( (gap->start != (uint64)-1) &&
			(gap->end != (uint64)-1) &&
			gap->start <= gap->end &&
			gap->start < GetFileSize()) {
			if (gap->end >= GetFileSize()) {
				gap->end = GetFileSize()-1; // Clipping
			}
			m_gaplist.AddGap(gap->start, gap->end); // All tags accounted for, use safe adding
		// SLUGFILLER: SafeHash

	//check if this is a backup
	if ( m_fullname.GetExt().MakeLower() == wxT("backup" )) {
		m_fullname = m_fullname.RemoveExt();

	// open permanent handle
	if ( !m_hpartfile.Open(m_PartPath, CFile::read_write)) {
		AddLogLineM(false, CFormat( _("Failed to open %s (%s)") )

	SetPartFileStatus(PS_EMPTY);

		// SLUGFILLER: SafeHash - final safety, make sure any missing part of the file is gap
		if (m_hpartfile.GetLength() < GetFileSize())
			AddGap(m_hpartfile.GetLength(), GetFileSize()-1);
		// Goes both ways - Partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			AddDebugLogLineM( true, logPartFile, CFormat( wxT("Partfile \"%s\" is too large! Truncating %llu bytes.") ) % GetFileName() % (m_hpartfile.GetLength() - GetFileSize()));
			m_hpartfile.SetLength(GetFileSize());
		// SLUGFILLER: SafeHash
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM( true, logPartFile, CFormat( wxT("Error while accessing partfile \"%s\": %s") ) % GetFileName() % e.what());
		SetPartFileStatus(PS_ERROR);

	// now close the file again until needed
	m_hpartfile.Release(true);

	// check hashcount, file status etc
	if (GetHashCount() != GetED2KPartHashCount()){
		m_hashsetneeded = true;

		m_hashsetneeded = false;
		for (size_t i = 0; i < m_hashlist.size(); ++i) {

	SetPartFileStatus(PS_READY);

	if (m_gaplist.IsComplete()) {	// is this file complete already?

	if (!isnewstyle) {	// not for importing
		const time_t file_date = CPath::GetModificationTime(m_PartPath);
		if (m_lastDateChanged != file_date) {
			// It's pointless to rehash an empty file, since the case
			// where a user has zero'd a file is handled above ...
			if (m_hpartfile.GetLength()) {
				AddLogLineM(false, CFormat( _("WARNING: %s might be corrupted (%i)") )
					% (m_lastDateChanged - file_date) );

				SetPartFileStatus(PS_WAITINGFORHASH);

				CPath partFileName = m_partmetfilename.RemoveExt();
				CThreadScheduler::AddTask(new CHashingTask(m_filePath, partFileName, this));

	UpdateCompletedInfos();
	if (completedsize > transferred) {
		m_iGainDueToCompression = completedsize - transferred;
	} else if (completedsize != transferred) {
		m_iLostDueToCorruption = transferred - completedsize;
bool CPartFile::SavePartFile(bool Initial)
{
		case PS_WAITINGFORHASH:

	/* Don't write anything to disk if less than 100 KB of free space is left. */
	sint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if ((free != wxInvalidOffset) && (free < (100 * 1024))) {

		if (!m_PartPath.FileExists()) {
			throw wxString(wxT(".part file not found"));
		}

		uint32 lsc = lastseencomplete;

		CPath::BackupFile(m_fullname, wxT(".backup"));
		CPath::RemoveFile(m_fullname);

		file.Open(m_fullname, CFile::write);
		if (!file.IsOpened()) {
			throw wxString(wxT("Failed to open part.met file"));
		}

		file.WriteUInt8(IsLargeFile() ? PARTFILE_VERSION_LARGEFILE : PARTFILE_VERSION);

		file.WriteUInt32(CPath::GetModificationTime(m_PartPath));

		file.WriteHash(m_abyFileHash);
		uint16 parts = m_hashlist.size();
		file.WriteUInt16(parts);
		for (int x = 0; x < parts; ++x) {
			file.WriteHash(m_hashlist[x]);
		}

		#define FIXED_TAGS 15
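		// FIXED_TAGS (15) counts the tags that are always written below: the BOM'd
		// filename ("0") plus the numbered writes "// 1" .. "// 14".  The optional
		// tags (corrupted parts, AICH hash, Kad publish times, active download time)
		// are added to 'tagcount' individually before it is written to the file.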
		uint32 tagcount = m_taglist.size() + FIXED_TAGS + (m_gaplist.size()*2);
		if (!m_corrupted_list.empty()) {

		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){

		if (GetLastPublishTimeKadSrc()){

		if (GetLastPublishTimeKadNotes()){

		if (GetDlActiveTime()){

		file.WriteUInt32(tagcount);

		//#warning Kry - Where are lost by corruption and gained by compression?

		// 0 (unicoded part file name)
		// We write it with BOM to keep eMule compatibility. Note that the 'printable' filename is saved,
		// as presently the filename does not represent an actual file.
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file, utf8strOptBOM );
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file );	// 1

		CTagIntSized( FT_FILESIZE, GetFileSize(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 2
		CTagIntSized( FT_TRANSFERRED, transferred, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 3
		CTagInt32( FT_STATUS, (m_paused?1:0)).WriteTagToFile( &file );	// 4

		if ( IsAutoDownPriority() ) {
			CTagInt32( FT_DLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 6
		} else {
			CTagInt32( FT_DLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 6
		}

		CTagInt32( FT_LASTSEENCOMPLETE, lsc ).WriteTagToFile( &file );	// 7

		if ( IsAutoUpPriority() ) {
			CTagInt32( FT_ULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 9
		} else {
			CTagInt32( FT_ULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 9
		}

		CTagInt32(FT_CATEGORY, m_category).WriteTagToFile( &file );	// 10
		CTagInt32(FT_ATTRANSFERRED, statistic.GetAllTimeTransferred() & 0xFFFFFFFF).WriteTagToFile( &file );	// 11
		CTagInt32(FT_ATTRANSFERREDHI, statistic.GetAllTimeTransferred() >>32).WriteTagToFile( &file );	// 12
		CTagInt32(FT_ATREQUESTED, statistic.GetAllTimeRequests()).WriteTagToFile( &file );	// 13
		CTagInt32(FT_ATACCEPTED, statistic.GetAllTimeAccepts()).WriteTagToFile( &file );	// 14

		// corrupt part infos
		if (!m_corrupted_list.empty()) {
			wxString strCorruptedParts;
			std::list<uint16>::iterator it = m_corrupted_list.begin();
			for (; it != m_corrupted_list.end(); ++it) {
				uint16 uCorruptedPart = *it;
				if (!strCorruptedParts.IsEmpty()) {
					strCorruptedParts += wxT(",");
				}
				strCorruptedParts += wxString::Format(wxT("%u"), (unsigned)uCorruptedPart);
			}
			wxASSERT( !strCorruptedParts.IsEmpty() );

			CTagString( FT_CORRUPTEDPARTS, strCorruptedParts ).WriteTagToFile( &file); // 11?
		}

		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
			CTagString aichtag(FT_AICH_HASH, m_pAICHHashSet->GetMasterHash().GetString() );
			aichtag.WriteTagToFile(&file); // 12?
		}

		if (GetLastPublishTimeKadSrc()){
			CTagInt32(FT_KADLASTPUBLISHSRC, GetLastPublishTimeKadSrc()).WriteTagToFile(&file); // 15?
		}

		if (GetLastPublishTimeKadNotes()){
			CTagInt32(FT_KADLASTPUBLISHNOTES, GetLastPublishTimeKadNotes()).WriteTagToFile(&file); // 16?
		}

		if (GetDlActiveTime()){
			CTagInt32(FT_DL_ACTIVE_TIME, GetDlActiveTime()).WriteTagToFile(&file); // 17
		}

		for (uint32 j = 0; j < (uint32)m_taglist.size();++j) {
			m_taglist[j].WriteTagToFile(&file);
		}

		for (CGapList::const_iterator it = m_gaplist.begin(); it != m_gaplist.end(); ++it) {
			wxString tagName = wxString::Format(wxT(" %u"), i_pos);

			// gap start = first missing byte but gap ends = first non-missing byte
			// in eDonkey, but I think it's easier to use the real limits
			tagName[0] = FT_GAPSTART;
			CTagIntSized(tagName, it.start() , IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

			tagName[0] = FT_GAPEND;
			CTagIntSized(tagName, it.end() + 1, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );
		}

	} catch (const wxString& error) {
		AddLogLineNS(CFormat( _("ERROR while saving partfile: %s (%s ==> %s)") )

	} catch (const CIOFailureException& e) {
		AddLogLineCS(_("IO failure while saving partfile: ") + e.what());

	CPath::RemoveFile(m_fullname.AppendExt(wxT(".backup")));

	sint64 metLength = m_fullname.GetFileSize();
	if (metLength == wxInvalidOffset) {
		theApp->ShowAlert( CFormat( _("Could not retrieve length of '%s' - using %s file.") )

		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
	} else if (metLength == 0) {
		// Don't backup if it's 0 size but raise a warning!!!
		theApp->ShowAlert( CFormat( _("'%s' is 0 size somehow - using %s file.") )

		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
	} else {
		// no error, just backup
		CPath::BackupFile(m_fullname, PARTMET_BAK_EXT);
	}
void CPartFile::SaveSourceSeeds()
{
	#define MAX_SAVED_SOURCES 10

	// Kry - Sources seeds
	// Based on a Feature request, this saves the last MAX_SAVED_SOURCES
	// sources of the file, giving a 'seed' for the next run.
	// We save the last sources because:
	// 1 - They could be the hardest to get
	// 2 - They will more probably be available
	// However, if we have downloading sources, they have preference because
	// we probably have more credits on them.
	// Anyway, source exchange will get us the rest of the sources
	// This feature is currently used only on rare files (< 20 sources)

	if (GetSourceCount()>20) {

	CClientPtrList source_seeds;

	CClientPtrList::iterator it = m_downloadingSourcesList.begin();
	for( ; it != m_downloadingSourcesList.end() && n_sources < MAX_SAVED_SOURCES; ++it) {
		CUpDownClient *cur_src = *it;
		if (!cur_src->HasLowID()) {
			source_seeds.push_back(cur_src);

	if (n_sources < MAX_SAVED_SOURCES) {
		// Not enough downloading sources to fill the list, going to sources list
		if (GetSourceCount() > 0) {
			SourceSet::reverse_iterator rit = m_SrcList.rbegin();
			for ( ; ((rit != m_SrcList.rend()) && (n_sources<MAX_SAVED_SOURCES)); ++rit) {
				CUpDownClient* cur_src = *rit;
				if (!cur_src->HasLowID()) {
					source_seeds.push_back(cur_src);

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));

	file.Create(seedsPath, true);
	if (!file.IsOpened()) {
		AddLogLineM(false, CFormat( _("Failed to save part.met.seeds file for %s") )

		file.WriteUInt8(0);	// v3, to avoid v2 clients choking on it.
		file.WriteUInt8(source_seeds.size());

		CClientPtrList::iterator it2 = source_seeds.begin();
		for (; it2 != source_seeds.end(); ++it2) {
			CUpDownClient* cur_src = *it2;
			file.WriteUInt32(cur_src->GetUserIDHybrid());
			file.WriteUInt16(cur_src->GetUserPort());
			file.WriteHash(cur_src->GetUserHash());
			// CryptSettings - See SourceExchange V4
			const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
			const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
			const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
			const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
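			// Bit layout of byCryptOptions as written to the seeds file:
			//   bit 0 - source supports the crypt layer
			//   bit 1 - source requests the crypt layer
			//   bit 2 - source requires the crypt layer
			// (When reading sources elsewhere, bit 7 (0x80) additionally flags that a
			// user hash follows - see AddSources() below.)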
			file.WriteUInt8(byCryptOptions);
		}

		/* v2: Added to keep track of too old seeds */
		file.WriteUInt32(wxDateTime::Now().GetTicks());

		AddLogLineM(false, CFormat( wxPLURAL("Saved %i source seed for partfile: %s (%s)", "Saved %i source seeds for partfile: %s (%s)", n_sources) )

	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logPartFile, CFormat( wxT("Error saving partfile's seeds file (%s - %s): %s") )

		CPath::RemoveFile(seedsPath);
	}
void CPartFile::LoadSourceSeeds()
{
	CMemFile sources_data;

	bool valid_sources = false;

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));
	if (!seedsPath.FileExists()) {

	CFile file(seedsPath, CFile::read);
	if (!file.IsOpened()) {
		AddLogLineM(false, CFormat( _("Partfile %s (%s) has no seeds file") )

	if (file.GetLength() <= 1) {
		AddLogLineM(false, CFormat( _("Partfile %s (%s) has a void seeds file") )

		uint8 src_count = file.ReadUInt8();

		bool bUseSX2Format = (src_count == 0);
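		// A leading byte of 0 cannot be a valid v1/v2 source count, so it marks the
		// newer SX2-style layout written by SaveSourceSeeds() above; the real count
		// is then read from the following byte.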
		if (bUseSX2Format) {
			src_count = file.ReadUInt8();

			sources_data.WriteUInt16(src_count);

			for (int i = 0; i < src_count; ++i) {
				uint32 dwID = file.ReadUInt32();
				uint16 nPort = file.ReadUInt16();

				sources_data.WriteUInt32(bUseSX2Format ? dwID : wxUINT32_SWAP_ALWAYS(dwID));
				sources_data.WriteUInt16(nPort);
				sources_data.WriteUInt32(0);
				sources_data.WriteUInt16(0);

				if (bUseSX2Format) {
					sources_data.WriteHash(file.ReadHash());
					sources_data.WriteUInt8(file.ReadUInt8());

			// v2: Added to keep track of too old seeds
			time_t time = (time_t)file.ReadUInt32();

			// Time frame is 2 hours. More than enough to compile
			// your new aMule version!
			if ((time + MIN2S(120)) >= wxDateTime::Now().GetTicks()) {
				valid_sources = true;

			// v1 has no time data. We can safely use
			// the sources; the time will be saved next time.
			valid_sources = true;

		if (valid_sources) {
			sources_data.Seek(0);
			AddClientSources(&sources_data, SF_SOURCE_SEEDS, bUseSX2Format ? 4 : 1, bUseSX2Format);

	} catch (const CSafeIOException& e) {
		AddLogLineM(false, CFormat( _("Error reading partfile's seeds file (%s - %s): %s") )
void CPartFile::PartFileHashFinished(CKnownFile* result)
{
	m_lastDateChanged = result->m_lastDateChanged;
	bool errorfound = false;
	if (GetED2KPartHashCount() == 0){
		if (IsComplete(0, GetFileSize()-1)){
			if (result->GetFileHash() != GetFileHash()){
					"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
					"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
					% result->GetFileHash().Encode()
					% GetFileHash().Encode() );
				AddGap(0, GetFileSize()-1);

		for (size_t i = 0; i < m_hashlist.size(); ++i){
			// Kry - trel_ar's completed parts check on rehashing.
			// Very nice feature, if a file is completed but .part.met doesn't believe it,

			uint64 partStart = i * PARTSIZE;
			uint64 partEnd = partStart + GetPartSize(i) - 1;
			if (!( i < result->GetHashCount() && (result->GetPartHash(i) == GetPartHash(i)))){
				if (IsComplete(i)) {

					if ( i < result->GetHashCount() )
						wronghash = result->GetPartHash(i);

							"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
							"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
							GetED2KPartHashCount())
						% GetED2KPartHashCount()
						% wronghash.Encode()
						% GetPartHash(i).Encode() );

				if (!IsComplete(i)){
					AddLogLineM(false, CFormat( _("Found completed part (%i) in %s") )

					RemoveBlockFromList(partStart, partEnd);

		result->GetAICHHashset()->GetStatus() == AICH_HASHSETCOMPLETE &&
		status == PS_COMPLETING) {
		delete m_pAICHHashSet;
		m_pAICHHashSet = result->GetAICHHashset();
		result->SetAICHHashset(NULL);
		m_pAICHHashSet->SetOwner(this);
	}
	else if (status == PS_COMPLETING) {
		AddDebugLogLineM(false, logPartFile,
			CFormat(wxT("Failed to store new AICH Hashset for completed file: %s"))

	if (status == PS_COMPLETING){

		AddLogLineM(false, CFormat( _("Finished rehashing %s") ) % GetFileName());

		SetStatus(PS_READY);

	SetStatus(PS_READY);

	theApp->sharedfiles->SafeAddKFile(this);
void CPartFile::AddGap(uint64 start, uint64 end)
{
	m_gaplist.AddGap(start, end);
	UpdateDisplayedInfo();
}


void CPartFile::AddGap(uint16 part)
{
	m_gaplist.AddGap(part);
	UpdateDisplayedInfo();
}


bool CPartFile::IsAlreadyRequested(uint64 start, uint64 end)
{
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	for (; it != m_requestedblocks_list.end(); ++it) {
		Requested_Block_Struct* cur_block = *it;

		if ((start <= cur_block->EndOffset) && (end >= cur_block->StartOffset)) {
			return true;
		}
	}
	return false;
}


bool CPartFile::GetNextEmptyBlockInPart(uint16 partNumber, Requested_Block_Struct *result)
{
	// Find start of this part
	uint64 partStart = (PARTSIZE * partNumber);
	uint64 start = partStart;

	// What is the end limit of this block, i.e. can't go outside part (or filesize)
	uint64 partEnd = partStart + GetPartSize(partNumber) - 1;
	// Loop until find a suitable gap and return true, or no more gaps and return false
	CGapList::const_iterator it = m_gaplist.begin();

		uint64 gapStart, end;

		// Find the first gap from the start position
		for (; it != m_gaplist.end(); ++it) {
			gapStart = it.start();

			// Want gaps that overlap start<->partEnd
			if (gapStart <= partEnd && end >= start) {

			} else if (gapStart > partEnd) {

		// If no gaps after start, exit

		// Update start position if gap starts after current pos
		if (start < gapStart) {

		// Find end, keeping within the max block size and the part limit
		uint64 blockLimit = partStart + (BLOCKSIZE * (((start - partStart) / BLOCKSIZE) + 1)) - 1;
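		// blockLimit is the last byte of the BLOCKSIZE-aligned block that 'start'
		// falls into (relative to the part), so a single request never crosses a
		// block boundary and stays within the ~180 KB block size mentioned in
		// GetNextRequestedBlock() below.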
		if (end > blockLimit) {

		if (end > partEnd) {

		// If this gap has not already been requested, we have found a valid entry
		if (!IsAlreadyRequested(start, end)) {
			// Was this block to be returned
			if (result != NULL) {
				result->StartOffset = start;
				result->EndOffset = end;
				md4cpy(result->FileID, GetFileHash().GetHash());
				result->transferred = 0;
			}

			// Reposition to end of that gap

		// If tried all gaps then break out of the loop
		if (end == partEnd) {

	// No suitable gap found


void CPartFile::FillGap(uint64 start, uint64 end)
{
	m_gaplist.FillGap(start, end);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();
}


void CPartFile::FillGap(uint16 part)
{
	m_gaplist.FillGap(part);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();
}
void CPartFile::UpdateCompletedInfos()
{
	uint64 allgaps = m_gaplist.GetGapSize();

	percentcompleted = (1.0 - (double)allgaps/GetFileSize()) * 100.0;
	completedsize = GetFileSize() - allgaps;


void CPartFile::WritePartStatus(CMemFile* file)
{
	uint16 parts = GetED2KPartCount();
	file->WriteUInt16(parts);
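	// The part status is sent as a bitfield, eight parts per byte: the inner
	// loop below packs one bit per part into 'towrite' (set = part complete),
	// and the byte is flushed once 8 parts - or the last part - have been handled.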
	while (done != parts){
		for (uint32 i = 0;i != 8;++i) {
			if (IsComplete(i)) {

			if (done == parts) {

		file->WriteUInt8(towrite);


void CPartFile::WriteCompleteSourcesCount(CMemFile* file)
{
	file->WriteUInt16(m_nCompleteSourcesCount);
uint32 CPartFile::Process(uint32 reducedownload/*in percent*/,uint8 m_icounter)
{
	uint32 dwCurTick = ::GetTickCount();

	// If buffer size exceeds limit, or if not written within time limit, flush data
	if ( (m_nTotalBufferData > thePrefs::GetFileBufferSize()) ||
		(dwCurTick > (m_nLastBufferFlushTime + BUFFER_TIME_LIMIT))) {
		// Avoid flushing while copying preview file
		if (!m_bPreviewing) {

	// check if we want new sources from server --> MOVED for 16.40 version
	old_trans=transferingsrc;

	if (m_icounter < 10) {
		// Update only downloading sources.
		CClientPtrList::iterator it = m_downloadingSourcesList.begin();
		for( ; it != m_downloadingSourcesList.end(); ) {
			CUpDownClient *cur_src = *it++;
			if(cur_src->GetDownloadState() == DS_DOWNLOADING) {
				kBpsDown += cur_src->SetDownloadLimit(reducedownload);

		// Update all sources (including downloading sources)
		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
			CUpDownClient* cur_src = *it++;
			switch (cur_src->GetDownloadState()) {
				case DS_DOWNLOADING: {
					kBpsDown += cur_src->SetDownloadLimit(reducedownload);

				case DS_LOWTOLOWIP: {
					if ( cur_src->HasLowID() && !theApp->DoCallback( cur_src ) ) {
						// If we are almost maxed on sources,
						// slowly remove these clients to see
						// if we can find a better source.
						if( ((dwCurTick - lastpurgetime) > 30000) &&
							(GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8))) {
							RemoveSource( cur_src );
							lastpurgetime = dwCurTick;

						cur_src->SetDownloadState(DS_ONQUEUE);

				case DS_NONEEDEDPARTS: {
					// we try to purge no-needed sources, even without reaching the limit
					if((dwCurTick - lastpurgetime) > 40000) {
						if(!cur_src->SwapToAnotherFile(false , false, false , NULL)) {
							//however we only delete them if reaching the limit
							if (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) {
								RemoveSource(cur_src);
								lastpurgetime = dwCurTick;
								break; //Johnny-B - nothing more to do here (good eye!)

							lastpurgetime = dwCurTick;

					// doubled reasktime for no needed parts - save connections and traffic
					if ( !((!cur_src->GetLastAskedTime()) ||
						(dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME*2)) {

					// Recheck this client to see if still NNP..
					// Set to DS_NONE so that we force a TCP reask next time..
					cur_src->SetDownloadState(DS_NONE);

					if( cur_src->IsRemoteQueueFull()) {
						if( ((dwCurTick - lastpurgetime) > 60000) &&
							(GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) ) {
							RemoveSource( cur_src );
							lastpurgetime = dwCurTick;
							break; //Johnny-B - nothing more to do here (good eye!)

					// Give up to 1 min for UDP to respond..
					// If we are within one min on TCP, do not try..
					if ( theApp->IsConnected() &&
						( (!cur_src->GetLastAskedTime()) ||
						  (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME-20000)) {
						cur_src->UDPReaskForDownload();

				// No break here, since the next case takes care of asking for downloads.

				case DS_TOOMANYCONNS:

				case DS_WAITCALLBACK:
				case DS_WAITCALLBACKKAD: {
					if ( theApp->IsConnected() &&
						( (!cur_src->GetLastAskedTime()) ||
						  (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME)) {
						if (!cur_src->AskForDownload()) {
							// I left this break here just as a reminder
							// just in case we rearrange things..

		/* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
		if (IsA4AFAuto() && ((!m_LastNoNeededCheck) || (dwCurTick - m_LastNoNeededCheck > 900000))) {
			m_LastNoNeededCheck = dwCurTick;
			for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
				CUpDownClient *cur_source = *it++;
				uint8 download_state=cur_source->GetDownloadState();
				if( download_state != DS_DOWNLOADING
					&& cur_source->GetRequestFile()
					&& ((!cur_source->GetRequestFile()->IsA4AFAuto()) || download_state == DS_NONEEDEDPARTS))
					cur_source->SwapToAnotherFile(false, false, false, this);
		/* eMule 0.30c implementation, i give it a try (Creteil) END ... */

		// swap No needed partfiles if possible

		if (((old_trans==0) && (transferingsrc>0)) || ((old_trans>0) && (transferingsrc==0))) {
			SetPartFileStatus(status);

		// Kad source search
		if( GetMaxSourcePerFileUDP() > GetSourceCount()){
			//Once we can handle lowID users in Kad, we remove the second IsConnected
			if (theApp->downloadqueue->DoKademliaFileRequest() && (Kademlia::CKademlia::GetTotalFile() < KADEMLIATOTALFILE) && (dwCurTick > m_LastSearchTimeKad) && Kademlia::CKademlia::IsConnected() && theApp->IsConnected() && !IsStopped()){

				theApp->downloadqueue->SetLastKademliaFileRequest();

				if (GetKadFileSearchID()) {
					/* This will never happen anyway. We're talking a
					   1h timespan and searches are at max 45secs */
					Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);

				Kademlia::CUInt128 kadFileID(GetFileHash().GetHash());
				Kademlia::CSearch* pSearch = Kademlia::CSearchManager::PrepareLookup(Kademlia::CSearch::FILE, true, kadFileID);
				AddDebugLogLineM(false, logKadSearch, CFormat(wxT("Preparing a Kad Search for '%s'")) % GetFileName());

					AddDebugLogLineM(false, logKadSearch, CFormat(wxT("Kad lookup started for '%s'")) % GetFileName());
					if(m_TotalSearchesKad < 7) {
						m_TotalSearchesKad++;
					}
					m_LastSearchTimeKad = dwCurTick + (KADEMLIAREASKTIME*m_TotalSearchesKad);
					SetKadFileSearchID(pSearch->GetSearchID());

			if(GetKadFileSearchID()) {
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);

		// check if we want new sources from server
		if ( !m_localSrcReqQueued &&
			( (!m_lastsearchtime) ||
			  (dwCurTick - m_lastsearchtime) > SERVERREASKTIME) &&
			theApp->IsConnectedED2K() &&
			thePrefs::GetMaxSourcePerFileSoft() > GetSourceCount() &&

			m_localSrcReqQueued = true;
			theApp->downloadqueue->SendLocalSrcRequest(this);

	// calculate datarate, set limit etc.

	// Kry - does the 3 / 30 difference produce too much flickering or CPU?
	if (m_count >= 30) {

		UpdateAutoDownPriority();
		UpdateDisplayedInfo();
		if(m_bPercentUpdated == false) {
			UpdateCompletedInfos();
		}
		m_bPercentUpdated = false;
		if (thePrefs::ShowCatTabInfos()) {
			Notify_ShowUpdateCatTabTitles();

	// release file handle if unused for some time
	m_hpartfile.Release();

	return (uint32)(kBpsDown*1024.0);
bool CPartFile::CanAddSource(uint32 userid, uint16 port, uint32 serverip, uint16 serverport, uint8* pdebug_lowiddropped, bool ed2kID)
{
	//The incoming ID could have the userid in the Hybrid format..
	uint32 hybridID = 0;

	if (IsLowID(userid)) {

		hybridID = wxUINT32_SWAP_ALWAYS(userid);

		if (!IsLowID(userid)) {
			userid = wxUINT32_SWAP_ALWAYS(userid);

	// MOD Note: Do not change this part - Merkur
	if (theApp->IsConnectedED2K()) {
		if(::IsLowID(theApp->GetED2KID())) {
			if(theApp->GetED2KID() == userid && theApp->serverconnect->GetCurrentServer()->GetIP() == serverip && theApp->serverconnect->GetCurrentServer()->GetPort() == serverport) {

			if(theApp->GetPublicIP() == userid) {

			if(theApp->GetED2KID() == userid && thePrefs::GetPort() == port) {

	if (Kademlia::CKademlia::IsConnected()) {
		if(!Kademlia::CKademlia::IsFirewalled()) {
			if(Kademlia::CKademlia::GetIPAddress() == hybridID && thePrefs::GetPort() == port) {

	//This allows *.*.*.0 clients to not be removed if Ed2kID == false
	if ( IsLowID(hybridID) && theApp->IsFirewalled()) {
		if (pdebug_lowiddropped) {
			(*pdebug_lowiddropped)++;
void CPartFile::AddSources(CMemFile& sources,uint32 serverip, uint16 serverport, unsigned origin, bool bWithObfuscationAndHash)
{
	uint8 count = sources.ReadUInt8();
	uint8 debug_lowiddropped = 0;
	uint8 debug_possiblesources = 0;
	CMD4Hash achUserHash;

		// since we may receive multiple search source UDP results we have to "consume" all data of that packet
		AddDebugLogLineM(false, logPartFile, wxT("Trying to add sources for a stopped file"));
		sources.Seek(count*(4+2), wxFromCurrent);

	for (int i = 0;i != count;++i) {
		uint32 userid = sources.ReadUInt32();
		uint16 port   = sources.ReadUInt16();

		uint8 byCryptOptions = 0;
		if (bWithObfuscationAndHash){
			byCryptOptions = sources.ReadUInt8();
			if ((byCryptOptions & 0x80) > 0) {
				achUserHash = sources.ReadHash();
			}

			if ((thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x01/*supported*/) > 0 && (byCryptOptions & 0x80) == 0)
				|| (thePrefs::IsClientCryptLayerSupported() && (byCryptOptions & 0x02/*requested*/) > 0 && (byCryptOptions & 0x80) == 0)) {
				AddDebugLogLineM(false, logPartFile, wxString::Format(wxT("Server didn't provide UserHash for source %u, even if it was expected to (or local obfuscationsettings changed during serverconnect"), userid));
			} else if (!thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x02/*requested*/) == 0 && (byCryptOptions & 0x80) != 0) {
				AddDebugLogLineM(false, logPartFile, wxString::Format(wxT("Server provided UserHash for source %u, even if it wasn't expected to (or local obfuscationsettings changed during serverconnect"), userid));
			}
		}

		// "Filter LAN IPs" and "IPfilter" the received sources IP addresses
		if (!IsLowID(userid)) {
			// check for 0-IP, localhost and optionally for LAN addresses
			if ( !IsGoodIP(userid, thePrefs::FilterLanIPs()) ) {

			if (theApp->ipfilter->IsFiltered(userid)) {

		if (!CanAddSource(userid, port, serverip, serverport, &debug_lowiddropped)) {

		if(thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
			++debug_possiblesources;
			CUpDownClient* newsource = new CUpDownClient(port,userid,serverip,serverport,this, true, true);

			newsource->SetSourceFrom((ESourceFrom)origin);
			newsource->SetConnectOptions(byCryptOptions, true, false);

			if ((byCryptOptions & 0x80) != 0) {
				newsource->SetUserHash(achUserHash);
			}

			theApp->downloadqueue->CheckAndAddSource(this,newsource);
		} else {
			AddDebugLogLineM(false, logPartFile, wxT("Consuming a packet because of max sources reached"));
			// Since we may receive multiple search source UDP results we have to "consume" all data of that packet
			// This '+1' is added because 'i' counts from 0.
			sources.Seek((count-(i+1))*(4+2), wxFromCurrent);
			if (GetKadFileSearchID()) {
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
void CPartFile::UpdatePartsInfo()
{
	if( !IsPartFile() ) {
		CKnownFile::UpdatePartsInfo();

	uint16 partcount = GetPartCount();
	bool flag = (time(NULL) - m_nCompleteSourcesTime > 0);

	// Ensure the frequency-list is ready
	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);
	}

	// Find number of available parts
	uint16 availablecounter = 0;
	for ( uint16 i = 0; i < partcount; ++i ) {
		if ( m_SrcpartFrequency[i] )

	if ( ( availablecounter == partcount ) && ( m_availablePartsCount < partcount ) ) {
		lastseencomplete = time(NULL);
	}

	m_availablePartsCount = availablecounter;

		ArrayOfUInts16 count;

		count.reserve(GetSourceCount());

		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it ) {
			if ( !(*it)->GetUpPartStatus().empty() && (*it)->GetUpPartCount() == partcount ) {
				count.push_back((*it)->GetUpCompleteSourcesCount());
			}
		}

		m_nCompleteSourcesCount = m_nCompleteSourcesCountLo = m_nCompleteSourcesCountHi = 0;

		for (uint16 i = 0; i < partcount; ++i) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
			else if( m_nCompleteSourcesCount > m_SrcpartFrequency[i]) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
			}
		}
		count.push_back(m_nCompleteSourcesCount);

		int32 n = count.size();

			std::sort(count.begin(), count.end(), std::less<uint16>());

			int32 i = n >> 1;	// (n / 2)
			int32 j = (n * 3) >> 2;	// (n * 3) / 4
			int32 k = (n * 7) >> 3;	// (n * 7) / 8
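			// i, j and k index the sorted per-source estimates at roughly the 50th,
			// 75th and 87.5th percentiles; they feed the normal/low/high
			// "complete sources" guesses computed below.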
			//When still a part file, adjust your guesses by 20% to what you see..

				//Not many sources, so just use what you see..
				// welcome to 'plain stupid code'
				// m_nCompleteSourcesCount;
				m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
				m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
			} else if (n < 20) {
				// For low guess and normal guess count
				//	If we see more sources than the guessed low and normal, use what we see.
				//	If we see fewer sources than the guessed low, adjust network accounts for 80%,
				//	we account for 20% with what we see and make sure we are still above the normal.

				//Adjust 80% network and 20% what we see.
				if ( count[i] < m_nCompleteSourcesCount ) {
					m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
				} else {
					m_nCompleteSourcesCountLo =
						(uint16)((float)(count[i]*.8) +
							 (float)(m_nCompleteSourcesCount*.2));
				}
				m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
				m_nCompleteSourcesCountHi =
					(uint16)((float)(count[j]*.8) +
						 (float)(m_nCompleteSourcesCount*.2));
				if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
					m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
				}

				//	Adjust network accounts for 80%, we account for 20% with what
				//	we see and make sure we are still above the low.

				//	Adjust network accounts for 80%, we account for 20% with what
				//	we see and make sure we are still above the normal.

				m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
				m_nCompleteSourcesCount = (uint16)((float)(count[j]*.8)+(float)(m_nCompleteSourcesCount*.2));
				if( m_nCompleteSourcesCount < m_nCompleteSourcesCountLo ) {
					m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
				}
				m_nCompleteSourcesCountHi = (uint16)((float)(count[k]*.8)+(float)(m_nCompleteSourcesCount*.2));
				if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
					m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
				}

		m_nCompleteSourcesTime = time(NULL) + (60);

	UpdateDisplayedInfo();
// [Maella -Enhanced Chunk Selection- (based on jicxicmic)]
bool CPartFile::GetNextRequestedBlock(CUpDownClient* sender,
	std::vector<Requested_Block_Struct*>& toadd, uint16& count)
{
	// The purpose of this function is to return a list of blocks (~180KB) to
	// download. To avoid a premature stop of the downloading, all blocks that
	// are requested from the same source must be located within the same
	// chunk (=> part ~9MB).
	//
	// The selection of the chunk to download is one of the CRITICAL parts of the
	// edonkey network. The selection algorithm must ensure the best spreading
	//
	// The selection is based on 4 criteria:
	//  1. Frequency of the chunk (availability), very rare chunks must be downloaded
	//     as quickly as possible to become a new available source.
	//  2. Parts used for preview (first + last chunk), preview or check a
	//     file (e.g. movie, mp3)
	//  3. Request state (downloading in process), try to ask each source for another
	//     chunk. Spread the requests between all sources.
	//  4. Completion (shortest-to-complete), partially retrieved chunks should be
	//     completed before starting to download another one.
	//
	// The frequency criterion defines three zones: very rare (<10%), rare (<50%)
	// and common (>30%). Inside each zone, the criteria have a specific weight, used
	// to calculate the priority of chunks. The chunk(s) with the highest
	// priority (highest=0, lowest=0xffff) is/are selected first.
	//
	//          very rare    (preview)       rare                      common
	//    0% <---- +0 pt ----> 10% <----- +10000 pt -----> 50% <---- +20000 pt ----> 100%
	// 1.  <------- frequency: +25*frequency pt ----------->
	// 2.  <- preview: +1 pt --><-------------- preview: set to 10000 pt ------------->
	// 3.          <------ request: download in progress +20000 pt ------>
	// 4a. <- completion: 0% +100, 25% +75 .. 100% +0 pt --><-- !req => completion --->
	// 4b.                                                  <--- req => !completion -->
	//
	// Unrolled, the priority scale is:
	//
	//   0..xxxx       unrequested and requested very rare chunks
	//   10000..1xxxx  unrequested rare chunks + unrequested preview chunks
	//   20000..2xxxx  unrequested common chunks (priority to the most complete)
	//   30000..3xxxx  requested rare chunks + requested preview chunks
	//   40000..4xxxx  requested common chunks (priority to the least complete)
	//
	// This algorithm usually selects first the rarest chunk(s). However, partially
	// complete chunk(s) that is/are close to completion may overtake the priority
	// (priority inversion).
	// For the common chunks, the algorithm tries to spread the download between
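	//
	// Worked example of the scale above (illustrative numbers only): an unrequested,
	// non-preview chunk seen at 3 sources in the very rare zone and 40% complete
	// gets rank 25*3 + 1 + (100 - 40) = 136, while a requested common chunk that is
	// 40% complete gets rank 40000 + 40, so the rare chunk wins by a wide margin.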
1876 // Check input parameters
1877 if ( sender
->GetPartStatus().empty() ) {
1880 // Define and create the list of the chunks to download
1881 const uint16 partCount
= GetPartCount();
1882 ChunkList chunksList
;
1885 uint16 newBlockCount
= 0;
1886 while(newBlockCount
!= count
) {
1887 // Create a request block stucture if a chunk has been previously selected
1888 if(sender
->GetLastPartAsked() != 0xffff) {
1889 Requested_Block_Struct
* pBlock
= new Requested_Block_Struct
;
1890 if(GetNextEmptyBlockInPart(sender
->GetLastPartAsked(), pBlock
) == true) {
1891 // Keep a track of all pending requested blocks
1892 m_requestedblocks_list
.push_back(pBlock
);
1893 // Update list of blocks to return
1894 toadd
.push_back(pBlock
);
1896 // Skip end of loop (=> CPU load)
1899 // All blocks for this chunk have been already requested
1901 // => Try to select another chunk
1902 sender
->SetLastPartAsked(0xffff);
1906 // Check if a new chunk must be selected (e.g. download starting, previous chunk complete)
1907 if(sender
->GetLastPartAsked() == 0xffff) {
1908 // Quantify all chunks (create list of chunks to download)
1909 // This is done only one time and only if it is necessary (=> CPU load)
1910 if(chunksList
.empty()) {
1911 // Indentify the locally missing part(s) that this source has
1912 for(uint16 i
=0; i
< partCount
; ++i
) {
1913 if(sender
->IsPartAvailable(i
) == true && GetNextEmptyBlockInPart(i
, NULL
) == true) {
1914 // Create a new entry for this chunk and add it to the list
1917 newEntry
.frequency
= m_SrcpartFrequency
[i
];
1918 chunksList
.push_back(newEntry
);
1922 // Check if any bloks(s) could be downloaded
1923 if(chunksList
.empty()) {
1924 break; // Exit main loop while()
1927 // Define the bounds of the three zones (very rare, rare)
1928 // more depending on available sources
1930 if (GetSourceCount()>800) {
1932 } else if (GetSourceCount()>200) {
1935 uint16 limit
= modif
*GetSourceCount()/ 100;
1939 const uint16 veryRareBound
= limit
;
1940 const uint16 rareBound
= 2*limit
;
1942 // Cache Preview state (Criterion 2)
1943 FileType type
= GetFiletype(GetFileName());
1944 const bool isPreviewEnable
=
1945 thePrefs::GetPreviewPrio() &&
1946 (type
== ftArchive
|| type
== ftVideo
);
        // Collect and calculate criteria for all chunks
        for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
            Chunk& cur_chunk = *it;

            const uint64 uStart = cur_chunk.part * PARTSIZE;
            const uint64 uEnd = uStart + GetPartSize(cur_chunk.part) - 1;

            // Criterion 2. Parts used for preview
            // Remark: - We need to download the first part and the last part(s).
            //         - When the last part is very small, it's necessary to
            //           download the two last parts.
            bool critPreview = false;
            if (isPreviewEnable == true) {
                if (cur_chunk.part == 0) {
                    critPreview = true; // First chunk
                } else if (cur_chunk.part == partCount - 1) {
                    critPreview = true; // Last chunk
                } else if (cur_chunk.part == partCount - 2) {
                    // Last chunk - 1 (only if last chunk is too small)
                    const uint32 sizeOfLastChunk = GetFileSize() - uEnd;
                    if (sizeOfLastChunk < PARTSIZE / 3) {
                        critPreview = true; // Last chunk - 1
                    }
                }
            }

            // Criterion 3. Request state (downloading in process from other source(s))
            const bool critRequested =
                cur_chunk.frequency > veryRareBound &&
                IsAlreadyRequested(uStart, uEnd);

            // Criterion 4. Completion
            // PARTSIZE instead of GetPartSize() favours the last chunk - but that may be intentional
            uint32 partSize = PARTSIZE - m_gaplist.GetGapSize(cur_chunk.part);
            const uint16 critCompletion = (uint16)(partSize / (PARTSIZE / 100)); // in [%]

            // Calculate priority with all criteria
            if (cur_chunk.frequency <= veryRareBound) {
                // 0..xxxx  unrequested + requested very rare chunks
                cur_chunk.rank = (25 * cur_chunk.frequency) +           // Criterion 1
                    ((critPreview == true) ? 0 : 1) +                   // Criterion 2
                    (100 - critCompletion);                             // Criterion 4
            } else if (critPreview == true) {
                // 10000..10100  unrequested preview chunks
                // 30000..30100  requested preview chunks
                cur_chunk.rank = ((critRequested == false) ? 10000 : 30000) +   // Criterion 3
                    (100 - critCompletion);                                     // Criterion 4
            } else if (cur_chunk.frequency <= rareBound) {
                // 10101..1xxxx  unrequested rare chunks
                // 30101..3xxxx  requested rare chunks
                cur_chunk.rank = (25 * cur_chunk.frequency) +           // Criterion 1
                    ((critRequested == false) ? 10101 : 30101) +        // Criterion 3
                    (100 - critCompletion);                             // Criterion 4
            } else {
                // Common chunk
                if (critRequested == false) {   // Criterion 3
                    // 20000..2xxxx  unrequested common chunks
                    cur_chunk.rank = 20000 +            // Criterion 3
                        (100 - critCompletion);         // Criterion 4
                } else {
                    // 40000..4xxxx  requested common chunks
                    // Remark: The weight of the completion criterion is inverted
                    //         to spread the requests over the chunks being completed.
                    //         Without this, the chunk closest to completion would
                    //         receive every new source.
                    cur_chunk.rank = 40000 +            // Criterion 3
                        (critCompletion);               // Criterion 4
                }
            }
        }
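
        // Illustrative example (hypothetical numbers, not taken from the code
        // above): with limit == 20, a chunk seen on 15 sources that is 40%
        // complete and not used for preview gets
        //     rank = 25*15 + 1 + (100 - 40) = 436        (very rare band)
        // while an unrequested common chunk at 40% completion gets
        //     rank = 20000 + (100 - 40) = 20060.
        // A lower rank means a higher download priority in the selection below.
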
        // Select the next chunk to download
        if (!chunksList.empty()) {
            // Find and count the chunk(s) with the highest priority
            uint16 chunkCount = 0;  // Number of found chunks with the same priority
            uint16 rank = 0xffff;   // Highest priority found
            for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
                const Chunk& cur_chunk = *it;
                if (cur_chunk.rank < rank) {
                    chunkCount = 1;
                    rank = cur_chunk.rank;
                } else if (cur_chunk.rank == rank) {
                    ++chunkCount;
                }
            }

            // Use a random access to avoid that everybody tries to download the
            // same chunks at the same time (=> spread the selected chunks among clients)
            uint16 randomness = 1 + (int)(((float)(chunkCount - 1)) * rand() / (RAND_MAX + 1.0));
            for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
                const Chunk& cur_chunk = *it;
                if (cur_chunk.rank == rank) {
                    randomness--;
                    if (randomness == 0) {
                        // Selection process is over
                        sender->SetLastPartAsked(cur_chunk.part);
                        // Remark: this list might be reused up to *count times
                        chunksList.erase(it);
                        break; // exit loop for()
                    }
                }
            }
        } else {
            // There is no remaining chunk to download
            break; // Exit main loop while()
        }
    }

    // Return the number of blocks
    count = newBlockCount;

    return (newBlockCount > 0);
}

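// Note on the tie-break above: 'randomness' is drawn from [1..chunkCount], so
// every chunk sharing the best rank is equally likely to be handed out.  A
// rough sketch of the same idea, with hypothetical names:
//     uint16 pick = 1 + rand() % tiedChunks;  // 1-based index among the ties
//     // walk the list and select the pick-th chunk whose rank == bestRank
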
void CPartFile::RemoveBlockFromList(uint64 start, uint64 end)
{
    std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
    while (it != m_requestedblocks_list.end()) {
        std::list<Requested_Block_Struct*>::iterator it2 = it++;

        if ((*it2)->StartOffset <= start && (*it2)->EndOffset >= end) {
            m_requestedblocks_list.erase(it2);
        }
    }
}


void CPartFile::RemoveAllRequestedBlocks(void)
{
    m_requestedblocks_list.clear();
}

void CPartFile::CompleteFile(bool bIsHashingDone)
{
    if (GetKadFileSearchID()) {
        Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
    }

    theApp->downloadqueue->RemoveLocalServerRequest(this);

    AddDebugLogLineM( false, logPartFile, wxString( wxT("CPartFile::CompleteFile: Hash ") ) + ( bIsHashingDone ? wxT("done") : wxT("not done") ) );

    if (!bIsHashingDone) {
        SetPartFileStatus(PS_COMPLETING);

        CPath partFile = m_partmetfilename.RemoveExt();
        CThreadScheduler::AddTask(new CHashingTask(GetFilePath(), partFile, this));
        return;
    }

    m_is_A4AF_auto = false;
    SetPartFileStatus(PS_COMPLETING);
    // guess I was wrong about not needing to spawn a thread ...
    // It is needed if the temp and incoming dirs are on different
    // partitions/drives and the file is large... [oz]
    PerformFileComplete();

    if (thePrefs::ShowCatTabInfos()) {
        Notify_ShowUpdateCatTabTitles();
    }
    UpdateDisplayedInfo(true);
}

void CPartFile::CompleteFileEnded(bool errorOccured, const CPath& newname)
{
    if (errorOccured) {
        SetPartFileStatus(PS_ERROR);
        AddLogLineM(true, CFormat( _("Unexpected error while completing %s. File paused") ) % GetFileName() );
    } else {
        m_fullname = newname;

        SetFilePath(m_fullname.GetPath());
        SetFileName(m_fullname.GetFullName());
        m_lastDateChanged = CPath::GetModificationTime(m_fullname);

        SetPartFileStatus(PS_COMPLETE);

        // TODO: What the f*** if it is already known?
        theApp->knownfiles->SafeAddKFile(this);

        // remove the file from the suspended uploads list
        theApp->uploadqueue->ResumeUpload(GetFileHash());
        theApp->downloadqueue->RemoveFile(this);
        theApp->sharedfiles->SafeAddKFile(this);
        UpdateDisplayedInfo(true);

        // republish that file to the ed2k-server to update the 'FT_COMPLETE_SOURCES' counter on the server.
        theApp->sharedfiles->RepublishFile(this);

        // Ensure that completed shows the correct value
        completedsize = GetFileSize();

        // clear the blackbox to free up memory
        m_CorruptionBlackBox->Free();

        AddLogLineM(true, CFormat( _("Finished downloading: %s") ) % GetFileName() );
    }

    theApp->downloadqueue->StartNextFile(this);
}

void CPartFile::PerformFileComplete()
{
    // add this file to the suspended uploads list
    theApp->uploadqueue->SuspendUpload(GetFileHash());

    // close permanent handle
    if (m_hpartfile.IsOpened()) {
        m_hpartfile.Close();
    }

    // Schedule task for completion of the file
    CThreadScheduler::AddTask(new CCompletionTask(this));
}

void CPartFile::RemoveAllSources(bool bTryToSwap)
{
    for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end();) {
        CUpDownClient* cur_src = *it++;
        if (bTryToSwap) {
            if (!cur_src->SwapToAnotherFile(true, true, true, NULL)) {
                RemoveSource(cur_src, true, false);
                // If it was not swapped, it's not on any file anymore, and should die
            }
        } else {
            RemoveSource(cur_src, true, false);
        }
    }

    /* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
    // remove all links A4AF in sources to this file
    if (!m_A4AFsrclist.empty()) {
        for (SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
            CUpDownClient* cur_src = *it++;
            if (cur_src->DeleteFileRequest(this)) {
                Notify_DownloadCtrlRemoveSource(cur_src, this);
            }
        }
        m_A4AFsrclist.clear();
    }
    /* eMule 0.30c implementation, i give it a try (Creteil) END ... */
    UpdateFileRatingCommentAvail();
}

void CPartFile::Delete()
{
    AddLogLineM(false, CFormat(_("Deleting file: %s")) % GetFileName());
    // Barry - Need to tell any connected clients to stop sending the file
    StopFile(true);
    AddDebugLogLineM(false, logPartFile, wxT("\tStopped"));

    theApp->sharedfiles->RemoveFile(this);
    AddDebugLogLineM(false, logPartFile, wxT("\tRemoved from shared"));
    theApp->downloadqueue->RemoveFile(this);
    AddDebugLogLineM(false, logPartFile, wxT("\tRemoved from download queue"));
    Notify_DownloadCtrlRemoveFile(this);
    AddDebugLogLineM(false, logPartFile, wxT("\tRemoved from transferwnd"));

    if (m_hpartfile.IsOpened()) {
        m_hpartfile.Close();
    }

    AddDebugLogLineM(false, logPartFile, wxT("\tClosed"));

    if (!CPath::RemoveFile(m_fullname)) {
        AddDebugLogLineM(true, logPartFile, CFormat(wxT("\tFailed to delete '%s'")) % m_fullname);
    } else {
        AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .part.met"));
    }

    if (!CPath::RemoveFile(m_PartPath)) {
        AddDebugLogLineM(true, logPartFile, CFormat(wxT("Failed to delete '%s'")) % m_PartPath);
    } else {
        AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .part"));
    }

    CPath BAKName = m_fullname.AppendExt(PARTMET_BAK_EXT);
    if (!CPath::RemoveFile(BAKName)) {
        AddDebugLogLineM(true, logPartFile, CFormat(wxT("Failed to delete '%s'")) % BAKName);
    } else {
        AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .BAK"));
    }

    CPath SEEDSName = m_fullname.AppendExt(wxT(".seeds"));
    if (SEEDSName.FileExists()) {
        if (CPath::RemoveFile(SEEDSName)) {
            AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .seeds"));
        } else {
            AddDebugLogLineM(true, logPartFile, CFormat(wxT("Failed to delete '%s'")) % SEEDSName);
        }
    }

    AddDebugLogLineM(false, logPartFile, wxT("Done"));

    delete this;
}

bool CPartFile::HashSinglePart(uint16 partnumber)
{
    if ((GetHashCount() <= partnumber) && (GetPartCount() > 1)) {
        AddLogLineM(true,
            CFormat( _("WARNING: Unable to hash downloaded part - hashset incomplete for '%s'") )
                % GetFileName() );
        m_hashsetneeded = true;
        return true;
    } else if ((GetHashCount() <= partnumber) && GetPartCount() != 1) {
        AddLogLineM(true, CFormat( _("ERROR: Unable to hash downloaded part - hashset incomplete (%s). This should never happen")) % GetFileName() );
        m_hashsetneeded = true;
        return true;
    } else {
        CMD4Hash hashresult;
        uint64 offset = PARTSIZE * partnumber;
        uint32 length = GetPartSize(partnumber);
        try {
            CreateHashFromFile(m_hpartfile, offset, length, &hashresult, NULL);
        } catch (const CIOFailureException& e) {
            AddLogLineM(true, CFormat( wxT("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
                % partnumber % length % (offset+length) % GetFileName() % GetFileSize() % e.what());
            SetPartFileStatus(PS_ERROR);
            return false;
        } catch (const CEOFException& e) {
            AddLogLineM(true, CFormat( wxT("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
                % partnumber % length % (offset+length) % GetFileName() % GetFileSize() % e.what());
            return false;
        }

        if (GetPartCount() > 1) {
            if (hashresult != GetPartHash(partnumber)) {
                AddDebugLogLineM(false, logPartFile, CFormat( wxT("%s: Expected hash of part %d: %s")) % GetFileName() % partnumber % GetPartHash(partnumber).Encode() );
                AddDebugLogLineM(false, logPartFile, CFormat( wxT("%s: Actual hash of part %d: %s")) % GetFileName() % partnumber % hashresult.Encode() );
                return false;
            } else {
                return true;
            }
        } else {
            if (hashresult != m_abyFileHash) {
                return false;
            } else {
                return true;
            }
        }
    }
}

bool CPartFile::IsCorruptedPart(uint16 partnumber)
{
    return std::find(m_corrupted_list.begin(), m_corrupted_list.end(), partnumber)
        != m_corrupted_list.end();
}

void CPartFile::SetDownPriority(uint8 np, bool bSave, bool bRefresh)
{
    if (m_iDownPriority != np) {
        m_iDownPriority = np;
        UpdateDisplayedInfo(true);
    }
}

void CPartFile::StopFile(bool bCancel)
{
    // Kry - Need to set it here to get into SetPartFileStatus(status) correctly
    m_stopped = true;

    // Barry - Need to tell any connected clients to stop sending the file
    PauseFile();

    m_LastSearchTimeKad = 0;
    m_TotalSearchesKad = 0;

    RemoveAllSources(true);
    memset(m_anStates, 0, sizeof(m_anStates));

    UpdateDisplayedInfo(true);
}

void CPartFile::StopPausedFile()
{
    // Once an hour, remove any sources for files which are no longer active downloads
    switch (GetStatus()) {
        case PS_PAUSED:
        case PS_INSUFFICIENT:
        case PS_ERROR:
            if (time(NULL) - m_iLastPausePurge > (60*60)) {
                m_iLastPausePurge = time(NULL);
                StopFile();
            }
            break;
        default:
            break;
    }

    // release file handle if unused for some time
    m_hpartfile.Release();
}

void CPartFile::PauseFile(bool bInsufficient)
{
    if (status == PS_COMPLETE || status == PS_COMPLETING) {
        return;
    }

    if (GetKadFileSearchID()) {
        Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);
        // If we were in the middle of searching, reset timer so they can resume searching.
        m_LastSearchTimeKad = 0;
    }

    m_iLastPausePurge = time(NULL);

    theApp->downloadqueue->RemoveLocalServerRequest(this);

    CPacket packet(OP_CANCELTRANSFER, 0, OP_EDONKEYPROT);
    for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
        CUpDownClient* cur_src = *it++;
        if (cur_src->GetDownloadState() == DS_DOWNLOADING) {
            if (!cur_src->GetSentCancelTransfer()) {
                theStats::AddUpOverheadOther(packet.GetPacketSize());
                AddDebugLogLineM(false, logLocalClient, wxT("Local Client: OP_CANCELTRANSFER to ") + cur_src->GetFullIP());
                cur_src->SendPacket(&packet, false, true);
                cur_src->SetSentCancelTransfer(true);
            }
            cur_src->SetDownloadState(DS_ONQUEUE);
        }
    }

    m_insufficient = bInsufficient;

    m_anStates[DS_DOWNLOADING] = 0;
}

void CPartFile::ResumeFile()
{
    if (status == PS_COMPLETE || status == PS_COMPLETING) {
        return;
    }

    if (m_insufficient && !CheckFreeDiskSpace()) {
        // Still not enough free disk space
        return;
    }

    m_insufficient = false;

    m_lastsearchtime = 0;
    SetActive(theApp->IsConnected());

    if (m_gaplist.IsComplete() && (GetStatus() == PS_ERROR)) {
        // The file has already been hashed at this point
        CompleteFile(true);
    }

    UpdateDisplayedInfo(true);
}

bool CPartFile::CheckFreeDiskSpace(uint64 neededSpace)
{
    uint64 free = CPath::GetFreeSpaceAt(GetFilePath());
    if (free == static_cast<uint64>(wxInvalidOffset)) {
        // If GetFreeSpaceAt() fails, then the path probably does not exist.
        return false;
    }

    // The very least acceptable diskspace is a single PART
    if (free < PARTSIZE) {
        // Always fail in this case, since we risk losing data if we try to
        // write on a full partition.
        return false;
    }

    // All other checks are only made if the user has enabled them
    if (thePrefs::IsCheckDiskspaceEnabled()) {
        neededSpace += thePrefs::GetMinFreeDiskSpace();

        // Due to the existence of sparse files, we cannot assume that
        // writes within the file don't cause new blocks to be allocated.
        // Therefore, we simply have to stop writing the moment the limit
        // has been exceeded.
        return free >= neededSpace;
    }

    return true;
}

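// Worked example (hypothetical numbers): with 12 GB free, PARTSIZE of roughly
// 9.28 MB and a configured minimum of 1 GB, a flush needing 2 MB passes since
// free (12 GB) >= neededSpace (2 MB + 1 GB). With only 5 MB free the check
// fails unconditionally, because that is below a single PARTSIZE.
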
void CPartFile::SetLastAnsweredTime()
{
    m_ClientSrcAnswered = ::GetTickCount();
}


void CPartFile::SetLastAnsweredTimeTimeout()
{
    m_ClientSrcAnswered = 2 * CONNECTION_LATENCY + ::GetTickCount() - SOURCECLIENTREASKS;
}

CPacket* CPartFile::CreateSrcInfoPacket(const CUpDownClient* forClient, uint8 byRequestedVersion, uint16 nRequestedOptions)
{
    if (m_SrcList.empty()) {
        return NULL;
    }

    if (!IsPartFile()) {
        return CKnownFile::CreateSrcInfoPacket(forClient, byRequestedVersion, nRequestedOptions);
    }

    if (((forClient->GetRequestFile() != this)
        && (forClient->GetUploadFile() != this)) || forClient->GetUploadFileID() != GetFileHash()) {
        wxString file1 = _("Unknown");
        if (forClient->GetRequestFile() && forClient->GetRequestFile()->GetFileName().IsOk()) {
            file1 = forClient->GetRequestFile()->GetFileName().GetPrintable();
        } else if (forClient->GetUploadFile() && forClient->GetUploadFile()->GetFileName().IsOk()) {
            file1 = forClient->GetUploadFile()->GetFileName().GetPrintable();
        }
        wxString file2 = _("Unknown");
        if (GetFileName().IsOk()) {
            file2 = GetFileName().GetPrintable();
        }
        AddDebugLogLineM(false, logPartFile, wxT("File mismatch on source packet (P) Sending: ") + file1 + wxT("  From: ") + file2);
        return NULL;
    }

    if (!(GetStatus() == PS_READY || GetStatus() == PS_EMPTY)) {
        return NULL;
    }

    const BitVector& reqstatus = forClient->GetPartStatus();
    bool KnowNeededParts = !reqstatus.empty();
    //wxASSERT(rcvstatus.size() == GetPartCount()); // Obviously!
    if (KnowNeededParts && (reqstatus.size() != GetPartCount())) {
        // Yuck. Same file but different part count? Seriously fucked up.
        // This happens rather often with reqstatus.size() == 0. Don't log then.
        if (reqstatus.size()) {
            AddDebugLogLineM(false, logKnownFiles, CFormat(wxT("Impossible situation: different partcounts: %i (client) and %i (file) for %s")) % reqstatus.size() % GetPartCount() % GetFileName());
        }
        return NULL;
    }

    CMemFile data(1024);

    uint8 byUsedVersion;
    bool bIsSX2Packet;
    if (forClient->SupportsSourceExchange2() && byRequestedVersion > 0) {
        // the client uses SourceExchange2 and requested the highest version he knows
        // and we send the highest version we know, but of course not higher than his request
        byUsedVersion = std::min(byRequestedVersion, (uint8)SOURCEEXCHANGE2_VERSION);
        bIsSX2Packet = true;
        data.WriteUInt8(byUsedVersion);

        // we don't support any special SX2 options yet, reserved for later use
        if (nRequestedOptions != 0) {
            AddDebugLogLineM(false, logKnownFiles, CFormat(wxT("Client requested unknown options for SourceExchange2: %u")) % nRequestedOptions);
        }
    } else {
        byUsedVersion = forClient->GetSourceExchange1Version();
        bIsSX2Packet = false;
        if (forClient->SupportsSourceExchange2()) {
            AddDebugLogLineM(false, logKnownFiles, wxT("Client which announced to support SX2 sent SX1 packet instead"));
        }
    }

    uint16 nCount = 0;

    data.WriteHash(m_abyFileHash);
    data.WriteUInt16(nCount);

    for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
        bool bNeeded = false;
        CUpDownClient* cur_src = *it;

        int state = cur_src->GetDownloadState();
        int valid = (state == DS_DOWNLOADING) || (state == DS_ONQUEUE && !cur_src->IsRemoteQueueFull());

        if (cur_src->HasLowID() || !valid) {
            continue;
        }

        // only send source which have needed parts for this client if possible
        const BitVector& srcstatus = cur_src->GetPartStatus();
        if (!srcstatus.empty()) {
            //wxASSERT(srcstatus.size() == GetPartCount()); // Obviously!
            if (srcstatus.size() != GetPartCount()) {
                continue;
            }
            if (KnowNeededParts) {
                // only send sources which have needed parts for this client
                for (int x = 0; x < GetPartCount(); ++x) {
                    if (srcstatus[x] && !reqstatus[x]) {
                        bNeeded = true;
                        break;
                    }
                }
            } else {
                // if we don't know the needed parts for this client, return any
                // source; currently a client sends its file status only after
                // it has at least one complete part
                if (srcstatus.size() != GetPartCount()) {
                    continue;
                }
                for (int x = 0; x < GetPartCount(); ++x) {
                    if (srcstatus[x]) {
                        bNeeded = true;
                        break;
                    }
                }
            }
        }

        if (!bNeeded) {
            continue;
        }

        ++nCount;

        uint32 dwID;
        if (forClient->GetSourceExchange1Version() > 2) {
            dwID = cur_src->GetUserIDHybrid();
        } else {
            dwID = wxUINT32_SWAP_ALWAYS(cur_src->GetUserIDHybrid());
        }
        data.WriteUInt32(dwID);
        data.WriteUInt16(cur_src->GetUserPort());
        data.WriteUInt32(cur_src->GetServerIP());
        data.WriteUInt16(cur_src->GetServerPort());

        if (byUsedVersion >= 2) {
            data.WriteHash(cur_src->GetUserHash());
        }

        if (byUsedVersion >= 4) {
            // CryptSettings - SourceExchange V4
            // 1 CryptLayer Required
            // 1 CryptLayer Requested
            // 1 CryptLayer Supported
            const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
            const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
            const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
            const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
            data.WriteUInt8(byCryptOptions);
        }

        if (nCount > 500) {
            break;
        }
    }

    if (!nCount) {
        return NULL;
    }

    data.Seek(bIsSX2Packet ? 17 : 16, wxFromStart);
    data.WriteUInt16(nCount);

    CPacket* result = new CPacket(data, OP_EMULEPROT, bIsSX2Packet ? OP_ANSWERSOURCES2 : OP_ANSWERSOURCES);

    // 16+2+501*(4+2+4+2+16) = 14046 bytes max.
    if (result->GetPacketSize() > 354) {
        result->PackPacket();
    }

    return result;
}

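// For reference, the per-source record written above is (sizes in bytes,
// matching the "4+2+4+2+16" comments): user ID (4), user port (2),
// server IP (4), server port (2), user hash (16, SX >= 2) and, for SX >= 4,
// one crypt-options byte. The count field at offset 16 (17 for SX2 packets,
// after the version byte) is patched in once the final number of sources is
// known.
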
void CPartFile::AddClientSources(CMemFile* sources, unsigned nSourceFrom, uint8 uClientSXVersion, bool bSourceExchange2, const CUpDownClient* /*pClient*/)
{
    uint16 nCount = 0;
    uint8 uPacketSXVersion = 0;
    if (!bSourceExchange2) {
        nCount = sources->ReadUInt16();

        // Check if the data size matches the 'nCount' for v1 or v2 and possibly correct the source
        // exchange version while reading the packet data. Otherwise we could experience a higher
        // chance in dealing with wrong source data, userhashs and finally duplicate sources.
        uint32 uDataSize = sources->GetLength() - sources->GetPosition();

        if ((uint32)(nCount*(4+2+4+2)) == uDataSize) { // Checks if version 1 packet is correct size
            if (uClientSXVersion != 1) {
                return;
            }
            uPacketSXVersion = 1;
        } else if ((uint32)(nCount*(4+2+4+2+16)) == uDataSize) { // Checks if version 2&3 packet is correct size
            if (uClientSXVersion == 2) {
                uPacketSXVersion = 2;
            } else if (uClientSXVersion > 2) {
                uPacketSXVersion = 3;
            } else {
                return;
            }
        } else if (nCount*(4+2+4+2+16+1) == uDataSize) {
            if (uClientSXVersion != 4) {
                return;
            }
            uPacketSXVersion = 4;
        } else {
            // If v5 inserts additional data (like v2), the above code will correctly filter those packets.
            // If v5 appends additional data after <count>(<Sources>)[count], we are in trouble with the
            // above code. Though a client which does not understand v5+ should never receive such a packet.
            AddDebugLogLineM(false, logClient, CFormat(wxT("Received invalid source exchange packet (v%u) of data size %u for %s")) % uClientSXVersion % uDataSize % GetFileName());
            return;
        }
    } else {
        // We only check if the version is known by us and do a quick sanitize check on known versions
        // other than SX1; the packet will be ignored if any error appears, since it can't be a "misunderstanding" anymore
        if (uClientSXVersion > SOURCEEXCHANGE2_VERSION || uClientSXVersion == 0) {
            AddDebugLogLineM(false, logPartFile, CFormat(wxT("Invalid source exchange type version: %i")) % uClientSXVersion);
            return;
        }

        // all known versions use the first 2 bytes as count and unknown versions are already filtered above
        nCount = sources->ReadUInt16();
        uint32 uDataSize = (uint32)(sources->GetLength() - sources->GetPosition());
        bool bError = false;
        switch (uClientSXVersion) {
            case 1:
                bError = nCount*(4+2+4+2) != uDataSize;
                break;
            case 2:
            case 3:
                bError = nCount*(4+2+4+2+16) != uDataSize;
                break;
            case 4:
                bError = nCount*(4+2+4+2+16+1) != uDataSize;
                break;
            default:
                bError = true;
        }

        if (bError) {
            AddDebugLogLineM(false, logPartFile, wxT("Invalid source exchange data size."));
            return;
        }
        uPacketSXVersion = uClientSXVersion;
    }

    for (uint16 i = 0; i != nCount; ++i) {
        uint32 dwID = sources->ReadUInt32();
        uint16 nPort = sources->ReadUInt16();
        uint32 dwServerIP = sources->ReadUInt32();
        uint16 nServerPort = sources->ReadUInt16();

        CMD4Hash userHash;
        if (uPacketSXVersion > 1) {
            userHash = sources->ReadHash();
        }

        uint8 byCryptOptions = 0;
        if (uPacketSXVersion >= 4) {
            byCryptOptions = sources->ReadUInt8();
        }

        // Clients send IDs in the Hybrid format so highID clients with *.*.*.0 won't be falsely switched to a lowID...
        uint32 dwIDED2K;
        if (uPacketSXVersion >= 3) {
            dwIDED2K = wxUINT32_SWAP_ALWAYS(dwID);
        } else {
            dwIDED2K = dwID;
        }

        // check the HighID(IP) - "Filter LAN IPs" and "IPfilter" the received sources IP addresses
        if (!IsLowID(dwID)) {
            if (!IsGoodIP(dwIDED2K, thePrefs::FilterLanIPs())) {
                // check for 0-IP, localhost and optionally for LAN addresses
                AddDebugLogLineM(false, logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - bad IP")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
                continue;
            }
            if (theApp->ipfilter->IsFiltered(dwIDED2K)) {
                AddDebugLogLineM(false, logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - IPFilter")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
                continue;
            }
            if (theApp->clientlist->IsBannedClient(dwIDED2K)) {
                continue;
            }
        }

        // additionally check for LowID and own IP
        if (!CanAddSource(dwID, nPort, dwServerIP, nServerPort, NULL, false)) {
            AddDebugLogLineM(false, logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via source exchange")) % Uint32toStringIP(dwIDED2K));
            continue;
        }

        if (thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
            CUpDownClient* newsource = new CUpDownClient(nPort, dwID, dwServerIP, nServerPort, this, (uPacketSXVersion < 3), true);
            if (uPacketSXVersion > 1) {
                newsource->SetUserHash(userHash);
            }
            if (uPacketSXVersion >= 4) {
                newsource->SetConnectOptions(byCryptOptions, true, false);
            }
            newsource->SetSourceFrom((ESourceFrom)nSourceFrom);
            theApp->downloadqueue->CheckAndAddSource(this, newsource);
        } else {
            break;
        }
    }
}

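// Size-check example (hypothetical packet): an SX2 v3 answer advertising
// nCount == 10 sources must carry exactly 10 * (4+2+4+2+16) = 280 bytes of
// payload after the count field; anything else sets bError above and the
// packet is dropped before any source is parsed.
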
void CPartFile::UpdateAutoDownPriority()
{
    if (!IsAutoDownPriority()) {
        return;
    }

    if (GetSourceCount() <= RARE_FILE) {
        if (GetDownPriority() != PR_HIGH)
            SetDownPriority(PR_HIGH, false, false);
    } else if (GetSourceCount() < 100) {
        if (GetDownPriority() != PR_NORMAL)
            SetDownPriority(PR_NORMAL, false, false);
    } else {
        if (GetDownPriority() != PR_LOW)
            SetDownPriority(PR_LOW, false, false);
    }
}


// making this function return a higher value when more sources have the extended
// protocol will force you to ask a larger variety of people for sources
int CPartFile::GetCommonFilePenalty()
{
    // TODO: implement, but never return less than MINCOMMONPENALTY!
    return MINCOMMONPENALTY;
}

/* Barry - Replaces BlockReceived()

   Originally this only wrote to disk when a full 180k block
   had been received from a client, and only asked for data in
   180k blocks.

   This meant that on average 90k was lost for every connection
   to a client data source. That is a lot of wasted data.

   To reduce the lost data, packets are now written to a buffer
   and flushed to disk regularly regardless of size downloaded.
   This includes compressed packets.

   Data is also requested only where gaps are, not in 180k blocks.
   The requests will still not exceed 180k, but may be smaller to
   fill a gap.
*/

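/* A small illustration of the gain (hypothetical numbers): if a client
   connection drops after delivering 70k of a 180k block, the old scheme
   discarded those 70k because only full blocks were committed; with the
   buffered approach below the 70k are flushed to disk and only the
   remaining 110k have to be re-requested. */
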
// Kry - transize is 32bits, no packet can be more than that (this is
// compressed size). Even 32bits is too much imho. As for the return size,
// look at the lenData below.
uint32 CPartFile::WriteToBuffer(uint32 transize, byte* data, uint64 start, uint64 end, Requested_Block_Struct* block, const CUpDownClient* client)
{
    // Increment transferred bytes counter for this file
    transferred += transize;

    // This is needed a few times
    // Kry - should not need a uint64 here - no block is larger than
    // 2GB even after uncompressed.
    uint32 lenData = (uint32)(end - start + 1);

    if (lenData > transize) {
        m_iGainDueToCompression += lenData - transize;
    }

    // Occasionally packets are duplicated, no point writing it twice
    if (IsComplete(start, end)) {
        AddDebugLogLineM(false, logPartFile,
            CFormat(wxT("File '%s' has already been written from %u to %u"))
                % GetFileName() % start % end);
        return 0;
    }

    // security sanitize check to make sure we do not write anything into an already hashed complete chunk
    const uint64 nStartChunk = start / PARTSIZE;
    const uint64 nEndChunk = end / PARTSIZE;
    if (IsComplete(nStartChunk)) {
        AddDebugLogLineM(false, logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (start): %u-%u; File=%s")) % start % end % GetFileName());
        return 0;
    } else if (nStartChunk != nEndChunk) {
        if (IsComplete(nEndChunk)) {
            AddDebugLogLineM(false, logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (end): %u-%u; File=%s")) % start % end % GetFileName());
            return 0;
        } else {
            AddDebugLogLineM(false, logPartFile, CFormat(wxT("Received data crosses chunk boundaries: %u-%u; File=%s")) % start % end % GetFileName());
        }
    }

    // log transferinformation in our "blackbox"
    m_CorruptionBlackBox->TransferredData(start, end, client->GetIP());

    // Create copy of data as new buffer
    byte* buffer = new byte[lenData];
    memcpy(buffer, data, lenData);

    // Create a new buffered queue entry
    PartFileBufferedData* item = new PartFileBufferedData(buffer, start, end, block);

    // Add to the queue in the correct position (most likely the end)
    bool added = false;

    std::list<PartFileBufferedData*>::iterator it = m_BufferedData_list.begin();
    for (; it != m_BufferedData_list.end(); ++it) {
        PartFileBufferedData* queueItem = *it;

        if (item->end <= queueItem->end) {
            if (it != m_BufferedData_list.begin()) {
                added = true;
                m_BufferedData_list.insert(--it, item);
            }
            break;
        }
    }

    if (!added) {
        m_BufferedData_list.push_front(item);
    }

    // Increment buffer size marker
    m_nTotalBufferData += lenData;

    // Mark this small section of the file as filled
    FillGap(item->start, item->end);

    // Update the flushed mark on the requested block
    // The loop here is unfortunate but necessary to detect deleted blocks.
    std::list<Requested_Block_Struct*>::iterator it2 = m_requestedblocks_list.begin();
    for (; it2 != m_requestedblocks_list.end(); ++it2) {
        if (*it2 == item->block) {
            item->block->transferred += lenData;
        }
    }

    if (m_gaplist.IsComplete()) {
        FlushBuffer();
    }

    // Return the length of data written to the buffer
    return lenData;
}

void CPartFile::FlushBuffer(bool fromAICHRecoveryDataAvailable)
{
    m_nLastBufferFlushTime = GetTickCount();

    if (m_BufferedData_list.empty()) {
        return;
    }

    uint32 partCount = GetPartCount();
    // Remember which parts need to be checked at the end of the flush
    std::vector<bool> changedPart(partCount, false);

    // Ensure file is big enough to write data to (the last item will be the furthest from the start)
    if (!CheckFreeDiskSpace(m_nTotalBufferData)) {
        // Not enough free space to write the last item, bail
        AddLogLineM(true, CFormat( _("WARNING: Not enough free disk-space! Pausing file: %s") ) % GetFileName());

        PauseFile(true);
        return;
    }

    // Loop through queue
    while (!m_BufferedData_list.empty()) {
        // Get top item and remove it from the queue
        CScopedPtr<PartFileBufferedData> item(m_BufferedData_list.front());
        m_BufferedData_list.pop_front();

        // This is needed a few times
        wxASSERT((item->end - item->start) < 0xFFFFFFFF);
        uint32 lenData = (uint32)(item->end - item->start + 1);

        // SLUGFILLER: SafeHash - could be more than one part
        for (uint32 curpart = (item->start / PARTSIZE); curpart <= (item->end / PARTSIZE); ++curpart) {
            wxASSERT(curpart < partCount);
            changedPart[curpart] = true;
        }
        // SLUGFILLER: SafeHash

        // Go to the correct position in file and write block of data
        try {
            m_hpartfile.WriteAt(item->data.get(), item->start, lenData);
            // Decrease buffer size
            m_nTotalBufferData -= lenData;
        } catch (const CIOFailureException& e) {
            AddDebugLogLineM(true, logPartFile, wxT("Error while saving part-file: ") + e.what());
            SetPartFileStatus(PS_ERROR);
            // No need to bang your head against it again and again if it has already failed.
            DeleteContents(m_BufferedData_list);
            m_nTotalBufferData = 0;
            return;
        }
    }

    // Update last-changed date
    m_lastDateChanged = wxDateTime::GetTimeNow();

    try {
        // Partfile should never be too large
        if (m_hpartfile.GetLength() > GetFileSize()) {
            // it's "last chance" correction. the real bugfix has to be applied 'somewhere' else
            m_hpartfile.SetLength(GetFileSize());
        }
    } catch (const CIOFailureException& e) {
        AddDebugLogLineM(true, logPartFile,
            CFormat(wxT("Error while truncating part-file (%s): %s"))
                % m_PartPath % e.what());
        SetPartFileStatus(PS_ERROR);
    }

    // Check each part of the file
    for (uint16 partNumber = 0; partNumber < partCount; ++partNumber) {
        if (changedPart[partNumber] == false) {
            continue;
        }

        uint32 partRange = GetPartSize(partNumber) - 1;

        // Is this 9MB part complete
        if (IsComplete(partNumber)) {
            // Is the part corrupt
            if (!HashSinglePart(partNumber)) {
                AddLogLineM(true, CFormat(
                    _("Downloaded part %i is corrupt in file: %s") ) % partNumber % GetFileName() );

                // add part to corrupted list, if not already there
                if (!IsCorruptedPart(partNumber)) {
                    m_corrupted_list.push_back(partNumber);
                }
                // request AICH recovery data
                // Don't if called from the AICHRecovery. It's already there and would lead to an infinite recursion.
                if (!fromAICHRecoveryDataAvailable) {
                    RequestAICHRecovery(partNumber);
                }
                // Reduce transferred amount by corrupt amount
                m_iLostDueToCorruption += (partRange + 1);
            } else {
                if (!m_hashsetneeded) {
                    AddDebugLogLineM(false, logPartFile, CFormat(
                        wxT("Finished part %u of '%s'")) % partNumber % GetFileName());
                }

                // tell the blackbox about the verified data
                m_CorruptionBlackBox->VerifiedData(true, partNumber, 0, partRange);

                // if this part was successfully completed (although ICH is active), remove from corrupted list
                EraseFirstValue(m_corrupted_list, partNumber);

                if (status == PS_EMPTY) {
                    if (theApp->IsRunning()) { // may be called during shutdown!
                        if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
                            // Successfully completed part, make it available for sharing
                            SetStatus(PS_READY);
                            theApp->sharedfiles->SafeAddKFile(this);
                        }
                    }
                }
            }
        } else if ( IsCorruptedPart(partNumber) &&      // corrupted part:
                (thePrefs::IsICHEnabled()               // old ICH: rehash whenever we have new data hoping it will be good now
                 || fromAICHRecoveryDataAvailable)) {   // new AICH: one rehash right before performing it (maybe it's already good)
            // Try to recover with minimal loss
            if (HashSinglePart(partNumber)) {
                ++m_iTotalPacketsSavedDueToICH;

                uint64 uMissingInPart = m_gaplist.GetGapSize(partNumber);
                FillGap(partNumber);
                RemoveBlockFromList(PARTSIZE*partNumber, (PARTSIZE*partNumber + partRange));

                // tell the blackbox about the verified data
                m_CorruptionBlackBox->VerifiedData(true, partNumber, 0, partRange);

                // remove from corrupted list
                EraseFirstValue(m_corrupted_list, partNumber);

                AddLogLineM(true, CFormat( _("ICH: Recovered corrupted part %i for %s -> Saved bytes: %s") )
                    % partNumber
                    % GetFileName()
                    % CastItoXBytes(uMissingInPart));

                if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
                    if (status == PS_EMPTY) {
                        // Successfully recovered part, make it available for sharing
                        SetStatus(PS_READY);
                        if (theApp->IsRunning()) // may be called during shutdown!
                            theApp->sharedfiles->SafeAddKFile(this);
                    }
                }
            }
        }
    }

    if (theApp->IsRunning()) { // may be called during shutdown!
        // Is this file finished?
        if (m_gaplist.IsComplete()) {
            CompleteFile(false);
        }
    }
}

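// Flush sequence in short: write every buffered range, then re-hash only the
// parts whose ranges were touched (changedPart[]). For example, a buffered
// write spanning the boundary of parts 3 and 4 marks both parts, so both are
// re-checked once they become gap-free.
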
// read data for upload, return false on error
bool CPartFile::ReadData(CFileArea& area, uint64 offset, uint32 toread)
{
    if (offset + toread > GetFileSize()) {
        AddDebugLogLineM(false, logPartFile, CFormat(wxT("tried to read %d bytes past eof of %s"))
            % (offset + toread - GetFileSize()) % GetFileName());
        return false;
    }

    area.ReadAt(m_hpartfile, offset, toread);
    // if it fails it throws (which the caller should catch)
    return true;
}

void CPartFile::UpdateFileRatingCommentAvail()
{
    bool prevComment = m_hasComment;
    int prevRating = m_iUserRating;

    m_hasComment = false;
    m_iUserRating = 0;
    int ratingCount = 0;

    SourceSet::iterator it = m_SrcList.begin();
    for (; it != m_SrcList.end(); ++it) {
        CUpDownClient* cur_src = *it;

        if (!cur_src->GetFileComment().IsEmpty()) {
            if (thePrefs::IsCommentFiltered(cur_src->GetFileComment())) {
                continue;
            }
            m_hasComment = true;
        }

        uint8 rating = cur_src->GetFileRating();
        if (rating) {
            wxASSERT(rating <= 5);

            ratingCount++;
            m_iUserRating += rating;
        }
    }

    if (ratingCount) {
        m_iUserRating /= ratingCount;
        wxASSERT(m_iUserRating > 0 && m_iUserRating <= 5);
    }

    if ((prevComment != m_hasComment) || (prevRating != m_iUserRating)) {
        UpdateDisplayedInfo();
    }
}

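// Example (assuming the zero-rating guard above): three sources rating the
// file 5, 4 and 0 yield ratingCount == 2 and m_iUserRating == (5+4)/2 == 4
// (integer division); sources without a rating do not take part in the
// average.
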
void CPartFile::SetCategory(uint8 cat)
{
    wxASSERT( cat < theApp->glob_prefs->GetCatCount() );

    m_category = cat;
    SavePartFile();
}

bool CPartFile::RemoveSource(CUpDownClient* toremove, bool updatewindow, bool bDoStatsUpdate)
{
    wxASSERT(toremove);

    bool result = theApp->downloadqueue->RemoveSource(toremove, updatewindow, bDoStatsUpdate);

    // Check if the client should be deleted, but not if the client is already dying
    if (!toremove->GetSocket() && !toremove->HasBeenDeleted()) {
        if (toremove->Disconnected(wxT("RemoveSource - purged"))) {
            toremove->Safe_Delete();
        }
    }

    return result;
}

void CPartFile::AddDownloadingSource(CUpDownClient* client)
{
    CClientPtrList::iterator it =
        std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), client);
    if (it == m_downloadingSourcesList.end()) {
        m_downloadingSourcesList.push_back(client);
    }
}


void CPartFile::RemoveDownloadingSource(CUpDownClient* client)
{
    CClientPtrList::iterator it =
        std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), client);
    if (it != m_downloadingSourcesList.end()) {
        m_downloadingSourcesList.erase(it);
    }
}

void CPartFile::SetPartFileStatus(uint8 newstatus)
{
    status = newstatus;

    if (thePrefs::GetAllcatType()) {
        Notify_DownloadCtrlUpdateItem(this);
    }

    Notify_DownloadCtrlSort();
}


uint64 CPartFile::GetNeededSpace()
{
    try {
        uint64 length = m_hpartfile.GetLength();

        if (length > GetFileSize()) {
            return 0;   // Shouldn't happen, but just in case
        }

        return GetFileSize() - length;
    } catch (const CIOFailureException& e) {
        AddDebugLogLineM(true, logPartFile,
            CFormat(wxT("Error while retrieving file-length (%s): %s"))
                % m_PartPath % e.what());
        SetPartFileStatus(PS_ERROR);
        return 0;
    }
}

void CPartFile::SetStatus(uint8 in)
{
    wxASSERT( in != PS_PAUSED && in != PS_INSUFFICIENT );

    status = in;

    if (theApp->IsRunning()) {
        UpdateDisplayedInfo(true);

        if (thePrefs::ShowCatTabInfos()) {
            Notify_ShowUpdateCatTabTitles();
        }
    }
}

void CPartFile::RequestAICHRecovery(uint16 nPart)
{
    if (    !m_pAICHHashSet->HasValidMasterHash() ||
        (m_pAICHHashSet->GetStatus() != AICH_TRUSTED && m_pAICHHashSet->GetStatus() != AICH_VERIFIED)) {
        AddDebugLogLineM( false, logAICHRecovery, wxT("Unable to request AICH Recoverydata because we have no trusted Masterhash") );
        return;
    }

    if (GetPartSize(nPart) <= EMBLOCKSIZE)
        return;
    if (CAICHHashSet::IsClientRequestPending(this, nPart)) {
        AddDebugLogLineM( false, logAICHRecovery, wxT("RequestAICHRecovery: Already a request for this part pending"));
        return;
    }

    // first check if we have already the recoverydata, no need to rerequest it then
    if (m_pAICHHashSet->IsPartDataAvailable(nPart*PARTSIZE)) {
        AddDebugLogLineM( false, logAICHRecovery, wxT("Found PartRecoveryData in memory"));
        AICHRecoveryDataAvailable(nPart);
        return;
    }

    wxASSERT( nPart < GetPartCount() );
    // find some random client which supports AICH to ask for the blocks
    // first lets see how many we have at all, we prefer high id very much
    uint32 cAICHClients = 0;
    uint32 cAICHLowIDClients = 0;
    for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
        CUpDownClient* pCurClient = *(it);
        if (    pCurClient->IsSupportingAICH() &&
            pCurClient->GetReqFileAICHHash() != NULL &&
            !pCurClient->IsAICHReqPending()
            && (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
        {
            if (pCurClient->HasLowID()) {
                ++cAICHLowIDClients;
            } else {
                ++cAICHClients;
            }
        }
    }
    if ((cAICHClients | cAICHLowIDClients) == 0) {
        AddDebugLogLineM( false, logAICHRecovery, wxT("Unable to request AICH Recoverydata because found no client who supports it and has the same hash as the trusted one"));
        return;
    }

    uint32 nSeclectedClient;
    if (cAICHClients > 0) {
        nSeclectedClient = (rand() % cAICHClients) + 1;
    } else {
        nSeclectedClient = (rand() % cAICHLowIDClients) + 1;
    }
    CUpDownClient* pClient = NULL;
    for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
        CUpDownClient* pCurClient = *(it);
        if (pCurClient->IsSupportingAICH() && pCurClient->GetReqFileAICHHash() != NULL && !pCurClient->IsAICHReqPending()
            && (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
        {
            if (cAICHClients > 0) {
                if (!pCurClient->HasLowID())
                    nSeclectedClient--;
            } else {
                wxASSERT( pCurClient->HasLowID());
                nSeclectedClient--;
            }
            if (nSeclectedClient == 0) {
                pClient = pCurClient;
                break;
            }
        }
    }
    if (pClient == NULL) {
        return;
    }

    AddDebugLogLineM( false, logAICHRecovery, CFormat( wxT("Requesting AICH Hash (%s) from client %s") ) % ( cAICHClients ? wxT("HighId") : wxT("LowID") ) % pClient->GetClientFullInfo() );
    pClient->SendAICHRequest(this, nPart);
}

void CPartFile::AICHRecoveryDataAvailable(uint16 nPart)
{
    if (GetPartCount() < nPart) {
        return;
    }

    FlushBuffer(true);

    uint32 length = GetPartSize(nPart);
    // if the part was already ok, it would now be complete
    if (IsComplete(nPart)) {
        AddDebugLogLineM( false, logAICHRecovery,
            wxString::Format( wxT("Processing AICH Recovery data: The part (%u) is already complete, canceling"), nPart ) );
        return;
    }

    CAICHHashTree* pVerifiedHash = m_pAICHHashSet->m_pHashTree.FindHash(nPart*PARTSIZE, length);
    if (pVerifiedHash == NULL || !pVerifiedHash->GetHashValid()) {
        AddDebugLogLineM( true, logAICHRecovery, wxT("Processing AICH Recovery data: Unable to get verified hash from hashset (should never happen)") );
        return;
    }

    CAICHHashTree htOurHash(pVerifiedHash->GetNDataSize(), pVerifiedHash->GetIsLeftBranch(), pVerifiedHash->GetNBaseSize());
    try {
        CreateHashFromFile(m_hpartfile, PARTSIZE * nPart, length, NULL, &htOurHash);
    } catch (const CIOFailureException& e) {
        AddDebugLogLineM(true, logAICHRecovery,
            CFormat(wxT("IO failure while hashing part-file '%s': %s"))
                % m_hpartfile.GetFilePath() % e.what());
        SetPartFileStatus(PS_ERROR);
        return;
    }

    if (!htOurHash.GetHashValid()) {
        AddDebugLogLineM( false, logAICHRecovery, wxT("Processing AICH Recovery data: Failed to retrieve AICH Hashset of corrupt part") );
        return;
    }

    // now compare the hash we just did, to the verified hash and readd all blocks which are ok
    uint32 nRecovered = 0;
    for (uint32 pos = 0; pos < length; pos += EMBLOCKSIZE) {
        const uint32 nBlockSize = min<uint32>(EMBLOCKSIZE, length - pos);
        CAICHHashTree* pVerifiedBlock = pVerifiedHash->FindHash(pos, nBlockSize);
        CAICHHashTree* pOurBlock = htOurHash.FindHash(pos, nBlockSize);
        if ( pVerifiedBlock == NULL || pOurBlock == NULL || !pVerifiedBlock->GetHashValid() || !pOurBlock->GetHashValid()) {
            continue;
        }
        if (pOurBlock->GetHash() == pVerifiedBlock->GetHash()) {
            FillGap(PARTSIZE*nPart+pos, PARTSIZE*nPart + pos + (nBlockSize-1));
            RemoveBlockFromList(PARTSIZE*nPart, PARTSIZE*nPart + (nBlockSize-1));
            nRecovered += nBlockSize;
            // tell the blackbox about the verified data
            m_CorruptionBlackBox->VerifiedData(true, nPart, pos, pos + nBlockSize - 1);
        } else {
            // inform our "blackbox" about the corrupted block which may ban clients who sent it
            m_CorruptionBlackBox->VerifiedData(false, nPart, pos, pos + nBlockSize - 1);
        }
    }
    m_CorruptionBlackBox->EvaluateData();

    // ok now some sanity checks
    if (IsComplete(nPart)) {
        // this is bad, but it could probably happen under some rare circumstances
        // make sure that MD4 agrees to this fact too
        if (!HashSinglePart(nPart)) {
            AddDebugLogLineM( false, logAICHRecovery,
                wxString::Format(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering - but MD4 says it is corrupt! Setting hashset to error state, deleting part"), nPart));
            // now we are fu... unhappy
            m_pAICHHashSet->SetStatus(AICH_ERROR);
        } else {
            AddDebugLogLineM( false, logAICHRecovery, wxString::Format(
                wxT("Processing AICH Recovery data: The part (%u) got completed while recovering and MD4 agrees"), nPart) );
            if (status == PS_EMPTY && theApp->IsRunning()) {
                if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
                    // Successfully recovered part, make it available for sharing
                    SetStatus(PS_READY);
                    theApp->sharedfiles->SafeAddKFile(this);
                }
            }

            if (theApp->IsRunning()) {
                // Is this file finished?
                if (m_gaplist.IsComplete()) {
                    CompleteFile(false);
                }
            }
        }
    } // end sanity check

    // We did the best we could. If it's still incomplete, then no need to keep
    // bashing it with ICH. So remove it from the list of corrupted parts.
    EraseFirstValue(m_corrupted_list, nPart);

    // make sure the user appreciates our great recovering work :P
    AddDebugLogLineM( true, logAICHRecovery, CFormat(
        wxT("AICH successfully recovered %s of %s from part %u for %s") )
        % CastItoXBytes(nRecovered)
        % CastItoXBytes(length)
        % nPart
        % GetFileName() );
}

void CPartFile::ClientStateChanged(int oldState, int newState)
{
    if (oldState == newState) {
        return;
    }

    // If the state is -1, then it's an entirely new item
    if (oldState != -1) {
        // Was the old state a valid state?
        if (oldState == DS_ONQUEUE || oldState == DS_DOWNLOADING) {
            m_validSources--;
        } else {
            if (oldState == DS_CONNECTED /* || oldState == DS_REMOTEQUEUEFULL */ ) {
                m_validSources--;
            }

            m_notCurrentSources--;
        }
    }

    // If the state is -1, then the source is being removed
    if (newState != -1) {
        // Was the new state a valid state?
        if (newState == DS_ONQUEUE || newState == DS_DOWNLOADING) {
            ++m_validSources;
        } else {
            if (newState == DS_CONNECTED /* || newState == DS_REMOTEQUEUEFULL */ ) {
                ++m_validSources;
            }

            ++m_notCurrentSources;
        }
    }
}

bool CPartFile::AddSource(CUpDownClient* client)
{
    if (m_SrcList.insert(client).second) {
        theStats::AddFoundSource();
        theStats::AddSourceOrigin(client->GetSourceFrom());
        return true;
    } else {
        return false;
    }
}


bool CPartFile::DelSource(CUpDownClient* client)
{
    if (m_SrcList.erase(client)) {
        theStats::RemoveSourceOrigin(client->GetSourceFrom());
        theStats::RemoveFoundSource();
        return true;
    } else {
        return false;
    }
}

void CPartFile::UpdatePartsFrequency(CUpDownClient* client, bool increment)
{
    const BitVector& freq = client->GetPartStatus();

    if (m_SrcpartFrequency.size() != GetPartCount()) {
        m_SrcpartFrequency.clear();
        m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

        if (!increment) {
            return;
        }
    }

    unsigned int size = freq.size();
    if (size != m_SrcpartFrequency.size()) {
        return;
    }

    if (increment) {
        for (unsigned int i = 0; i < size; i++) {
            if (freq[i]) {
                m_SrcpartFrequency[i]++;
            }
        }
    } else {
        for (unsigned int i = 0; i < size; i++) {
            if (freq[i]) {
                m_SrcpartFrequency[i]--;
            }
        }
    }
}

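// m_SrcpartFrequency[i] is simply "how many known sources have part i": it is
// incremented when a source announces its part status and decremented when the
// source goes away, and GetNextRequestedBlock() above reads it as Criterion 1
// (rarity).
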
const FileRatingList& CPartFile::GetRatingAndComments()
{
    m_FileRatingList.clear();
    // This can be pre-processed, but is it worth the CPU?
    CPartFile::SourceSet::iterator it = m_SrcList.begin();
    for (; it != m_SrcList.end(); ++it) {
        CUpDownClient* cur_src = *it;
        if (cur_src->GetFileComment().Length() > 0 || cur_src->GetFileRating() > 0) {
            // AddDebugLogLineM(false, logPartFile, wxString(wxT("found a comment for ")) << GetFileName());
            m_FileRatingList.push_back(SFileRating(*cur_src));
        }
    }

    return m_FileRatingList;
}

#else  // CLIENT_GUI

CPartFile::CPartFile(CEC_PartFile_Tag* tag)
{
    SetFileName(CPath(tag->FileName()));
    m_abyFileHash = tag->ID();
    SetFileSize(tag->SizeFull());
    m_gaplist.Init(GetFileSize(), true);    // Init empty
    m_partmetfilename = CPath(tag->PartMetName());
    transferred = tag->SizeXfer();
    completedsize = tag->SizeDone();
    percentcompleted = (100.0 * completedsize) / GetFileSize();

    m_category = tag->FileCat();

    m_SrcpartFrequency.insert(m_SrcpartFrequency.end(), GetPartCount(), 0);
    m_iDownPriority = tag->Prio();
    if (m_iDownPriority >= 10) {
        m_iDownPriority -= 10;
        m_bAutoDownPriority = true;
    } else {
        m_bAutoDownPriority = false;
    }

    m_a4af_source_count = 0;
}

/*
 * Remote gui specific code
 */
CPartFile::~CPartFile()
{
}


const FileRatingList& CPartFile::GetRatingAndComments()
{
    return m_FileRatingList;
}

#endif // !CLIENT_GUI

void CPartFile::UpdateDisplayedInfo(bool force)
{
    uint32 curTick = ::GetTickCount();
    m_CommentUpdated = true;

    // Wait 1.5s between each redraw
    if (force || curTick - m_lastRefreshedDLDisplay > MINWAIT_BEFORE_DLDISPLAY_WINDOWUPDATE) {
        Notify_DownloadCtrlUpdateItem(this);
        m_lastRefreshedDLDisplay = curTick;
    }
}

void CPartFile::Init()
{
    m_showSources = false;
    m_lastsearchtime = 0;
    lastpurgetime = ::GetTickCount();
    m_paused = false;
    m_stopped = false;
    m_insufficient = false;

    m_iLastPausePurge = time(NULL);

    if (thePrefs::GetNewAutoDown()) {
        m_iDownPriority = PR_HIGH;
        m_bAutoDownPriority = true;
    } else {
        m_iDownPriority = PR_NORMAL;
        m_bAutoDownPriority = false;
    }

    memset(m_anStates, 0, sizeof(m_anStates));

    transferingsrc = 0; // new

    m_CommentUpdated = false;
    m_hashsetneeded = true;

    percentcompleted = 0;

    m_bPreviewing = false;
    lastseencomplete = 0;
    m_availablePartsCount = 0;
    m_ClientSrcAnswered = 0;
    m_LastNoNeededCheck = 0;

    m_nTotalBufferData = 0;
    m_nLastBufferFlushTime = 0;
    m_bPercentUpdated = false;
    m_bRecoveringArchive = false;
    m_iGainDueToCompression = 0;
    m_iLostDueToCorruption = 0;
    m_iTotalPacketsSavedDueToICH = 0;

    m_lastRefreshedDLDisplay = 0;
    m_nDlActiveTime = 0;

    m_is_A4AF_auto = false;
    m_localSrcReqQueued = false;
    m_nCompleteSourcesTime = time(NULL);
    m_nCompleteSourcesCount = 0;
    m_nCompleteSourcesCountLo = 0;
    m_nCompleteSourcesCountHi = 0;

    m_notCurrentSources = 0;

    m_LastSearchTimeKad = 0;
    m_TotalSearchesKad = 0;

    m_gapptrlist.Init(&m_gaplist);

    m_CorruptionBlackBox = new CCorruptionBlackBox();
}

wxString CPartFile::getPartfileStatus() const
{
    wxString mybuffer;

    if ((status == PS_HASHING) || (status == PS_WAITINGFORHASH)) {
        mybuffer = _("Hashing");
    } else if (status == PS_ALLOCATING) {
        mybuffer = _("Allocating");
    } else {
        switch (GetStatus()) {
            case PS_COMPLETING:
                mybuffer = _("Completing");
                break;
            case PS_COMPLETE:
                mybuffer = _("Complete");
                break;
            case PS_PAUSED:
                mybuffer = _("Paused");
                break;
            case PS_ERROR:
                mybuffer = _("Erroneous");
                break;
            case PS_INSUFFICIENT:
                mybuffer = _("Insufficient disk space");
                break;
            default:
                if (GetTransferingSrcCount() > 0) {
                    mybuffer = _("Downloading");
                } else {
                    mybuffer = _("Waiting");
                }
                break;
        }
    }

    if (m_stopped && (GetStatus() != PS_COMPLETE)) {
        mybuffer = _("Stopped");
    }

    return mybuffer;
}

int CPartFile::getPartfileStatusRang() const
{
    int tempstatus = 0;
    if (GetTransferingSrcCount() == 0) tempstatus = 1;
    switch (GetStatus()) {
        case PS_WAITINGFORHASH:
            // (the ranks assigned to the remaining states are not recovered here)
        default:
            break;
    }
    return tempstatus;
}

wxString CPartFile::GetFeedback() const
{
    wxString retval = CKnownFile::GetFeedback();
    if (GetStatus() != PS_COMPLETE) {
        retval += wxString(_("Downloaded")) + wxT(": ") + CastItoXBytes(GetCompletedSize()) + wxString::Format(wxT(" (%.2f%%)\n"), GetPercentCompleted())
            + _("Sources") + CFormat(wxT(": %u\n")) % GetSourceCount();
    }
    return retval + _("Status") + wxT(": ") + getPartfileStatus() + wxT("\n");
}


sint32 CPartFile::getTimeRemaining() const
{
    if (GetKBpsDown() < 0.001) {
        return -1;
    } else {
        return((GetFileSize() - GetCompletedSize()) / ((int)(GetKBpsDown() * 1024.0)));
    }
}

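// Example: 700 MB left at 250 kB/s gives roughly 734003200 / 256000, i.e.
// about 2867 seconds (48 minutes); below 0.001 kB/s the remaining time is
// reported as unknown (-1).
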
bool CPartFile::PreviewAvailable()
{
    FileType type = GetFiletype(GetFileName());

    return (((type == ftVideo) || (type == ftAudio)) && IsComplete(0, 256*1024));
}

bool CPartFile::CheckShowItemInGivenCat(int inCategory)
{
    // easy normal cases
    bool IsInCat;
    bool IsNotFiltered = true;

    IsInCat = ((inCategory == 0) || (inCategory > 0 && inCategory == GetCategory()));

    switch (thePrefs::GetAllcatType()) {
        case 1:
            IsNotFiltered = GetCategory() == 0 || inCategory > 0;
            break;
        case 2:
            IsNotFiltered = IsPartFile();
            break;
        case 3:
            IsNotFiltered = !IsPartFile();
            break;
        case 4:
            IsNotFiltered =
                (GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
                GetTransferingSrcCount() == 0;
            break;
        case 5:
            IsNotFiltered =
                (GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
                GetTransferingSrcCount() > 0;
            break;
        case 6:
            IsNotFiltered = GetStatus() == PS_ERROR;
            break;
        case 7:
            IsNotFiltered = GetStatus() == PS_PAUSED && !IsStopped();
            break;
        case 8:
            IsNotFiltered = IsStopped();
            break;
        case 9:
            IsNotFiltered = GetFiletype(GetFileName()) == ftVideo;
            break;
        case 10:
            IsNotFiltered = GetFiletype(GetFileName()) == ftAudio;
            break;
        case 11:
            IsNotFiltered = GetFiletype(GetFileName()) == ftArchive;
            break;
        case 12:
            IsNotFiltered = GetFiletype(GetFileName()) == ftCDImage;
            break;
        case 13:
            IsNotFiltered = GetFiletype(GetFileName()) == ftPicture;
            break;
        case 14:
            IsNotFiltered = GetFiletype(GetFileName()) == ftText;
            break;
        case 15:
            IsNotFiltered = !IsStopped() && GetStatus() != PS_PAUSED;
            break;
    }

    return IsNotFiltered && IsInCat;
}

void CPartFile::SetActive(bool bActive)
{
    time_t tNow = time(NULL);

    if (bActive) {
        if (theApp->IsConnected()) {
            if (m_tActivated == 0) {
                m_tActivated = tNow;
            }
        }
    } else {
        if (m_tActivated != 0) {
            m_nDlActiveTime += tNow - m_tActivated;
            m_tActivated = 0;
        }
    }
}


uint32 CPartFile::GetDlActiveTime() const
{
    uint32 nDlActiveTime = m_nDlActiveTime;
    if (m_tActivated != 0) {
        nDlActiveTime += time(NULL) - m_tActivated;
    }
    return nDlActiveTime;
}

uint8 CPartFile::GetStatus(bool ignorepause) const
{
    if (    (!m_paused && !m_insufficient) ||
        status == PS_ERROR ||
        status == PS_COMPLETING ||
        status == PS_COMPLETE ||
        ignorepause) {
        return status;
    } else if (m_insufficient) {
        return PS_INSUFFICIENT;
    } else {
        return PS_PAUSED;
    }
}

void CPartFile::AddDeadSource(const CUpDownClient* client)
{
    m_deadSources.AddDeadSource(client);
}


bool CPartFile::IsDeadSource(const CUpDownClient* client)
{
    return m_deadSources.IsDeadSource(client);
}

void CPartFile::SetFileName(const CPath& fileName)
{
    CKnownFile* pFile = theApp->sharedfiles->GetFileByID(GetFileHash());

    bool is_shared = (pFile && pFile == this);

    if (is_shared) {
        // The file is shared, we must clear the search keywords so we don't
        // publish the old name anymore.
        theApp->sharedfiles->RemoveKeywords(this);
    }

    CKnownFile::SetFileName(fileName);

    if (is_shared) {
        // And of course, we must advertise the new name if the file is shared.
        theApp->sharedfiles->AddKeywords(this);
    }

    UpdateDisplayedInfo(true);
}

uint16 CPartFile::GetMaxSources() const
{
    // This is just like this, while we don't import the private max sources per file
    return thePrefs::GetMaxSourcePerFile();
}


uint16 CPartFile::GetMaxSourcePerFileSoft() const
{
    unsigned int temp = ((unsigned int)GetMaxSources() * 9L) / 10;
    if (temp > MAX_SOURCES_FILE_SOFT) {
        return MAX_SOURCES_FILE_SOFT;
    }
    return temp;
}


uint16 CPartFile::GetMaxSourcePerFileUDP() const
{
    unsigned int temp = ((unsigned int)GetMaxSources() * 3L) / 4;
    if (temp > MAX_SOURCES_FILE_UDP) {
        return MAX_SOURCES_FILE_UDP;
    }
    return temp;
}

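// Example: with a per-file limit of 300 sources the soft limit is
// 300*9/10 = 270 and the UDP limit 300*3/4 = 225, both additionally capped by
// MAX_SOURCES_FILE_SOFT / MAX_SOURCES_FILE_UDP.
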
#define DROP_FACTOR 2

CUpDownClient* CPartFile::GetSlowerDownloadingClient(uint32 speed, CUpDownClient* caller) {
    // printf("Start slower source calculation\n");
    for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
        CUpDownClient* cur_src = *it++;
        if ((cur_src->GetDownloadState() == DS_DOWNLOADING) && (cur_src != caller)) {
            uint32 factored_bytes_per_second = static_cast<uint32>(
                (cur_src->GetKBpsDown() * 1024) * DROP_FACTOR);
            if (factored_bytes_per_second < speed) {
                // printf("Selecting source %p to drop: %d < %d\n", cur_src, factored_bytes_per_second, speed);
                // printf("End slower source calculation\n");
                return cur_src;
            } else {
                // printf("Not selecting source %p to drop: %d > %d\n", cur_src, factored_bytes_per_second, speed);
            }
        }
    }
    // printf("End slower source calculation\n");
    return NULL;
}

void CPartFile::AllocationFinished()
{
    // see if it can be opened
    if (!m_hpartfile.Open(m_PartPath, CFile::read_write)) {
        AddLogLineM(false, CFormat(_("ERROR: Failed to open partfile '%s'")) % GetFullName());
        SetPartFileStatus(PS_ERROR);
    } else {
        // then close the handle again
        m_hpartfile.Release(true);
    }
}

// File_checked_for_headers