// This file is part of the aMule Project.
// Copyright (c) 2003-2008 aMule Team ( admin@amule.org / http://www.amule.org )
// Copyright (c) 2002-2008 Merkur ( devs@emule-project.net / http://www.emule-project.net )
// Any parts of this program derived from the xMule, lMule or eMule project,
// or contributed by third-party developers are copyrighted by their
// respective authors.
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
#include "PartFile.h"			// Interface declarations.
#include "config.h"			// Needed for VERSION
#include <protocol/kad/Constants.h>
#include <protocol/ed2k/Client2Client/TCP.h>
#include <protocol/Protocols.h>
#include <common/DataFileVersion.h>
#include <common/Constants.h>
#include <tags/FileTags.h>
#include <wx/tokenzr.h>			// Needed for wxStringTokenizer
#include "KnownFileList.h"		// Needed for CKnownFileList
#include "CanceledFileList.h"
#include "UploadQueue.h"		// Needed for CFileHash
#include "IPFilter.h"			// Needed for CIPFilter
#include "Server.h"			// Needed for CServer
#include "ServerConnect.h"		// Needed for CServerConnect
#include "UpDownClientEC.h"		// Needed for CUpDownClient
#include "updownclient.h"		// Needed for CUpDownClient
#include "MemFile.h"			// Needed for CMemFile
#include "Preferences.h"		// Needed for CPreferences
#include "DownloadQueue.h"		// Needed for CDownloadQueue
#include "amule.h"			// Needed for theApp
#include "ED2KLink.h"			// Needed for CED2KLink
#include "Packet.h"			// Needed for CTag
#include "SearchList.h"			// Needed for CSearchFile
#include "ClientList.h"			// Needed for clientlist
#include "Statistics.h"			// Needed for theStats
#include <common/Format.h>		// Needed for CFormat
#include <common/FileFunctions.h>	// Needed for GetLastModificationTime
#include "ThreadTasks.h"		// Needed for CHashingTask/CCompletionTask/CAllocateFileTask
#include "GuiEvents.h"			// Needed for Notify_*
#include "DataToText.h"			// Needed for OriginToText()
#include "PlatformSpecific.h"		// Needed for CreateSparseFile()
#include "FileArea.h"			// Needed for CFileArea
#include "ScopedPtr.h"			// Needed for CScopedArray
#include "CorruptionBlackBox.h"
#include "kademlia/kademlia/Kademlia.h"
#include "kademlia/kademlia/Search.h"
SFileRating::SFileRating(const wxString &u, const wxString &f, sint16 r, const wxString &c)

SFileRating::SFileRating(const SFileRating &fr)
	: UserName(fr.UserName),
	  FileName(fr.FileName),

SFileRating::SFileRating(const CUpDownClient &client)
	: UserName(client.GetUserName()),
	  FileName(client.GetClientFilename()),
	  Rating(client.GetFileRating()),
	  Comment(client.GetFileComment())

SFileRating::~SFileRating()
class PartFileBufferedData

	CFileArea area;				// File area to be written
	uint64 start;				// This is the start offset of the data
	uint64 end;				// This is the end offset of the data
	Requested_Block_Struct *block;		// This is the requested block that this data relates to

	PartFileBufferedData(CFileAutoClose& file, byte* data, uint64 _start, uint64 _end, Requested_Block_Struct* _block)
		: start(_start), end(_end), block(_block)
		area.StartWriteAt(file, start, end-start+1);
		memcpy(area.GetBuffer(), data, end-start+1);

typedef std::list<Chunk> ChunkList;
CPartFile::CPartFile()

CPartFile::CPartFile(CSearchFile* searchresult)

	m_abyFileHash = searchresult->GetFileHash();
	SetFileName(searchresult->GetFileName());
	SetFileSize(searchresult->GetFileSize());

	for (unsigned int i = 0; i < searchresult->m_taglist.size(); ++i) {
		const CTag& pTag = searchresult->m_taglist[i];

		bool bTagAdded = false;
		if (pTag.GetNameID() == 0 && !pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {
				{ wxT(FT_ED2K_MEDIA_ARTIST),	2 },
				{ wxT(FT_ED2K_MEDIA_ALBUM),	2 },
				{ wxT(FT_ED2K_MEDIA_TITLE),	2 },
				{ wxT(FT_ED2K_MEDIA_LENGTH),	2 },
				{ wxT(FT_ED2K_MEDIA_BITRATE),	3 },
				{ wxT(FT_ED2K_MEDIA_CODEC),	2 }

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if ( pTag.GetType() == _aMetaTags[t].nType &&
					 (pTag.GetName() == _aMetaTags[t].pszName)) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

					// skip "length" tags with "0: 0" values
					if (pTag.GetName() == wxT(FT_ED2K_MEDIA_LENGTH)) {
						if (pTag.GetStr().IsSameAs(wxT("0: 0")) ||
							pTag.GetStr().IsSameAs(wxT("0:0"))) {

					// skip "bitrate" tags with '0' values
					if ((pTag.GetName() == wxT(FT_ED2K_MEDIA_BITRATE)) && !pTag.GetInt()) {

					AddDebugLogLineN( logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);

		} else if (pTag.GetNameID() != 0 && pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if (pTag.GetType() == _aMetaTags[t].nType && pTag.GetNameID() == _aMetaTags[t].nID) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

					AddDebugLogLineN( logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);

			AddDebugLogLineN( logPartFile,
				wxT("CPartFile::CPartFile(CSearchFile*): ignored tag ") +
				pTag.GetFullInfo() );
CPartFile::CPartFile(const CED2KFileLink* fileLink)

	SetFileName(CPath(fileLink->GetName()));
	SetFileSize(fileLink->GetSize());
	m_abyFileHash = fileLink->GetHashKey();

	if (fileLink->m_hashset) {
		if (!LoadHashsetFromFile(fileLink->m_hashset, true)) {
			AddDebugLogLineC(logPartFile, wxT("eD2K link contained invalid hashset: ") + fileLink->GetLink());

CPartFile::~CPartFile()

	// if it's not opened, it was completed or deleted
	if (m_hpartfile.IsOpened()) {
		// Update met file (with current directory entry)

	DeleteContents(m_BufferedData_list);
	delete m_CorruptionBlackBox;

	wxASSERT(m_SrcList.empty());
	wxASSERT(m_A4AFsrclist.empty());
void CPartFile::CreatePartFile()

	// use lowest free partfilenumber for free file (InterCeptor)
		m_partmetfilename = CPath(CFormat(wxT("%03i.part.met")) % i);
		m_fullname = thePrefs::GetTempDir().JoinPaths(m_partmetfilename);
	} while (m_fullname.FileExists());

	m_CorruptionBlackBox->SetPartFileInfo(GetFileName().GetPrintable(), m_partmetfilename.RemoveAllExt().GetPrintable());

	wxString strPartName = m_partmetfilename.RemoveExt().GetRaw();
	m_taglist.push_back(CTagString(FT_PARTFILENAME, strPartName));

	m_gaplist.Init(GetFileSize(), true);	// Init empty

	m_PartPath = m_fullname.RemoveExt();
	if (thePrefs::GetAllocFullFile()) {
		fileCreated = m_hpartfile.Create(m_PartPath, true);
		fileCreated = PlatformSpecific::CreateSparseFile(m_PartPath, GetFileSize());
		AddLogLineN(_("ERROR: Failed to create partfile"));

	SetFilePath(thePrefs::GetTempDir());

	if (thePrefs::GetAllocFullFile()) {
		SetStatus(PS_ALLOCATING);
		CThreadScheduler::AddTask(new CAllocateFileTask(this, thePrefs::AddNewFilesPaused()));
		AllocationFinished();

	m_hashsetneeded = (GetED2KPartHashCount() > 0);

	SetActive(theApp->IsConnected());
uint8 CPartFile::LoadPartFile(const CPath& in_directory, const CPath& filename, bool from_backup, bool getsizeonly)

	bool isnewstyle = false;
	uint8 version, partmettype = PMT_UNKNOWN;

	std::map<uint16, Gap_Struct*> gap_map; // Slugfiller

	m_partmetfilename = filename;
	m_CorruptionBlackBox->SetPartFileInfo(GetFileName().GetPrintable(), m_partmetfilename.RemoveAllExt().GetPrintable());
	m_filePath = in_directory;
	m_fullname = m_filePath.JoinPaths(m_partmetfilename);
	m_PartPath = m_fullname.RemoveExt();
	// read file data from the part.met file
	CPath curMetFilename = m_fullname;
		curMetFilename = curMetFilename.AppendExt(PARTMET_BAK_EXT);
		AddLogLineN(CFormat( _("Trying to load backup of met-file from %s") )

		CFile metFile(curMetFilename, CFile::read);
		if (!metFile.IsOpened()) {
			AddLogLineN(CFormat( _("ERROR: Failed to open part.met file: %s ==> %s") )
		} else if (metFile.GetLength() == 0) {
			AddLogLineN(CFormat( _("ERROR: part.met file is 0 size: %s ==> %s") )

		version = metFile.ReadUInt8();
		if (version != PARTFILE_VERSION && version != PARTFILE_SPLITTEDVERSION && version != PARTFILE_VERSION_LARGEFILE){
			//if (version == 83) return ImportShareazaTempFile(...)
			AddLogLineN(CFormat( _("ERROR: Invalid part.met file version: %s ==> %s") )

		isnewstyle = (version == PARTFILE_SPLITTEDVERSION);
		partmettype = isnewstyle ? PMT_SPLITTED : PMT_DEFAULTOLD;

		if (version == PARTFILE_VERSION) {	// Do we still need this check?
			uint8 test[4];			// It will fail for certain files.
			metFile.Seek(24, wxFromStart);
			metFile.Read(test, 4);

			metFile.Seek(1, wxFromStart);
			if (test[0] == 0 && test[1] == 0 && test[2] == 2 && test[3] == 1) {
				isnewstyle = true;	// eDonkey's so-called "old part style"
				partmettype = PMT_NEWOLD;

			uint32 temp = metFile.ReadUInt32();
			if (temp == 0) {	// 0.48 partmets - different again
				LoadHashsetFromFile(&metFile, false);
				metFile.Seek(2, wxFromStart);
				LoadDateFromFile(&metFile);
				m_abyFileHash = metFile.ReadHash();

			LoadDateFromFile(&metFile);
			LoadHashsetFromFile(&metFile, false);

		uint32 tagcount = metFile.ReadUInt32();
		for (uint32 j = 0; j < tagcount; ++j) {
			CTag newtag(metFile, true);
				(newtag.GetNameID() == FT_FILESIZE ||
				 newtag.GetNameID() == FT_FILENAME))) {
				switch(newtag.GetNameID()) {
						if (!GetFileName().IsOk()) {
							// If it's not empty, we already loaded the unicoded one
							SetFileName(CPath(newtag.GetStr()));
					case FT_LASTSEENCOMPLETE: {
						lastseencomplete = newtag.GetInt();
						SetFileSize(newtag.GetInt());
					case FT_TRANSFERRED: {
						transferred = newtag.GetInt();
						//#warning needs setfiletype string
						//SetFileType(newtag.GetStr());
						m_category = newtag.GetInt();
						if (m_category > theApp->glob_prefs->GetCatCount() - 1 ) {
					case FT_OLDDLPRIORITY:
					case FT_DLPRIORITY: {
							m_iDownPriority = newtag.GetInt();
							if( m_iDownPriority == PR_AUTO ){
								m_iDownPriority = PR_HIGH;
								SetAutoDownPriority(true);
								if ( m_iDownPriority != PR_LOW &&
									 m_iDownPriority != PR_NORMAL &&
									 m_iDownPriority != PR_HIGH )
									m_iDownPriority = PR_NORMAL;
								SetAutoDownPriority(false);
						m_paused = (newtag.GetInt() == 1);
						m_stopped = m_paused;
					case FT_OLDULPRIORITY:
					case FT_ULPRIORITY: {
							SetUpPriority(newtag.GetInt(), false);
							if( GetUpPriority() == PR_AUTO ){
								SetUpPriority(PR_HIGH, false);
								SetAutoUpPriority(true);
								SetAutoUpPriority(false);
					case FT_KADLASTPUBLISHSRC: {
						SetLastPublishTimeKadSrc(newtag.GetInt(), 0);
						if (GetLastPublishTimeKadSrc() > (uint32)time(NULL) + KADEMLIAREPUBLISHTIMES) {
							// There may be a possibility of an older client that saved a random number here.. This will check for that..
							SetLastPublishTimeKadSrc(0, 0);
					case FT_KADLASTPUBLISHNOTES: {
						SetLastPublishTimeKadNotes(newtag.GetInt());
					// old tags: as long as they are not needed, take the chance to purge them
					case FT_KADLASTPUBLISHKEY:
					case FT_DL_ACTIVE_TIME:
						if (newtag.IsInt()) {
							m_nDlActiveTime = newtag.GetInt();
					case FT_CORRUPTEDPARTS: {
						wxASSERT(m_corrupted_list.empty());
						wxString strCorruptedParts(newtag.GetStr());
						wxStringTokenizer tokenizer(strCorruptedParts, wxT(","));
						while ( tokenizer.HasMoreTokens() ) {
							wxString token = tokenizer.GetNextToken();
							if (token.ToULong(&uPart)) {
								if (uPart < GetPartCount() && !IsCorruptedPart(uPart)) {
									m_corrupted_list.push_back(uPart);

							hash.DecodeBase32(newtag.GetStr()) == CAICHHash::GetHashSize();
						wxASSERT(hashSizeOk);
							m_pAICHHashSet->SetMasterHash(hash, AICH_VERIFIED);
					case FT_ATTRANSFERRED: {
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (uint64)newtag.GetInt());
					case FT_ATTRANSFERREDHI: {
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (((uint64)newtag.GetInt()) << 32));
					case FT_ATREQUESTED: {
						statistic.SetAllTimeRequests(newtag.GetInt());
						statistic.SetAllTimeAccepts(newtag.GetInt());
					// Start Changes by Slugfiller for better exception handling

					wxCharBuffer tag_ansi_name = newtag.GetName().ToAscii();
					char gap_mark = tag_ansi_name ? tag_ansi_name[0u] : 0;
					if ( newtag.IsInt() && (newtag.GetName().Length() > 1) &&
						((gap_mark == FT_GAPSTART) ||
						 (gap_mark == FT_GAPEND))) {
						Gap_Struct *gap = NULL;
						unsigned long int gapkey;
						if (newtag.GetName().Mid(1).ToULong(&gapkey)) {
							if ( gap_map.find( gapkey ) == gap_map.end() ) {
								gap = new Gap_Struct;
								gap_map[gapkey] = gap;
								gap->start = (uint64)-1;
								gap->end = (uint64)-1;
								gap = gap_map[ gapkey ];
							if (gap_mark == FT_GAPSTART) {
								gap->start = newtag.GetInt();
							if (gap_mark == FT_GAPEND) {
								gap->end = newtag.GetInt()-1;
							AddDebugLogLineN(logPartFile, wxT("Wrong gap map key while reading met file!"));
					// End Changes by Slugfiller for better exception handling
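					// Editor's note (illustrative, not original code): the character after the
					// FT_GAPSTART/FT_GAPEND marker is a decimal key that pairs the two tags of one
					// gap. A Gap_Struct is created on the first tag of a pair with both offsets set
					// to the (uint64)-1 sentinel, and the sanity checks performed when gap_map is
					// flushed further down rely on that sentinel to drop half-read pairs.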
			m_taglist.push_back(newtag);
			// Nothing. Else, nothing.

		// load the hashsets from the hybridstylepartmet
		if (isnewstyle && !getsizeonly && (metFile.GetPosition() < metFile.GetLength()) ) {
			metFile.Seek(1, wxFromCurrent);

			uint16 parts = GetPartCount();	// assuming we will get all hashsets

			for (uint16 i = 0; i < parts && (metFile.GetPosition() + 16 < metFile.GetLength()); ++i){
				CMD4Hash cur_hash = metFile.ReadHash();
				m_hashlist.push_back(cur_hash);

			if (!m_hashlist.empty()) {
				CreateHashFromHashlist(m_hashlist, &checkhash);
			if (m_abyFileHash == checkhash) {

	} catch (const CInvalidPacket& e) {
		AddLogLineC(CFormat(_("Error: %s (%s) is corrupt (bad tags: %s), unable to load file."))
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile, CFormat( wxT("IO failure while loading '%s': %s") )
	} catch (const CEOFException& WXUNUSED(e)) {
		AddLogLineC(CFormat( _("ERROR: %s (%s) is corrupt (wrong tagcount), unable to load file.") )
		AddLogLineC(_("Trying to recover file info..."));

		// A file is considered safe to recover if it has
		// We have the filesize, try to recover the other needed info.
		// Do we need to check gaps? I think not,
		// because they are checked below. Worst
		// scenario will only mark file as 0 bytes downloaded.

		if (!GetFileName().IsOk()) {
			// Not critical, let's put a random filename.
				"Recovering no-named file - will try to recover it as RecoveredFile.dat"));
			SetFileName(CPath(wxT("RecoveredFile.dat")));

		AddLogLineC(_("Recovered all available file info :D - Trying to use it..."));
		AddLogLineC(_("Unable to recover file info :("));
	m_gaplist.Init(GetFileSize(), false);	// Init full, then add gaps

	// Now to flush the map into the list (Slugfiller)
	std::map<uint16, Gap_Struct*>::iterator it = gap_map.begin();
	for ( ; it != gap_map.end(); ++it ) {
		Gap_Struct* gap = it->second;
		// SLUGFILLER: SafeHash - revised code, and extra safety
		if ( (gap->start != (uint64)-1) &&
			 (gap->end != (uint64)-1) &&
			 gap->start <= gap->end &&
			 gap->start < GetFileSize()) {
			if (gap->end >= GetFileSize()) {
				gap->end = GetFileSize()-1; // Clipping
			m_gaplist.AddGap(gap->start, gap->end); // All tags accounted for, use safe adding
		// SLUGFILLER: SafeHash
	// check if this is a backup
	if ( m_fullname.GetExt().MakeLower() == wxT("backup" )) {
		m_fullname = m_fullname.RemoveExt();

	// open permanent handle
	if ( !m_hpartfile.Open(m_PartPath, CFile::read_write)) {
		AddLogLineN(CFormat( _("Failed to open %s (%s)") )

		// SLUGFILLER: SafeHash - final safety, make sure any missing part of the file is gap
		if (m_hpartfile.GetLength() < GetFileSize())
			AddGap(m_hpartfile.GetLength(), GetFileSize()-1);
		// Goes both ways - Partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			AddDebugLogLineC(logPartFile, CFormat( wxT("Partfile \"%s\" is too large! Truncating %llu bytes.") ) % GetFileName() % (m_hpartfile.GetLength() - GetFileSize()));
			m_hpartfile.SetLength(GetFileSize());
		// SLUGFILLER: SafeHash
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile, CFormat( wxT("Error while accessing partfile \"%s\": %s") ) % GetFileName() % e.what());

	// now close the file again until needed
	m_hpartfile.Release(true);

	// check hashcount, file status etc
	if (GetHashCount() != GetED2KPartHashCount()){
		m_hashsetneeded = true;
		m_hashsetneeded = false;
		for (size_t i = 0; i < m_hashlist.size(); ++i) {

	if (m_gaplist.IsComplete()) { // is this file complete already?

	if (!isnewstyle) { // not for importing
		const time_t file_date = CPath::GetModificationTime(m_PartPath);
		if (m_lastDateChanged != file_date) {
			// It's pointless to rehash an empty file, since the case
			// where a user has zero'd a file is handled above ...
			if (m_hpartfile.GetLength()) {
				AddLogLineN(CFormat( _("WARNING: %s might be corrupted (%i)") )
					% (m_lastDateChanged - file_date) );

				SetStatus(PS_WAITINGFORHASH);

				CPath partFileName = m_partmetfilename.RemoveExt();
				CThreadScheduler::AddTask(new CHashingTask(m_filePath, partFileName, this));

	UpdateCompletedInfos();
	if (completedsize > transferred) {
		m_iGainDueToCompression = completedsize - transferred;
	} else if (completedsize != transferred) {
		m_iLostDueToCorruption = transferred - completedsize;
bool CPartFile::SavePartFile(bool Initial)

		case PS_WAITINGFORHASH:

	/* Don't write anything to disk if less than 100 KB of free space is left. */
	sint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if ((free != wxInvalidOffset) && (free < (100 * 1024))) {

		if (!m_PartPath.FileExists()) {
			throw wxString(wxT(".part file not found"));

		uint32 lsc = lastseencomplete;

		CPath::BackupFile(m_fullname, wxT(".backup"));
		CPath::RemoveFile(m_fullname);

		file.Open(m_fullname, CFile::write);
		if (!file.IsOpened()) {
			throw wxString(wxT("Failed to open part.met file"));

		file.WriteUInt8(IsLargeFile() ? PARTFILE_VERSION_LARGEFILE : PARTFILE_VERSION);
		file.WriteUInt32(CPath::GetModificationTime(m_PartPath));
		file.WriteHash(m_abyFileHash);

		uint16 parts = m_hashlist.size();
		file.WriteUInt16(parts);
		for (int x = 0; x < parts; ++x) {
			file.WriteHash(m_hashlist[x]);

		#define FIXED_TAGS 15
		uint32 tagcount = m_taglist.size() + FIXED_TAGS + (m_gaplist.size()*2);
		if (!m_corrupted_list.empty()) {
		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
		if (GetLastPublishTimeKadSrc()){
		if (GetLastPublishTimeKadNotes()){
		if (GetDlActiveTime()){

		file.WriteUInt32(tagcount);
		//#warning Kry - Where are lost by corruption and gained by compression?

		// 0 (unicoded part file name)
		// We write it with BOM to keep eMule compatibility. Note that the 'printable' filename is saved,
		// as presently the filename does not represent an actual file.
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file, utf8strOptBOM );
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file );			// 1

		CTagIntSized( FT_FILESIZE, GetFileSize(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 2
		CTagIntSized( FT_TRANSFERRED, transferred, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 3
		CTagInt32( FT_STATUS, (m_paused ? 1 : 0)).WriteTagToFile( &file );				// 4

		if ( IsAutoDownPriority() ) {
			CTagInt32( FT_DLPRIORITY, (uint8)PR_AUTO).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, (uint8)PR_AUTO).WriteTagToFile( &file );	// 6
			CTagInt32( FT_DLPRIORITY, m_iDownPriority).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, m_iDownPriority).WriteTagToFile( &file );	// 6

		CTagInt32( FT_LASTSEENCOMPLETE, lsc).WriteTagToFile( &file );	// 7

		if ( IsAutoUpPriority() ) {
			CTagInt32( FT_ULPRIORITY, (uint8)PR_AUTO).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, (uint8)PR_AUTO).WriteTagToFile( &file );	// 9
			CTagInt32( FT_ULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 9

		CTagInt32(FT_CATEGORY, m_category).WriteTagToFile( &file );						// 10
		CTagInt32(FT_ATTRANSFERRED, statistic.GetAllTimeTransferred() & 0xFFFFFFFF).WriteTagToFile( &file );	// 11
		CTagInt32(FT_ATTRANSFERREDHI, statistic.GetAllTimeTransferred() >> 32).WriteTagToFile( &file );		// 12
		CTagInt32(FT_ATREQUESTED, statistic.GetAllTimeRequests()).WriteTagToFile( &file );			// 13
		CTagInt32(FT_ATACCEPTED, statistic.GetAllTimeAccepts()).WriteTagToFile( &file );			// 14
		// corrupt part infos
		if (!m_corrupted_list.empty()) {
			wxString strCorruptedParts;
			std::list<uint16>::iterator it = m_corrupted_list.begin();
			for (; it != m_corrupted_list.end(); ++it) {
				uint16 uCorruptedPart = *it;
				if (!strCorruptedParts.IsEmpty()) {
					strCorruptedParts += wxT(",");
				strCorruptedParts += CFormat(wxT("%u")) % uCorruptedPart;
			wxASSERT( !strCorruptedParts.IsEmpty() );

			CTagString( FT_CORRUPTEDPARTS, strCorruptedParts).WriteTagToFile( &file );	// 11?

		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
			CTagString aichtag(FT_AICH_HASH, m_pAICHHashSet->GetMasterHash().GetString() );
			aichtag.WriteTagToFile(&file);	// 12?

		if (GetLastPublishTimeKadSrc()){
			CTagInt32(FT_KADLASTPUBLISHSRC, GetLastPublishTimeKadSrc()).WriteTagToFile(&file);	// 15?

		if (GetLastPublishTimeKadNotes()){
			CTagInt32(FT_KADLASTPUBLISHNOTES, GetLastPublishTimeKadNotes()).WriteTagToFile(&file);	// 16?

		if (GetDlActiveTime()){
			CTagInt32(FT_DL_ACTIVE_TIME, GetDlActiveTime()).WriteTagToFile(&file);	// 17
		for (uint32 j = 0; j < (uint32)m_taglist.size(); ++j) {
			m_taglist[j].WriteTagToFile(&file);

		for (CGapList::const_iterator it = m_gaplist.begin(); it != m_gaplist.end(); ++it) {
			wxString tagName = CFormat(wxT(" %u")) % i_pos;

			// gap start = first missing byte, but gap end = first non-missing byte
			// in eDonkey; I think it's easier to use the real limits internally
			tagName[0] = FT_GAPSTART;
			CTagIntSized(tagName, it.start(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

			tagName[0] = FT_GAPEND;
			CTagIntSized(tagName, it.end() + 1, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );
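			// Editor's note (worked example, not original code): for the gap with index i_pos == 2
			// covering bytes 1000..1999, the loop above writes an integer tag named
			// "<FT_GAPSTART>2" with value 1000 and one named "<FT_GAPEND>2" with value 2000
			// (it.end() + 1), matching the eDonkey convention of storing the first non-missing
			// byte as the gap end; LoadPartFile converts it back with GetInt()-1.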
	} catch (const wxString& error) {
		AddLogLineNS(CFormat( _("ERROR while saving partfile: %s (%s ==> %s)") )
	} catch (const CIOFailureException& e) {
		AddLogLineCS(_("IO failure while saving partfile: ") + e.what());

		CPath::RemoveFile(m_fullname.AppendExt(wxT(".backup")));

	sint64 metLength = m_fullname.GetFileSize();
	if (metLength == wxInvalidOffset) {
		theApp->ShowAlert( CFormat( _("Could not retrieve length of '%s' - using %s file.") )
		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
	} else if (metLength == 0) {
		// Don't backup if it's 0 size but raise a warning!!!
		theApp->ShowAlert( CFormat( _("'%s' is 0 size somehow - using %s file.") )
		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
		// no error, just backup
		CPath::BackupFile(m_fullname, PARTMET_BAK_EXT);
void CPartFile::SaveSourceSeeds()

#define MAX_SAVED_SOURCES 10

	// Kry - Source seeds
	// Based on a feature request, this saves the last MAX_SAVED_SOURCES
	// sources of the file, giving a 'seed' for the next run.
	// We save the last sources because:
	// 1 - They could be the hardest to get
	// 2 - They are the most likely to still be available
	// However, if we have downloading sources, they take preference because
	// we probably have more credits on them.
	// Anyway, source exchange will get us the rest of the sources.
	// This feature is currently used only on rare files (< 20 sources)
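	// Editor's note (illustrative summary, not original code): the ".seeds" file written below is,
	// in its current (v3) form, a leading 0 marker byte, a one-byte source count, then for each
	// saved source its hybrid user ID (uint32), port (uint16), user hash and a crypt-options byte,
	// followed by a uint32 timestamp that LoadSourceSeeds() uses to discard seeds older than the
	// accepted time window.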
	if (GetSourceCount() > 20) {

	CClientRefList source_seeds;

	CClientRefList::iterator it = m_downloadingSourcesList.begin();
	for ( ; it != m_downloadingSourcesList.end() && n_sources < MAX_SAVED_SOURCES; ++it) {
		if (!it->HasLowID()) {
			source_seeds.push_back(*it);

	if (n_sources < MAX_SAVED_SOURCES) {
		// Not enough downloading sources to fill the list, going to sources list
		if (GetSourceCount() > 0) {
			SourceSet::reverse_iterator rit = m_SrcList.rbegin();
			for ( ; ((rit != m_SrcList.rend()) && (n_sources < MAX_SAVED_SOURCES)); ++rit) {
				if (!rit->HasLowID()) {
					source_seeds.push_back(*rit);

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));

		file.Create(seedsPath, true);
		if (!file.IsOpened()) {
			AddLogLineN(CFormat( _("Failed to save part.met.seeds file for %s") )

		file.WriteUInt8(0); // v3, to avoid v2 clients choking on it.
		file.WriteUInt8(source_seeds.size());

		CClientRefList::iterator it2 = source_seeds.begin();
		for (; it2 != source_seeds.end(); ++it2) {
			CUpDownClient* cur_src = it2->GetClient();
			file.WriteUInt32(cur_src->GetUserIDHybrid());
			file.WriteUInt16(cur_src->GetUserPort());
			file.WriteHash(cur_src->GetUserHash());
			// CryptSettings - See SourceExchange V4
			const uint8 uSupportsCryptLayer	= cur_src->SupportsCryptLayer() ? 1 : 0;
			const uint8 uRequestsCryptLayer	= cur_src->RequestsCryptLayer() ? 1 : 0;
			const uint8 uRequiresCryptLayer	= cur_src->RequiresCryptLayer() ? 1 : 0;
			const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
			file.WriteUInt8(byCryptOptions);
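			// Editor's note (worked example, not original code): byCryptOptions packs the three
			// flags into bits 2..0 (requires, requests, supports). A source that supports and
			// requires obfuscation but does not actively request it is therefore written as
			// (1 << 2) | (0 << 1) | (1 << 0) == 0x05.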
		/* v2: Added to keep track of too old seeds */
		file.WriteUInt32(wxDateTime::Now().GetTicks());

		AddLogLineN(CFormat( wxPLURAL("Saved %i source seed for partfile: %s (%s)", "Saved %i source seeds for partfile: %s (%s)", n_sources) )
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC( logPartFile, CFormat( wxT("Error saving partfile's seeds file (%s - %s): %s") )
		CPath::RemoveFile(seedsPath);
void CPartFile::LoadSourceSeeds()

	CMemFile sources_data;

	bool valid_sources = false;

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));
	if (!seedsPath.FileExists()) {

	CFile file(seedsPath, CFile::read);
	if (!file.IsOpened()) {
		AddLogLineN(CFormat( _("Partfile %s (%s) has no seeds file") )

	if (file.GetLength() <= 1) {
		AddLogLineN(CFormat( _("Partfile %s (%s) has a void seeds file") )

		uint8 src_count = file.ReadUInt8();

		bool bUseSX2Format = (src_count == 0);

		if (bUseSX2Format) {
			src_count = file.ReadUInt8();

		sources_data.WriteUInt16(src_count);

		for (int i = 0; i < src_count; ++i) {
			uint32 dwID = file.ReadUInt32();
			uint16 nPort = file.ReadUInt16();

			sources_data.WriteUInt32(bUseSX2Format ? dwID : wxUINT32_SWAP_ALWAYS(dwID));
			sources_data.WriteUInt16(nPort);
			sources_data.WriteUInt32(0);
			sources_data.WriteUInt16(0);

			if (bUseSX2Format) {
				sources_data.WriteHash(file.ReadHash());
				sources_data.WriteUInt8(file.ReadUInt8());

		// v2: Added to keep track of too old seeds
		time_t time = (time_t)file.ReadUInt32();

		// Time frame is 2 hours. More than enough to compile
		// your new aMule version!
		if ((time + MIN2S(120)) >= wxDateTime::Now().GetTicks()) {
			valid_sources = true;

			// v1 has no time data. We can safely use the sources;
			// next time they will be saved with a timestamp.
			valid_sources = true;

	if (valid_sources) {
		sources_data.Seek(0);
		AddClientSources(&sources_data, SF_SOURCE_SEEDS, bUseSX2Format ? 4 : 1, bUseSX2Format);

	} catch (const CSafeIOException& e) {
		AddLogLineN(CFormat( _("Error reading partfile's seeds file (%s - %s): %s") )
void CPartFile::PartFileHashFinished(CKnownFile* result)

	m_lastDateChanged = result->m_lastDateChanged;
	bool errorfound = false;
	if (GetED2KPartHashCount() == 0){
		if (IsComplete(0, GetFileSize()-1)){
			if (result->GetFileHash() != GetFileHash()){
				AddLogLineN(CFormat(wxPLURAL(
					"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
					"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
					% result->GetFileHash().Encode()
					% GetFileHash().Encode() );
				AddGap(0, GetFileSize()-1);

		for (size_t i = 0; i < m_hashlist.size(); ++i){
			// Kry - trel_ar's completed parts check on rehashing.
			// Very nice feature: if a file is completed but the .part.met doesn't believe it,
			uint64 partStart = i * PARTSIZE;
			uint64 partEnd   = partStart + GetPartSize(i) - 1;
			if (!( i < result->GetHashCount() && (result->GetPartHash(i) == GetPartHash(i)))){
				if (IsComplete(i)) {
					if ( i < result->GetHashCount() )
						wronghash = result->GetPartHash(i);

					AddLogLineN(CFormat(wxPLURAL(
						"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
						"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
						GetED2KPartHashCount())
						% GetED2KPartHashCount()
						% wronghash.Encode()
						% GetPartHash(i).Encode() );
				if (!IsComplete(i)){
					AddLogLineN(CFormat( _("Found completed part (%i) in %s") )

					RemoveBlockFromList(partStart, partEnd);

		result->GetAICHHashset()->GetStatus() == AICH_HASHSETCOMPLETE &&
		status == PS_COMPLETING) {
		delete m_pAICHHashSet;
		m_pAICHHashSet = result->GetAICHHashset();
		result->SetAICHHashset(NULL);
		m_pAICHHashSet->SetOwner(this);
	else if (status == PS_COMPLETING) {
		AddDebugLogLineN(logPartFile,
			CFormat(wxT("Failed to store new AICH Hashset for completed file: %s"))

	if (status == PS_COMPLETING){
		AddLogLineN(CFormat( _("Finished rehashing %s") ) % GetFileName());
		SetStatus(PS_READY);
		SetStatus(PS_READY);

	theApp->sharedfiles->SafeAddKFile(this);
void CPartFile::AddGap(uint64 start, uint64 end)

	m_gaplist.AddGap(start, end);
	UpdateDisplayedInfo();

void CPartFile::AddGap(uint16 part)

	m_gaplist.AddGap(part);
	UpdateDisplayedInfo();

bool CPartFile::IsAlreadyRequested(uint64 start, uint64 end)

	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	for (; it != m_requestedblocks_list.end(); ++it) {
		Requested_Block_Struct* cur_block = *it;

		if ((start <= cur_block->EndOffset) && (end >= cur_block->StartOffset)) {

bool CPartFile::GetNextEmptyBlockInPart(uint16 partNumber, Requested_Block_Struct *result)

	// Find start of this part
	uint64 partStart = (PARTSIZE * partNumber);
	uint64 start = partStart;

	// What is the end limit of this block, i.e. can't go outside part (or filesize)
	uint64 partEnd = partStart + GetPartSize(partNumber) - 1;
	// Loop until find a suitable gap and return true, or no more gaps and return false
	CGapList::const_iterator it = m_gaplist.begin();

		uint64 gapStart, end;

		// Find the first gap from the start position
		for (; it != m_gaplist.end(); ++it) {
			gapStart = it.start();

			// Want gaps that overlap start<->partEnd
			if (gapStart <= partEnd && end >= start) {
			} else if (gapStart > partEnd) {

		// If no gaps after start, exit

		// Update start position if gap starts after current pos
		if (start < gapStart) {

		// Find end, keeping within the max block size and the part limit
		uint64 blockLimit = partStart + (BLOCKSIZE * (((start - partStart) / BLOCKSIZE) + 1)) - 1;
		if (end > blockLimit) {
		if (end > partEnd) {

		// If this gap has not already been requested, we have found a valid entry
		if (!IsAlreadyRequested(start, end)) {
			// Was this block to be returned
			if (result != NULL) {
				result->StartOffset = start;
				result->EndOffset = end;
				md4cpy(result->FileID, GetFileHash().GetHash());
				result->transferred = 0;

		// Reposition to end of that gap
		// If tried all gaps then break out of the loop
		if (end == partEnd) {

	// No suitable gap found
void CPartFile::FillGap(uint64 start, uint64 end)

	m_gaplist.FillGap(start, end);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();

void CPartFile::FillGap(uint16 part)

	m_gaplist.FillGap(part);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();

void CPartFile::UpdateCompletedInfos()

	uint64 allgaps = m_gaplist.GetGapSize();

	percentcompleted = (1.0 - (double)allgaps / GetFileSize()) * 100.0;
	completedsize = GetFileSize() - allgaps;
void CPartFile::WritePartStatus(CMemFile* file)

	uint16 parts = GetED2KPartCount();
	file->WriteUInt16(parts);

	while (done != parts){
		for (uint32 i = 0; i != 8; ++i) {
			if (IsComplete(done)) {
			if (done == parts) {
		file->WriteUInt8(towrite);

void CPartFile::WriteCompleteSourcesCount(CMemFile* file)

	file->WriteUInt16(m_nCompleteSourcesCount);

uint32 CPartFile::Process(uint32 reducedownload /*in percent*/, uint8 m_icounter)
	uint32 dwCurTick = ::GetTickCount();

	// If buffer size exceeds limit, or if not written within time limit, flush data
	if ( (m_nTotalBufferData > thePrefs::GetFileBufferSize()) ||
		 (dwCurTick > (m_nLastBufferFlushTime + BUFFER_TIME_LIMIT))) {
		// Avoid flushing while copying preview file
		if (!m_bPreviewing) {

	// check if we want new sources from server --> MOVED for 16.40 version
	old_trans = transferingsrc;

	if (m_icounter < 10) {
		// Update only downloading sources.
		CClientRefList::iterator it = m_downloadingSourcesList.begin();
		for( ; it != m_downloadingSourcesList.end(); ) {
			CUpDownClient *cur_src = it++->GetClient();
			if(cur_src->GetDownloadState() == DS_DOWNLOADING) {
				kBpsDown += cur_src->SetDownloadLimit(reducedownload);

		// Update all sources (including downloading sources)
		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
			CUpDownClient* cur_src = it++->GetClient();
			switch (cur_src->GetDownloadState()) {
				case DS_DOWNLOADING: {
					kBpsDown += cur_src->SetDownloadLimit(reducedownload);
				case DS_LOWTOLOWIP: {
					if (cur_src->HasLowID() && !theApp->CanDoCallback(cur_src->GetServerIP(), cur_src->GetServerPort())) {
						// If we are almost maxed on sources,
						// slowly remove these clients to see
						// if we can find a better source.
						if (((dwCurTick - lastpurgetime) > 30000) &&
							(GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8))) {
							RemoveSource(cur_src);
							lastpurgetime = dwCurTick;
						cur_src->SetDownloadState(DS_ONQUEUE);
				case DS_NONEEDEDPARTS: {
					// we try to purge no-needed sources, even without reaching the limit
					if((dwCurTick - lastpurgetime) > 40000) {
						if(!cur_src->SwapToAnotherFile(false, false, false, NULL)) {
							// however we only delete them if reaching the limit
							if (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) {
								RemoveSource(cur_src);
								lastpurgetime = dwCurTick;
								break; //Johnny-B - nothing more to do here (good eye!)
							lastpurgetime = dwCurTick;

					// doubled reasktime for no needed parts - save connections and traffic
					if ( !((!cur_src->GetLastAskedTime()) ||
						   (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME*2)) {

					// Recheck this client to see if still NNP..
					// Set to DS_NONE so that we force a TCP reask next time..
					cur_src->SetDownloadState(DS_NONE);

					if( cur_src->IsRemoteQueueFull()) {
						if( ((dwCurTick - lastpurgetime) > 60000) &&
							(GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) ) {
							RemoveSource( cur_src );
							lastpurgetime = dwCurTick;
							break; //Johnny-B - nothing more to do here (good eye!)

					// Give up to 1 min for UDP to respond..
					// If we are within one min on TCP, do not try..
					if ( theApp->IsConnected() &&
						 ( (!cur_src->GetLastAskedTime()) ||
						   (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME-20000)) {
						cur_src->UDPReaskForDownload();

				// No break here, since the next case takes care of asking for downloads.
				case DS_TOOMANYCONNS:
				case DS_WAITCALLBACK:
				case DS_WAITCALLBACKKAD: {
					if ( theApp->IsConnected() &&
						 ( (!cur_src->GetLastAskedTime()) ||
						   (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME)) {
						if (!cur_src->AskForDownload()) {
							// I left this break here just as a reminder
							// just in case we rearrange things..

	/* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
	if (IsA4AFAuto() && ((!m_LastNoNeededCheck) || (dwCurTick - m_LastNoNeededCheck > 900000))) {
		m_LastNoNeededCheck = dwCurTick;
		for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
			CUpDownClient *cur_source = it++->GetClient();
			uint8 download_state = cur_source->GetDownloadState();
			if( download_state != DS_DOWNLOADING
				&& cur_source->GetRequestFile()
				&& ((!cur_source->GetRequestFile()->IsA4AFAuto()) || download_state == DS_NONEEDEDPARTS))
				cur_source->SwapToAnotherFile(false, false, false, this);
	/* eMule 0.30c implementation, i give it a try (Creteil) END ... */

	// swap No needed partfiles if possible
	if (((old_trans == 0) && (transferingsrc > 0)) || ((old_trans > 0) && (transferingsrc == 0))) {

	// Kad source search
	if( GetMaxSourcePerFileUDP() > GetSourceCount()){
		// Once we can handle lowID users in Kad, we remove the second IsConnected
		if (theApp->downloadqueue->DoKademliaFileRequest() && (Kademlia::CKademlia::GetTotalFile() < KADEMLIATOTALFILE) && (dwCurTick > m_LastSearchTimeKad) && Kademlia::CKademlia::IsConnected() && theApp->IsConnected() && !IsStopped()){
			theApp->downloadqueue->SetLastKademliaFileRequest();

			if (GetKadFileSearchID()) {
				/* This will never happen anyway. We're talking a
				   1h timespan and searches are at max 45secs */
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);

			Kademlia::CUInt128 kadFileID(GetFileHash().GetHash());
			Kademlia::CSearch* pSearch = Kademlia::CSearchManager::PrepareLookup(Kademlia::CSearch::FILE, true, kadFileID);
			AddDebugLogLineN(logKadSearch, CFormat(wxT("Preparing a Kad Search for '%s'")) % GetFileName());
				AddDebugLogLineN(logKadSearch, CFormat(wxT("Kad lookup started for '%s'")) % GetFileName());
				if(m_TotalSearchesKad < 7) {
					m_TotalSearchesKad++;
				m_LastSearchTimeKad = dwCurTick + (KADEMLIAREASKTIME*m_TotalSearchesKad);
				SetKadFileSearchID(pSearch->GetSearchID());

		if(GetKadFileSearchID()) {
			Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);

	// check if we want new sources from server
	if ( !m_localSrcReqQueued &&
		 ( (!m_lastsearchtime) ||
		   (dwCurTick - m_lastsearchtime) > SERVERREASKTIME) &&
		 theApp->IsConnectedED2K() &&
		 thePrefs::GetMaxSourcePerFileSoft() > GetSourceCount() &&
		m_localSrcReqQueued = true;
		theApp->downloadqueue->SendLocalSrcRequest(this);

	// calculate datarate, set limit etc.

	// Kry - does the 3 / 30 difference produce too much flickering or CPU?
	if (m_count >= 30) {
		UpdateAutoDownPriority();
		UpdateDisplayedInfo();
		if(m_bPercentUpdated == false) {
			UpdateCompletedInfos();
		m_bPercentUpdated = false;

	// release file handle if unused for some time
	m_hpartfile.Release();

	return (uint32)(kBpsDown * 1024.0);
bool CPartFile::CanAddSource(uint32 userid, uint16 port, uint32 serverip, uint16 serverport, uint8* pdebug_lowiddropped, bool ed2kID)

	// The incoming ID could have the userid in the Hybrid format..
	uint32 hybridID = 0;
		if (IsLowID(userid)) {
			hybridID = wxUINT32_SWAP_ALWAYS(userid);
		if (!IsLowID(userid)) {
			userid = wxUINT32_SWAP_ALWAYS(userid);
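	// Editor's note (illustrative, not original code): the eD2K and hybrid (Kad) representations of
	// a client ID differ only in byte order, which is why the conversions above are plain 32-bit
	// byte swaps, e.g. wxUINT32_SWAP_ALWAYS(0x11223344) == 0x44332211.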
	// MOD Note: Do not change this part - Merkur
	if (theApp->IsConnectedED2K()) {
		if(::IsLowID(theApp->GetED2KID())) {
			if(theApp->GetED2KID() == userid && theApp->serverconnect->GetCurrentServer()->GetIP() == serverip && theApp->serverconnect->GetCurrentServer()->GetPort() == serverport) {
			if(theApp->GetPublicIP() == userid) {
			if(theApp->GetED2KID() == userid && thePrefs::GetPort() == port) {

	if (Kademlia::CKademlia::IsConnected()) {
		if(!Kademlia::CKademlia::IsFirewalled()) {
			if(Kademlia::CKademlia::GetIPAddress() == hybridID && thePrefs::GetPort() == port) {

	// This allows *.*.*.0 clients to not be removed if Ed2kID == false
	if ( IsLowID(hybridID) && theApp->IsFirewalled()) {
		if (pdebug_lowiddropped) {
			(*pdebug_lowiddropped)++;
void CPartFile::AddSources(CMemFile& sources, uint32 serverip, uint16 serverport, unsigned origin, bool bWithObfuscationAndHash)

	uint8 count = sources.ReadUInt8();
	uint8 debug_lowiddropped = 0;
	uint8 debug_possiblesources = 0;
	CMD4Hash achUserHash;

		// since we may receive multiple search source UDP results we have to "consume" all data of that packet
		AddDebugLogLineN(logPartFile, wxT("Trying to add sources for a stopped file"));
		sources.Seek(count*(4+2), wxFromCurrent);

	for (int i = 0; i != count; ++i) {
		uint32 userid = sources.ReadUInt32();
		uint16 port = sources.ReadUInt16();

		uint8 byCryptOptions = 0;
		if (bWithObfuscationAndHash){
			byCryptOptions = sources.ReadUInt8();
			if ((byCryptOptions & 0x80) > 0) {
				achUserHash = sources.ReadHash();

			if ((thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x01/*supported*/) > 0 && (byCryptOptions & 0x80) == 0)
				|| (thePrefs::IsClientCryptLayerSupported() && (byCryptOptions & 0x02/*requested*/) > 0 && (byCryptOptions & 0x80) == 0)) {
				AddDebugLogLineN(logPartFile, CFormat(wxT("Server didn't provide UserHash for source %u, even if it was expected to (or local obfuscation settings changed during serverconnect)")) % userid);
			} else if (!thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x02/*requested*/) == 0 && (byCryptOptions & 0x80) != 0) {
				AddDebugLogLineN(logPartFile, CFormat(wxT("Server provided UserHash for source %u, even if it wasn't expected to (or local obfuscation settings changed during serverconnect)")) % userid);

		// "Filter LAN IPs" and "IPfilter" the received sources IP addresses
		if (!IsLowID(userid)) {
			// check for 0-IP, localhost and optionally for LAN addresses
			if ( !IsGoodIP(userid, thePrefs::FilterLanIPs()) ) {
			if (theApp->ipfilter->IsFiltered(userid)) {

		if (!CanAddSource(userid, port, serverip, serverport, &debug_lowiddropped)) {

		if(thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
			++debug_possiblesources;
			CUpDownClient* newsource = new CUpDownClient(port, userid, serverip, serverport, this, true, true);

			newsource->SetSourceFrom((ESourceFrom)origin);
			newsource->SetConnectOptions(byCryptOptions, true, false);

			if ((byCryptOptions & 0x80) != 0) {
				newsource->SetUserHash(achUserHash);

			theApp->downloadqueue->CheckAndAddSource(this, newsource);
			AddDebugLogLineN(logPartFile, wxT("Consuming a packet because of max sources reached"));
			// Since we may receive multiple search source UDP results we have to "consume" all data of that packet
			// This '+1' is added because 'i' counts from 0.
			sources.Seek((count-(i+1))*(4+2), wxFromCurrent);
			if (GetKadFileSearchID()) {
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
void CPartFile::UpdatePartsInfo()

	if( !IsPartFile() ) {
		CKnownFile::UpdatePartsInfo();

	uint16 partcount = GetPartCount();
	bool flag = (time(NULL) - m_nCompleteSourcesTime > 0);

	// Ensure the frequency-list is ready
	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

	// Find number of available parts
	uint16 availablecounter = 0;
	for ( uint16 i = 0; i < partcount; ++i ) {
		if ( m_SrcpartFrequency[i] )

	if ( ( availablecounter == partcount ) && ( m_availablePartsCount < partcount ) ) {
		lastseencomplete = time(NULL);

	m_availablePartsCount = availablecounter;

		ArrayOfUInts16 count;
		count.reserve(GetSourceCount());

		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it ) {
			CUpDownClient* client = it->GetClient();
			if ( !client->GetUpPartStatus().empty() && client->GetUpPartCount() == partcount ) {
				count.push_back(client->GetUpCompleteSourcesCount());

		m_nCompleteSourcesCount = m_nCompleteSourcesCountLo = m_nCompleteSourcesCountHi = 0;

		for (uint16 i = 0; i < partcount; ++i) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
			else if( m_nCompleteSourcesCount > m_SrcpartFrequency[i]) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
		count.push_back(m_nCompleteSourcesCount);

		int32 n = count.size();

		std::sort(count.begin(), count.end(), std::less<uint16>());

		int32 i = n >> 1;		// (n / 2)
		int32 j = (n * 3) >> 2;		// (n * 3) / 4
		int32 k = (n * 7) >> 3;		// (n * 7) / 8

		// When still a part file, adjust your guesses by 20% to what you see..

			// Not many sources, so just use what you see..
			// welcome to 'plain stupid code'
			// m_nCompleteSourcesCount;
			m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
			m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
		} else if (n < 20) {
			// For the low guess and the normal guess:
			// If we see more sources than the guessed low and normal, use what we see.
			// If we see fewer sources than the guessed low, let the network account for 80%;
			// we account for 20% with what we see and make sure we are still above the normal.
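			// Editor's note (worked example, not original code): count[] is sorted, so count[i] is
			// roughly the median of what other clients report. If count[i] == 10 and the locally
			// derived m_nCompleteSourcesCount == 5, the low guess below becomes
			// (uint16)(10*0.8 + 5*0.2) == 9, i.e. 80% network observation, 20% local observation.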
			// Adjust 80% network and 20% what we see.
			if ( count[i] < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
				m_nCompleteSourcesCountLo =
					(uint16)((float)(count[i]*.8) +
						 (float)(m_nCompleteSourcesCount*.2));
			m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
			m_nCompleteSourcesCountHi =
				(uint16)((float)(count[j]*.8) +
					 (float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

			// Adjust network accounts for 80%, we account for 20% with what
			// we see and make sure we are still above the low.
			// Adjust network accounts for 80%, we account for 20% with what
			// we see and make sure we are still above the normal.
			m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
			m_nCompleteSourcesCount = (uint16)((float)(count[j]*.8) + (float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCount < m_nCompleteSourcesCountLo ) {
				m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
			m_nCompleteSourcesCountHi = (uint16)((float)(count[k]*.8) + (float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

	m_nCompleteSourcesTime = time(NULL) + (60);

	UpdateDisplayedInfo();
// [Maella -Enhanced Chunk Selection- (based on jicxicmic)]
bool CPartFile::GetNextRequestedBlock(CUpDownClient* sender,
			std::vector<Requested_Block_Struct*>& toadd, uint16& count)

	// The purpose of this function is to return a list of blocks (~180KB) to
	// download. To avoid a premature stop of the downloading, all blocks that
	// are requested from the same source must be located within the same
	// chunk (=> part ~9MB).
	//
	// The selection of the chunk to download is one of the CRITICAL parts of the
	// edonkey network. The selection algorithm must ensure the best spreading
	//
	// The selection is based on 4 criteria:
	//  1. Frequency of the chunk (availability): very rare chunks must be downloaded
	//     as quickly as possible to become a new available source.
	//  2. Parts used for preview (first + last chunk): preview or check a
	//     file (e.g. movie, mp3).
	//  3. Request state (downloading in progress): try to ask each source for another
	//     chunk. Spread the requests between all sources.
	//  4. Completion (shortest-to-complete): partially retrieved chunks should be
	//     completed before starting to download other ones.
	//
	// The frequency criterion defines three zones: very rare (<10%), rare (<50%)
	// and common (>50%). Inside each zone, the criteria have a specific weight, used
	// to calculate the priority of chunks. The chunk(s) with the highest
	// priority (highest=0, lowest=0xffff) is/are selected first.
	//
	//        very rare (preview)            rare                       common
	//  0% <---- +0 pt ----> 10% <----- +10000 pt -----> 50% <---- +20000 pt ----> 100%
	//  1. <------- frequency: +25*frequency pt ----------->
	//  2. <- preview: +1 pt --><-------------- preview: set to 10000 pt ------------->
	//  3.          <------ request: download in progress +20000 pt ------>
	//  4a. <- completion: 0% +100, 25% +75 .. 100% +0 pt --><-- !req => completion --->
	//  4b.                                                  <--- req => !completion -->
	//
	// Unrolled, the priority scale is:
	//
	//      0..xxxx   unrequested and requested very rare chunks
	//  10000..1xxxx  unrequested rare chunks + unrequested preview chunks
	//  20000..2xxxx  unrequested common chunks (priority to the most complete)
	//  30000..3xxxx  requested rare chunks + requested preview chunks
	//  40000..4xxxx  requested common chunks (priority to the least complete)
	//
	// This algorithm usually selects the rarest chunk(s) first. However, partially
	// complete chunk(s) that is/are close to completion may overtake the priority
	// (priority inversion).
	// For the common chunks, the algorithm tries to spread the download between
	// Check input parameters
	if ( sender->GetPartStatus().empty() ) {

	// Define and create the list of the chunks to download
	const uint16 partCount = GetPartCount();
	ChunkList chunksList;

	uint16 newBlockCount = 0;
	while(newBlockCount != count) {
		// Create a request block structure if a chunk has been previously selected
		if(sender->GetLastPartAsked() != 0xffff) {
			Requested_Block_Struct* pBlock = new Requested_Block_Struct;
			if(GetNextEmptyBlockInPart(sender->GetLastPartAsked(), pBlock) == true) {
				// Keep track of all pending requested blocks
				m_requestedblocks_list.push_back(pBlock);
				// Update list of blocks to return
				toadd.push_back(pBlock);
				// Skip end of loop (=> CPU load)
				// All blocks for this chunk have already been requested
				// => Try to select another chunk
				sender->SetLastPartAsked(0xffff);

		// Check if a new chunk must be selected (e.g. download starting, previous chunk complete)
		if(sender->GetLastPartAsked() == 0xffff) {
			// Quantify all chunks (create list of chunks to download)
			// This is done only one time and only if it is necessary (=> CPU load)
			if(chunksList.empty()) {
				// Identify the locally missing part(s) that this source has
				for(uint16 i = 0; i < partCount; ++i) {
					if(sender->IsPartAvailable(i) == true && GetNextEmptyBlockInPart(i, NULL) == true) {
						// Create a new entry for this chunk and add it to the list
						newEntry.frequency = m_SrcpartFrequency[i];
						chunksList.push_back(newEntry);

				// Check if any block(s) could be downloaded
				if(chunksList.empty()) {
					break; // Exit main loop while()

				// Define the bounds of the three zones (very rare, rare)
				// more depending on available sources
				if (GetSourceCount() > 800) {
				} else if (GetSourceCount() > 200) {
				uint16 limit = modif*GetSourceCount()/100;

				const uint16 veryRareBound = limit;
				const uint16 rareBound = 2*limit;
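				// Editor's note (worked example, not original code): with the rank formulas used
				// below, an unrequested very rare chunk seen at 3 sources, not used for preview and
				// 40% complete gets rank = 25*3 + 1 + (100 - 40) = 136, while an already requested
				// common chunk that is 40% complete gets rank = 40000 + 40 = 40040; the lower rank
				// (higher priority) chunk is selected first.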
				// Cache Preview state (Criterion 2)
				FileType type = GetFiletype(GetFileName());
				const bool isPreviewEnable =
					thePrefs::GetPreviewPrio() &&
					(type == ftArchive || type == ftVideo);

				// Collect and calculate criteria for all chunks
				for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
					Chunk& cur_chunk = *it;

					const uint64 uStart = cur_chunk.part * PARTSIZE;
					const uint64 uEnd = uStart + GetPartSize(cur_chunk.part) - 1;
					// Criterion 2. Parts used for preview
					// Remark: - We need to download the first part and the last part(s).
					//         - When the last part is very small, it's necessary to
					//           download the two last parts.
					bool critPreview = false;
					if(isPreviewEnable == true) {
						if(cur_chunk.part == 0) {
							critPreview = true; // First chunk
						} else if(cur_chunk.part == partCount-1) {
							critPreview = true; // Last chunk
						} else if(cur_chunk.part == partCount-2) {
							// Last chunk - 1 (only if last chunk is too small)
							const uint32 sizeOfLastChunk = GetFileSize() - uEnd;
							if(sizeOfLastChunk < PARTSIZE/3) {
								critPreview = true; // Last chunk - 1

					// Criterion 3. Request state (downloading in progress from other source(s))
					const bool critRequested =
						cur_chunk.frequency > veryRareBound &&
						IsAlreadyRequested(uStart, uEnd);

					// Criterion 4. Completion
					// PARTSIZE instead of GetPartSize() favours the last chunk - but that may be intentional
					uint32 partSize = PARTSIZE - m_gaplist.GetGapSize(cur_chunk.part);
					const uint16 critCompletion = (uint16)(partSize/(PARTSIZE/100)); // in [%]

					// Calculate priority with all criteria
					if(cur_chunk.frequency <= veryRareBound) {
						// 0..xxxx unrequested + requested very rare chunks
						cur_chunk.rank = (25 * cur_chunk.frequency) +		// Criterion 1
							((critPreview == true) ? 0 : 1) +		// Criterion 2
							(100 - critCompletion);				// Criterion 4
					} else if(critPreview == true) {
						// 10000..10100 unrequested preview chunks
						// 30000..30100 requested preview chunks
						cur_chunk.rank = ((critRequested == false) ? 10000 : 30000) +	// Criterion 3
							(100 - critCompletion);					// Criterion 4
					} else if(cur_chunk.frequency <= rareBound) {
						// 10101..1xxxx unrequested rare chunks
						// 30101..3xxxx requested rare chunks
						cur_chunk.rank = (25 * cur_chunk.frequency) +			// Criterion 1
							((critRequested == false) ? 10101 : 30101) +		// Criterion 3
							(100 - critCompletion);					// Criterion 4
						if(critRequested == false) {	// Criterion 3
							// 20000..2xxxx unrequested common chunks
							cur_chunk.rank = 20000 +		// Criterion 3
								(100 - critCompletion);		// Criterion 4
							// 40000..4xxxx requested common chunks
							// Remark: The weight of the completion criterion is inverted
							//         to spread the requests over the completing chunks.
							//         Without this, the chunk closest to completion would
							//         receive every new source.
							cur_chunk.rank = 40000 +		// Criterion 3
								(critCompletion);		// Criterion 4
		// Select the next chunk to download
		if (!chunksList.empty()) {
			// Find and count the chunk(s) with the highest priority
			uint16 chunkCount = 0;	// Number of found chunks with the same priority
			uint16 rank = 0xffff;	// Lowest rank (i.e. highest priority) found so far

			// Find the chunk(s) with the lowest rank
			for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
				const Chunk& cur_chunk = *it;
				if (cur_chunk.rank < rank) {
					rank = cur_chunk.rank;
				} else if (cur_chunk.rank == rank) {

			// Use a random pick so that not everybody tries to download the
			// same chunks at the same time (=> spread the selected chunks among clients)
			uint16 randomness = 1 + (int)(((float)(chunkCount - 1)) * rand() / (RAND_MAX + 1.0));

			for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
				const Chunk& cur_chunk = *it;
				if (cur_chunk.rank == rank) {
					if (randomness == 0) {
						// Selection process is over
						sender->SetLastPartAsked(cur_chunk.part);
						// Remark: this list might be reused up to *count times
						chunksList.erase(it);
						break;	// exit loop for()

			// There is no remaining chunk to download
			break;	// Exit main loop while()
	// Return the number of blocks
	count = newBlockCount;

	return (newBlockCount > 0);
}
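
// A minimal sketch (comment only, not built) of the tie-breaking used in
// GetNextRequestedBlock() above: rand() is scaled into a 1-based index among the
// equally ranked chunks, and the selection loop then picks the chunk at which the
// counter reaches zero. The helper name below is hypothetical, for illustration only.
//
//	#include <cstdlib>
//	static uint16 PickAmongTied(uint16 chunkCount)
//	{
//		// rand()/(RAND_MAX + 1.0) lies in [0,1), so the result lies in
//		// [1, chunkCount - 1] (or is exactly 1 when only one chunk is tied).
//		return 1 + (int)(((float)(chunkCount - 1)) * rand() / (RAND_MAX + 1.0));
//	}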
void CPartFile::RemoveBlockFromList(uint64 start, uint64 end)
{
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	while (it != m_requestedblocks_list.end()) {
		std::list<Requested_Block_Struct*>::iterator it2 = it++;

		if ((*it2)->StartOffset <= start && (*it2)->EndOffset >= end) {
			m_requestedblocks_list.erase(it2);
		}
	}
}


void CPartFile::RemoveAllRequestedBlocks(void)
{
	m_requestedblocks_list.clear();
}
void CPartFile::CompleteFile(bool bIsHashingDone)
{
	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
	}

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	AddDebugLogLineN( logPartFile, wxString( wxT("CPartFile::CompleteFile: Hash ") ) + ( bIsHashingDone ? wxT("done") : wxT("not done") ) );

	if (!bIsHashingDone) {
		SetStatus(PS_COMPLETING);

		CPath partFile = m_partmetfilename.RemoveExt();
		CThreadScheduler::AddTask(new CHashingTask(GetFilePath(), partFile, this));

	} else {
		m_is_A4AF_auto = false;
		SetStatus(PS_COMPLETING);
		// Guess I was wrong about not needing to spawn a thread ...
		// It is needed if the temp and incoming dirs are on different
		// partitions/drives and the file is large... [oz]
		PerformFileComplete();
	}

	if (thePrefs::ShowCatTabInfos()) {
		Notify_ShowUpdateCatTabTitles();
	}
	UpdateDisplayedInfo(true);
}
void CPartFile::CompleteFileEnded(bool errorOccured, const CPath& newname)
{
	if (errorOccured) {
		SetStatus(PS_ERROR);
		AddLogLineC(CFormat( _("Unexpected error while completing %s. File paused") ) % GetFileName() );
	} else {
		m_fullname = newname;

		SetFilePath(m_fullname.GetPath());
		SetFileName(m_fullname.GetFullName());
		m_lastDateChanged = CPath::GetModificationTime(m_fullname);

		SetStatus(PS_COMPLETE);

		// Remove from list of canceled files in case it was canceled once upon a time
		if (theApp->canceledfiles->Remove(GetFileHash())) {
			theApp->canceledfiles->Save();
		}

		// Mark as known (checks if it's already known),
		// also updates search files
		theApp->knownfiles->SafeAddKFile(this);

		// remove the file from the suspended uploads list
		theApp->uploadqueue->ResumeUpload(GetFileHash());
		theApp->downloadqueue->RemoveFile(this, true);
		theApp->sharedfiles->SafeAddKFile(this);
		UpdateDisplayedInfo(true);

		// republish that file to the ed2k-server to update the 'FT_COMPLETE_SOURCES' counter on the server.
		theApp->sharedfiles->RepublishFile(this);

		// Ensure that completed shows the correct value
		completedsize = GetFileSize();

		// clear the blackbox to free up memory
		m_CorruptionBlackBox->Free();

		AddLogLineC(CFormat( _("Finished downloading: %s") ) % GetFileName() );
	}

	theApp->downloadqueue->StartNextFile(this);
}
void CPartFile::PerformFileComplete()
{
	// add this file to the suspended uploads list
	theApp->uploadqueue->SuspendUpload(GetFileHash(), false);

	// close permanent handle
	if (m_hpartfile.IsOpened()) {
		m_hpartfile.Close();
	}

	// Schedule task for completion of the file
	CThreadScheduler::AddTask(new CCompletionTask(this));
}
void CPartFile::RemoveAllSources(bool bTryToSwap)
{
	for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end();) {
		CUpDownClient* cur_src = it++->GetClient();
		if (bTryToSwap) {
			if (!cur_src->SwapToAnotherFile(true, true, true, NULL)) {
				RemoveSource(cur_src, true, false);
				// If it was not swapped, it's not on any file anymore, and should die
			}
		} else {
			RemoveSource(cur_src, true, false);
		}
	}

	/* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
	// remove all A4AF links in sources pointing to this file
	if (!m_A4AFsrclist.empty()) {
		for (SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
			CUpDownClient* cur_src = it++->GetClient();
			if (cur_src->DeleteFileRequest(this)) {
				Notify_SourceCtrlRemoveSource(cur_src->ECID(), this);
			}
		}
		m_A4AFsrclist.clear();
	}
	/* eMule 0.30c implementation, i give it a try (Creteil) END ... */
	UpdateFileRatingCommentAvail();
}
void CPartFile::Delete()
{
	AddLogLineN(CFormat(_("Deleting file: %s")) % GetFileName());
	// Barry - Need to tell any connected clients to stop sending the file

	AddDebugLogLineN(logPartFile, wxT("\tStopped"));

	uint16 removed = theApp->uploadqueue->SuspendUpload(GetFileHash(), true);
	AddDebugLogLineN(logPartFile, CFormat(wxT("\tSuspended upload to %d clients")) % removed);
	theApp->sharedfiles->RemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from shared"));
	theApp->downloadqueue->RemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from download queue"));
	Notify_DownloadCtrlRemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from transferwnd"));
	if (theApp->canceledfiles->Add(GetFileHash())) {
		theApp->canceledfiles->Save();
	}
	AddDebugLogLineN(logPartFile, wxT("\tAdded to canceled file list"));
	theApp->searchlist->UpdateSearchFileByHash(GetFileHash());	// Update file in the search dialog if it's still open

	if (m_hpartfile.IsOpened()) {
		m_hpartfile.Close();
	}

	AddDebugLogLineN(logPartFile, wxT("\tClosed"));

	if (!CPath::RemoveFile(m_fullname)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("\tFailed to delete '%s'")) % m_fullname);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .part.met"));
	}

	if (!CPath::RemoveFile(m_PartPath)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % m_PartPath);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .part"));
	}

	CPath BAKName = m_fullname.AppendExt(PARTMET_BAK_EXT);
	if (!CPath::RemoveFile(BAKName)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % BAKName);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .BAK"));
	}

	CPath SEEDSName = m_fullname.AppendExt(wxT(".seeds"));
	if (SEEDSName.FileExists()) {
		if (CPath::RemoveFile(SEEDSName)) {
			AddDebugLogLineN(logPartFile, wxT("\tRemoved .seeds"));
		} else {
			AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % SEEDSName);
		}
	}

	AddDebugLogLineN(logPartFile, wxT("Done"));
}
2288 bool CPartFile::HashSinglePart(uint16 partnumber
)
2290 if ((GetHashCount() <= partnumber
) && (GetPartCount() > 1)) {
2291 AddLogLineC(CFormat( _("WARNING: Unable to hash downloaded part - hashset incomplete for '%s'") )
2293 m_hashsetneeded
= true;
2295 } else if ((GetHashCount() <= partnumber
) && GetPartCount() != 1) {
2296 AddLogLineC(CFormat( _("ERROR: Unable to hash downloaded part - hashset incomplete (%s). This should never happen")) % GetFileName() );
2297 m_hashsetneeded
= true;
2300 CMD4Hash hashresult
;
2301 uint64 offset
= PARTSIZE
* partnumber
;
2302 uint32 length
= GetPartSize(partnumber
);
2304 CreateHashFromFile(m_hpartfile
, offset
, length
, &hashresult
, NULL
);
2305 } catch (const CIOFailureException
& e
) {
2306 AddLogLineC(CFormat( _("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
2307 % partnumber
% length
% (offset
+length
) % GetFileName() % GetFileSize() % e
.what());
2308 SetStatus(PS_ERROR
);
2310 } catch (const CEOFException
& e
) {
2311 AddLogLineC(CFormat( _("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
2312 % partnumber
% length
% (offset
+length
) % GetFileName() % GetFileSize() % e
.what());
2316 if (GetPartCount() > 1) {
2317 if (hashresult
!= GetPartHash(partnumber
)) {
2318 AddDebugLogLineN(logPartFile
, CFormat( wxT("%s: Expected hash of part %d: %s")) % GetFileName() % partnumber
% GetPartHash(partnumber
).Encode() );
2319 AddDebugLogLineN(logPartFile
, CFormat( wxT("%s: Actual hash of part %d: %s")) % GetFileName() % partnumber
% hashresult
.Encode() );
2325 if (hashresult
!= m_abyFileHash
) {
bool CPartFile::IsCorruptedPart(uint16 partnumber)
{
	return std::find(m_corrupted_list.begin(), m_corrupted_list.end(), partnumber)
		!= m_corrupted_list.end();
}


void CPartFile::SetDownPriority(uint8 np, bool bSave, bool bRefresh)
{
	if (m_iDownPriority != np) {
		m_iDownPriority = np;

		UpdateDisplayedInfo(true);
void CPartFile::StopFile(bool bCancel)
{
	// Kry - Need to set it here to get into SetStatus(status) correctly

	// Barry - Need to tell any connected clients to stop sending the file

	m_LastSearchTimeKad = 0;
	m_TotalSearchesKad = 0;

	RemoveAllSources(true);

	UpdateDisplayedInfo(true);
}


void CPartFile::StopPausedFile()
{
	// Once an hour, remove any sources for files which are no longer active downloads
	switch (GetStatus()) {

		case PS_INSUFFICIENT:

			if (time(NULL) - m_iLastPausePurge > (60*60)) {
				m_iLastPausePurge = time(NULL);

	// release file handle if unused for some time
	m_hpartfile.Release();
}
void CPartFile::PauseFile(bool bInsufficient)
{
	if (status == PS_COMPLETE || status == PS_COMPLETING) {
		return;
	}

	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);
		// If we were in the middle of searching, reset timer so they can resume searching.
		m_LastSearchTimeKad = 0;
	}

	m_iLastPausePurge = time(NULL);

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	CPacket packet( OP_CANCELTRANSFER, 0, OP_EDONKEYPROT );
	for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = it++->GetClient();
		if (cur_src->GetDownloadState() == DS_DOWNLOADING) {
			if (!cur_src->GetSentCancelTransfer()) {
				theStats::AddUpOverheadOther( packet.GetPacketSize() );
				AddDebugLogLineN( logLocalClient, wxT("Local Client: OP_CANCELTRANSFER to ") + cur_src->GetFullIP() );
				cur_src->SendPacket( &packet, false, true );
				cur_src->SetSentCancelTransfer( true );
			}
			cur_src->SetDownloadState(DS_ONQUEUE);
			// Allow immediate reconnect on resume
			cur_src->ResetLastAskedTime();
		}
	}

	m_insufficient = bInsufficient;
void CPartFile::ResumeFile()
{
	if (status == PS_COMPLETE || status == PS_COMPLETING) {
		return;
	}

	if (m_insufficient && !CheckFreeDiskSpace()) {
		// Still not enough free disk space
		return;
	}

	m_insufficient = false;

	m_lastsearchtime = 0;

	SetActive(theApp->IsConnected());

	if (m_gaplist.IsComplete() && (GetStatus() == PS_ERROR)) {
		// The file has already been hashed at this point

	}

	UpdateDisplayedInfo(true);
}
bool CPartFile::CheckFreeDiskSpace(uint64 neededSpace)
{
	uint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if (free == static_cast<uint64>(wxInvalidOffset)) {
		// If GetFreeSpaceAt() fails, then the path probably does not exist.
		return false;
	}

	// The very least acceptable disk space is a single PART
	if (free < PARTSIZE) {
		// Always fail in this case, since we risk losing data if we try to
		// write on a full partition.
		return false;
	}

	// All other checks are only made if the user has enabled them
	if (thePrefs::IsCheckDiskspaceEnabled()) {
		neededSpace += thePrefs::GetMinFreeDiskSpace();

		// Due to the existence of sparse files, we cannot assume that
		// writes within the file don't cause new blocks to be allocated.
		// Therefore, we simply have to stop writing the moment the limit has
		// been reached.
		return free >= neededSpace;
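
	// Worked example (illustrative numbers only): with 500 MB free, a pending flush
	// needing 3 MB and a configured minimum of 100 MB, the check above compares
	// 500 MB >= 3 MB + 100 MB and allows the write; once free space drops below a
	// single PARTSIZE the function fails unconditionally, regardless of the user's
	// disk-space preference.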
void CPartFile::SetLastAnsweredTime()
{
	m_ClientSrcAnswered = ::GetTickCount();
}


void CPartFile::SetLastAnsweredTimeTimeout()
{
	m_ClientSrcAnswered = 2 * CONNECTION_LATENCY + ::GetTickCount() - SOURCECLIENTREASKS;
}
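
// Reading of the timeout trick above (a sketch of the arithmetic, based on the
// assumption that a reask is considered due once
//   ::GetTickCount() - m_ClientSrcAnswered > SOURCECLIENTREASKS):
// back-dating m_ClientSrcAnswered by (SOURCECLIENTREASKS - 2 * CONNECTION_LATENCY)
// makes the next reask become due roughly 2 * CONNECTION_LATENCY from now instead
// of a full reask period later. The constants themselves come from the protocol headers.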
2511 CPacket
*CPartFile::CreateSrcInfoPacket(const CUpDownClient
* forClient
, uint8 byRequestedVersion
, uint16 nRequestedOptions
)
2514 if ( m_SrcList
.empty() ) {
2519 return CKnownFile::CreateSrcInfoPacket(forClient
, byRequestedVersion
, nRequestedOptions
);
2522 if (((forClient
->GetRequestFile() != this)
2523 && (forClient
->GetUploadFile() != this)) || forClient
->GetUploadFileID() != GetFileHash()) {
2524 wxString file1
= _("Unknown");
2525 if (forClient
->GetRequestFile() && forClient
->GetRequestFile()->GetFileName().IsOk()) {
2526 file1
= forClient
->GetRequestFile()->GetFileName().GetPrintable();
2527 } else if (forClient
->GetUploadFile() && forClient
->GetUploadFile()->GetFileName().IsOk()) {
2528 file1
= forClient
->GetUploadFile()->GetFileName().GetPrintable();
2530 wxString file2
= _("Unknown");
2531 if (GetFileName().IsOk()) {
2532 file2
= GetFileName().GetPrintable();
2534 AddDebugLogLineN(logPartFile
, wxT("File mismatch on source packet (P) Sending: ") + file1
+ wxT(" From: ") + file2
);
2538 if ( !(GetStatus() == PS_READY
|| GetStatus() == PS_EMPTY
)) {
2542 const BitVector
& reqstatus
= forClient
->GetPartStatus();
2543 bool KnowNeededParts
= !reqstatus
.empty();
2544 //wxASSERT(rcvstatus.size() == GetPartCount()); // Obviously!
2545 if (KnowNeededParts
&& (reqstatus
.size() != GetPartCount())) {
2546 // Yuck. Same file but different part count? Seriously fucked up.
2547 // This happens rather often with reqstatus.size() == 0. Don't log then.
2548 if (reqstatus
.size()) {
2549 AddDebugLogLineN(logKnownFiles
, CFormat(wxT("Impossible situation: different partcounts: %i (client) and %i (file) for %s")) % reqstatus
.size() % GetPartCount() % GetFileName());
2554 CMemFile
data(1024);
2556 uint8 byUsedVersion
;
2558 if (forClient
->SupportsSourceExchange2() && byRequestedVersion
> 0){
2559 // the client uses SourceExchange2 and requested the highest version he knows
2560 // and we send the highest version we know, but of course not higher than his request
2561 byUsedVersion
= std::min(byRequestedVersion
, (uint8
)SOURCEEXCHANGE2_VERSION
);
2562 bIsSX2Packet
= true;
2563 data
.WriteUInt8(byUsedVersion
);
2565 // we don't support any special SX2 options yet, reserved for later use
2566 if (nRequestedOptions
!= 0) {
2567 AddDebugLogLineN(logKnownFiles
, CFormat(wxT("Client requested unknown options for SourceExchange2: %u")) % nRequestedOptions
);
2570 byUsedVersion
= forClient
->GetSourceExchange1Version();
2571 bIsSX2Packet
= false;
2572 if (forClient
->SupportsSourceExchange2()) {
2573 AddDebugLogLineN(logKnownFiles
, wxT("Client which announced to support SX2 sent SX1 packet instead"));
2579 data
.WriteHash(m_abyFileHash
);
2580 data
.WriteUInt16(nCount
);
2582 for (SourceSet::iterator it
= m_SrcList
.begin(); it
!= m_SrcList
.end(); ++it
) {
2584 CUpDownClient
* cur_src
= it
->GetClient();
2586 int state
= cur_src
->GetDownloadState();
2587 int valid
= ( state
== DS_DOWNLOADING
) || ( state
== DS_ONQUEUE
&& !cur_src
->IsRemoteQueueFull() );
2589 if ( cur_src
->HasLowID() || !valid
) {
2593 // only send source which have needed parts for this client if possible
2594 const BitVector
& srcstatus
= cur_src
->GetPartStatus();
2595 if ( !srcstatus
.empty() ) {
2596 //wxASSERT(srcstatus.size() == GetPartCount()); // Obviously!
2597 if (srcstatus
.size() != GetPartCount()) {
2600 if ( KnowNeededParts
) {
2601 // only send sources which have needed parts for this client
2602 for (int x
= 0; x
< GetPartCount(); ++x
) {
2603 if (srcstatus
.get(x
) && !reqstatus
.get(x
)) {
			// if we don't know the needed parts for this client,
			// return any source; currently a client sends its
			// file status only after it has at least one complete part
2612 if (srcstatus
.size() != GetPartCount()) {
2615 for (int x
= 0; x
< GetPartCount(); ++x
){
2616 if (srcstatus
.get(x
)) {
2626 if(forClient
->GetSourceExchange1Version() > 2) {
2627 dwID
= cur_src
->GetUserIDHybrid();
2629 dwID
= wxUINT32_SWAP_ALWAYS(cur_src
->GetUserIDHybrid());
2631 data
.WriteUInt32(dwID
);
2632 data
.WriteUInt16(cur_src
->GetUserPort());
2633 data
.WriteUInt32(cur_src
->GetServerIP());
2634 data
.WriteUInt16(cur_src
->GetServerPort());
		if (byUsedVersion >= 2) {
			data.WriteHash(cur_src->GetUserHash());
		}

		if (byUsedVersion >= 4) {
			// CryptSettings - SourceExchange V4
			// 1 CryptLayer Required
			// 1 CryptLayer Requested
			// 1 CryptLayer Supported
			const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
			const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
			const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
			const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
			data.WriteUInt8(byCryptOptions);
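
			// Bit layout written above (derived directly from the shifts):
			//   bit 0 = CryptLayer supported, bit 1 = requested, bit 2 = required.
			// Example: a source that supports and requests, but does not require,
			// the crypt layer is encoded as 0b011 = 3.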
	data.Seek(bIsSX2Packet ? 17 : 16, wxFromStart);
	data.WriteUInt16(nCount);

	CPacket* result = new CPacket(data, OP_EMULEPROT, bIsSX2Packet ? OP_ANSWERSOURCES2 : OP_ANSWERSOURCES);

	// 16+2+501*(4+2+4+2+16) = 14046 bytes max.
	if (result->GetPacketSize() > 354) {
		result->PackPacket();
	}
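
	// Size estimate from the comment above, spelled out: each source record is
	// 4 (ID) + 2 (port) + 4 (server IP) + 2 (server port) + 16 (user hash) = 28 bytes,
	// so 501 sources plus the 16-byte file hash and 2-byte count give
	// 18 + 501 * 28 = 14046 bytes before optional packing.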
2674 void CPartFile::AddClientSources(CMemFile
* sources
, unsigned nSourceFrom
, uint8 uClientSXVersion
, bool bSourceExchange2
, const CUpDownClient
* /*pClient*/)
2683 uint8 uPacketSXVersion
= 0;
2684 if (!bSourceExchange2
) {
2685 nCount
= sources
->ReadUInt16();
		// Check if the data size matches the 'nCount' for v1 or v2 and, if needed, correct the source
		// exchange version while reading the packet data. Otherwise we run a higher risk of dealing
		// with wrong source data, user hashes and, finally, duplicate sources.
2690 uint32 uDataSize
= sources
->GetLength() - sources
->GetPosition();
2692 if ((uint32
)(nCount
*(4+2+4+2)) == uDataSize
) { //Checks if version 1 packet is correct size
2693 if(uClientSXVersion
!= 1) {
2696 uPacketSXVersion
= 1;
2697 } else if ((uint32
)(nCount
*(4+2+4+2+16)) == uDataSize
) { // Checks if version 2&3 packet is correct size
2698 if (uClientSXVersion
== 2) {
2699 uPacketSXVersion
= 2;
2700 } else if (uClientSXVersion
> 2) {
2701 uPacketSXVersion
= 3;
2705 } else if (nCount
*(4+2+4+2+16+1) == uDataSize
) {
2706 if (uClientSXVersion
!= 4 ) {
2709 uPacketSXVersion
= 4;
2711 // If v5 inserts additional data (like v2), the above code will correctly filter those packets.
2712 // If v5 appends additional data after <count>(<Sources>)[count], we are in trouble with the
2713 // above code. Though a client which does not understand v5+ should never receive such a packet.
2714 AddDebugLogLineN(logClient
, CFormat(wxT("Received invalid source exchange packet (v%u) of data size %u for %s")) % uClientSXVersion
% uDataSize
% GetFileName());
		// We only check whether the version is known by us and do a quick sanity check on known
		// versions other than SX1; the packet will be ignored if any error appears, since it can't
		// be a "misunderstanding" anymore.
		if (uClientSXVersion > SOURCEEXCHANGE2_VERSION || uClientSXVersion == 0) {
			AddDebugLogLineN(logPartFile, CFormat(wxT("Invalid source exchange type version: %i")) % uClientSXVersion);

		// all known versions use the first 2 bytes as count and unknown versions are already filtered above
		nCount = sources->ReadUInt16();
		uint32 uDataSize = (uint32)(sources->GetLength() - sources->GetPosition());
		bool bError = false;
		switch (uClientSXVersion) {

				bError = nCount * (4+2+4+2) != uDataSize;

				bError = nCount * (4+2+4+2+16) != uDataSize;

				bError = nCount * (4+2+4+2+16+1) != uDataSize;

			AddDebugLogLineN(logPartFile, wxT("Invalid source exchange data size."));

		uPacketSXVersion = uClientSXVersion;
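
		// Expected payload sizes checked above, per source-exchange version
		// (derived from the arithmetic in the switch):
		//   v1        : 12 bytes per source (4+2+4+2)
		//   v2 and v3 : 28 bytes per source (adds the 16-byte user hash)
		//   v4        : 29 bytes per source (adds the 1-byte crypt options)
		// so a packet advertising nCount sources must carry exactly nCount times
		// that record size after the 2-byte count field.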
2753 for (uint16 i
= 0;i
!= nCount
;++i
) {
2755 uint32 dwID
= sources
->ReadUInt32();
2756 uint16 nPort
= sources
->ReadUInt16();
2757 uint32 dwServerIP
= sources
->ReadUInt32();
2758 uint16 nServerPort
= sources
->ReadUInt16();
2761 if (uPacketSXVersion
> 1) {
2762 userHash
= sources
->ReadHash();
2765 uint8 byCryptOptions
= 0;
2766 if (uPacketSXVersion
>= 4) {
2767 byCryptOptions
= sources
->ReadUInt8();
		// Clients send IDs in the Hybrid format, so highID clients with *.*.*.0 won't be falsely switched to a lowID.
2772 if (uPacketSXVersion
>= 3) {
2773 dwIDED2K
= wxUINT32_SWAP_ALWAYS(dwID
);
2778 // check the HighID(IP) - "Filter LAN IPs" and "IPfilter" the received sources IP addresses
2779 if (!IsLowID(dwID
)) {
2780 if (!IsGoodIP(dwIDED2K
, thePrefs::FilterLanIPs())) {
2781 // check for 0-IP, localhost and optionally for LAN addresses
2782 AddDebugLogLineN(logIPFilter
, CFormat(wxT("Ignored source (IP=%s) received via %s - bad IP")) % Uint32toStringIP(dwIDED2K
) % OriginToText(nSourceFrom
));
2785 if (theApp
->ipfilter
->IsFiltered(dwIDED2K
)) {
2786 AddDebugLogLineN(logIPFilter
, CFormat(wxT("Ignored source (IP=%s) received via %s - IPFilter")) % Uint32toStringIP(dwIDED2K
) % OriginToText(nSourceFrom
));
2789 if (theApp
->clientlist
->IsBannedClient(dwIDED2K
)){
2794 // additionally check for LowID and own IP
2795 if (!CanAddSource(dwID
, nPort
, dwServerIP
, nServerPort
, NULL
, false)) {
2796 AddDebugLogLineN(logIPFilter
, CFormat(wxT("Ignored source (IP=%s) received via source exchange")) % Uint32toStringIP(dwIDED2K
));
2800 if(thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
2801 CUpDownClient
* newsource
= new CUpDownClient(nPort
,dwID
,dwServerIP
,nServerPort
,this, (uPacketSXVersion
< 3), true);
2802 if (uPacketSXVersion
> 1) {
2803 newsource
->SetUserHash(userHash
);
2806 if (uPacketSXVersion
>= 4) {
2807 newsource
->SetConnectOptions(byCryptOptions
, true, false);
2810 newsource
->SetSourceFrom((ESourceFrom
)nSourceFrom
);
2811 theApp
->downloadqueue
->CheckAndAddSource(this,newsource
);
void CPartFile::UpdateAutoDownPriority()
{
	if (!IsAutoDownPriority()) {
		return;
	}

	if (GetSourceCount() <= theApp->downloadqueue->GetRareFileThreshold()) {
		if (GetDownPriority() != PR_HIGH)
			SetDownPriority(PR_HIGH, false, false);
	} else if (GetSourceCount() < theApp->downloadqueue->GetCommonFileThreshold()) {
		if (GetDownPriority() != PR_NORMAL)
			SetDownPriority(PR_NORMAL, false, false);
	} else {
		if (GetDownPriority() != PR_LOW)
			SetDownPriority(PR_LOW, false, false);
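
	// Illustration with hypothetical thresholds: if the rare-file threshold is 20
	// and the common-file threshold is 100, a file with 15 sources is auto-set to
	// PR_HIGH, one with 60 sources to PR_NORMAL, and one with 250 sources to PR_LOW.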
// Making this function return a higher value when more sources have the extended
// protocol will force you to ask a larger variety of people for sources

int CPartFile::GetCommonFilePenalty()
{
	//TODO: implement, but never return less than MINCOMMONPENALTY!
	return MINCOMMONPENALTY;
}
/* Barry - Replaces BlockReceived()

   Originally this only wrote to disk when a full 180k block
   had been received from a client, and only asked for data in
   180k blocks.

   This meant that on average 90k was lost for every connection
   to a client data source. That is a lot of wasted data.

   To reduce the lost data, packets are now written to a buffer
   and flushed to disk regularly regardless of size downloaded.
   This includes compressed packets.

   Data is also requested only where gaps are, not in 180k blocks.
   The requests will still not exceed 180k, but may be smaller to
   fill a gap.
*/

// Kry - transize is 32bits, no packet can be more than that (this is
// compressed size). Even 32bits is too much imho. As for the return size,
// look at lenData below.
uint32 CPartFile::WriteToBuffer(uint32 transize, byte* data, uint64 start, uint64 end, Requested_Block_Struct* block, const CUpDownClient* client)
{
	// Increment transferred bytes counter for this file
	transferred += transize;

	// This is needed a few times
	// Kry - should not need a uint64 here - no block is larger than
	// 2GB even after being uncompressed.
	uint32 lenData = (uint32)(end - start + 1);

	if (lenData > transize) {
		m_iGainDueToCompression += lenData - transize;
	}
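
	// Example of the compression bookkeeping above: if a client sends a packet whose
	// payload decompresses from transize = 120 KB to lenData = 180 KB, the difference
	// of 60 KB is credited to m_iGainDueToCompression (numbers are illustrative).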
2880 // Occasionally packets are duplicated, no point writing it twice
2881 if (IsComplete(start
, end
)) {
2882 AddDebugLogLineN(logPartFile
,
2883 CFormat(wxT("File '%s' has already been written from %u to %u"))
2884 % GetFileName() % start
% end
);
2888 // security sanitize check to make sure we do not write anything into an already hashed complete chunk
2889 const uint64 nStartChunk
= start
/ PARTSIZE
;
2890 const uint64 nEndChunk
= end
/ PARTSIZE
;
2891 if (IsComplete(nStartChunk
)) {
2892 AddDebugLogLineN(logPartFile
, CFormat(wxT("Received data touches already hashed chunk - ignored (start): %u-%u; File=%s")) % start
% end
% GetFileName());
2894 } else if (nStartChunk
!= nEndChunk
) {
2895 if (IsComplete(nEndChunk
)) {
2896 AddDebugLogLineN(logPartFile
, CFormat(wxT("Received data touches already hashed chunk - ignored (end): %u-%u; File=%s")) % start
% end
% GetFileName());
2899 AddDebugLogLineN(logPartFile
, CFormat(wxT("Received data crosses chunk boundaries: %u-%u; File=%s")) % start
% end
% GetFileName());
2903 // log transferinformation in our "blackbox"
2904 m_CorruptionBlackBox
->TransferredData(start
, end
, client
->GetIP());
2906 // Create a new buffered queue entry
2907 PartFileBufferedData
*item
= new PartFileBufferedData(m_hpartfile
, data
, start
, end
, block
);
2909 // Add to the queue in the correct position (most likely the end)
2912 std::list
<PartFileBufferedData
*>::iterator it
= m_BufferedData_list
.begin();
2913 for (; it
!= m_BufferedData_list
.end(); ++it
) {
2914 PartFileBufferedData
* queueItem
= *it
;
2916 if (item
->end
<= queueItem
->end
) {
2917 if (it
!= m_BufferedData_list
.begin()) {
2920 m_BufferedData_list
.insert(--it
, item
);
2928 m_BufferedData_list
.push_front(item
);
2931 // Increment buffer size marker
2932 m_nTotalBufferData
+= lenData
;
2934 // Mark this small section of the file as filled
2935 FillGap(item
->start
, item
->end
);
2937 // Update the flushed mark on the requested block
2938 // The loop here is unfortunate but necessary to detect deleted blocks.
2940 std::list
<Requested_Block_Struct
*>::iterator it2
= m_requestedblocks_list
.begin();
2941 for (; it2
!= m_requestedblocks_list
.end(); ++it2
) {
2942 if (*it2
== item
->block
) {
2943 item
->block
->transferred
+= lenData
;
2947 if (m_gaplist
.IsComplete()) {
2951 // Return the length of data written to the buffer
2955 void CPartFile::FlushBuffer(bool fromAICHRecoveryDataAvailable
)
2957 m_nLastBufferFlushTime
= GetTickCount();
2959 if (m_BufferedData_list
.empty()) {
2964 uint32 partCount
= GetPartCount();
2965 // Remember which parts need to be checked at the end of the flush
2966 std::vector
<bool> changedPart(partCount
, false);
2968 // Ensure file is big enough to write data to (the last item will be the furthest from the start)
2969 if (!CheckFreeDiskSpace(m_nTotalBufferData
)) {
2970 // Not enough free space to write the last item, bail
2971 AddLogLineC(CFormat( _("WARNING: Not enough free disk-space! Pausing file: %s") ) % GetFileName());
2977 // Loop through queue
2978 while ( !m_BufferedData_list
.empty() ) {
2979 // Get top item and remove it from the queue
2980 CScopedPtr
<PartFileBufferedData
> item(m_BufferedData_list
.front());
2981 m_BufferedData_list
.pop_front();
2983 // This is needed a few times
2984 wxASSERT((item
->end
- item
->start
) < 0xFFFFFFFF);
2985 uint32 lenData
= (uint32
)(item
->end
- item
->start
+ 1);
2987 // SLUGFILLER: SafeHash - could be more than one part
2988 for (uint32 curpart
= (item
->start
/PARTSIZE
); curpart
<= (item
->end
/PARTSIZE
); ++curpart
) {
2989 wxASSERT(curpart
< partCount
);
2990 changedPart
[curpart
] = true;
2992 // SLUGFILLER: SafeHash
2994 // Go to the correct position in file and write block of data
2996 item
->area
.FlushAt(m_hpartfile
, item
->start
, lenData
);
2997 // Decrease buffer size
2998 m_nTotalBufferData
-= lenData
;
2999 } catch (const CIOFailureException
& e
) {
3000 AddDebugLogLineC(logPartFile
, wxT("Error while saving part-file: ") + e
.what());
3001 SetStatus(PS_ERROR
);
3002 // No need to bang your head against it again and again if it has already failed.
3003 DeleteContents(m_BufferedData_list
);
3004 m_nTotalBufferData
= 0;
3010 // Update last-changed date
3011 m_lastDateChanged
= wxDateTime::GetTimeNow();
		// Partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			// It's a "last chance" correction; the real bugfix has to be applied 'somewhere' else.
			m_hpartfile.SetLength(GetFileSize());
		}
3019 } catch (const CIOFailureException
& e
) {
3020 AddDebugLogLineC(logPartFile
,
3021 CFormat(wxT("Error while truncating part-file (%s): %s"))
3022 % m_PartPath
% e
.what());
3023 SetStatus(PS_ERROR
);
3028 // Check each part of the file
3029 for (uint16 partNumber
= 0; partNumber
< partCount
; ++partNumber
) {
3030 if (changedPart
[partNumber
] == false) {
3034 uint32 partRange
= GetPartSize(partNumber
) - 1;
3036 // Is this 9MB part complete
3037 if (IsComplete(partNumber
)) {
3039 if (!HashSinglePart(partNumber
)) {
3040 AddLogLineC(CFormat(
3041 _("Downloaded part %i is corrupt in file: %s") ) % partNumber
% GetFileName() );
3043 // add part to corrupted list, if not already there
3044 if (!IsCorruptedPart(partNumber
)) {
3045 m_corrupted_list
.push_back(partNumber
);
3047 // request AICH recovery data
3048 // Don't if called from the AICHRecovery. It's already there and would lead to an infinite recursion.
3049 if (!fromAICHRecoveryDataAvailable
) {
3050 RequestAICHRecovery(partNumber
);
3052 // Reduce transferred amount by corrupt amount
3053 m_iLostDueToCorruption
+= (partRange
+ 1);
3055 if (!m_hashsetneeded
) {
3056 AddDebugLogLineN(logPartFile
, CFormat(
3057 wxT("Finished part %u of '%s'")) % partNumber
% GetFileName());
3060 // tell the blackbox about the verified data
3061 m_CorruptionBlackBox
->VerifiedData(true, partNumber
, 0, partRange
);
3063 // if this part was successfully completed (although ICH is active), remove from corrupted list
3064 EraseFirstValue(m_corrupted_list
, partNumber
);
3066 if (status
== PS_EMPTY
) {
3067 if (theApp
->IsRunning()) { // may be called during shutdown!
3068 if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded
) {
3069 // Successfully completed part, make it available for sharing
3070 SetStatus(PS_READY
);
3071 theApp
->sharedfiles
->SafeAddKFile(this);
3076 } else if ( IsCorruptedPart(partNumber
) && // corrupted part:
3077 (thePrefs::IsICHEnabled() // old ICH: rehash whenever we have new data hoping it will be good now
3078 || fromAICHRecoveryDataAvailable
)) {// new AICH: one rehash right before performing it (maybe it's already good)
3079 // Try to recover with minimal loss
3080 if (HashSinglePart(partNumber
)) {
3081 ++m_iTotalPacketsSavedDueToICH
;
3083 uint64 uMissingInPart
= m_gaplist
.GetGapSize(partNumber
);
3084 FillGap(partNumber
);
3085 RemoveBlockFromList(PARTSIZE
*partNumber
,(PARTSIZE
*partNumber
+ partRange
));
3087 // tell the blackbox about the verified data
3088 m_CorruptionBlackBox
->VerifiedData(true, partNumber
, 0, partRange
);
3090 // remove from corrupted list
3091 EraseFirstValue(m_corrupted_list
, partNumber
);
3093 AddLogLineC(CFormat( _("ICH: Recovered corrupted part %i for %s -> Saved bytes: %s") )
3096 % CastItoXBytes(uMissingInPart
));
3098 if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded
) {
3099 if (status
== PS_EMPTY
) {
3100 // Successfully recovered part, make it available for sharing
3101 SetStatus(PS_READY
);
3102 if (theApp
->IsRunning()) // may be called during shutdown!
3103 theApp
->sharedfiles
->SafeAddKFile(this);
3113 if (theApp
->IsRunning()) { // may be called during shutdown!
3114 // Is this file finished ?
3115 if (m_gaplist
.IsComplete()) {
3116 CompleteFile(false);
3122 // read data for upload, return false on error
3123 bool CPartFile::ReadData(CFileArea
& area
, uint64 offset
, uint32 toread
)
3126 if (offset
+ toread
> GetFileSize()) {
3127 AddDebugLogLineN(logPartFile
, CFormat(wxT("tried to read %d bytes past eof of %s"))
3128 % (offset
+ toread
- GetFileSize()) % GetFileName());
3133 area
.ReadAt(m_hpartfile
, offset
, toread
);
3134 // if it fails it throws (which the caller should catch)
3139 void CPartFile::UpdateFileRatingCommentAvail()
3141 bool prevComment
= m_hasComment
;
3142 int prevRating
= m_iUserRating
;
3144 m_hasComment
= false;
3146 int ratingCount
= 0;
3148 SourceSet::iterator it
= m_SrcList
.begin();
3149 for (; it
!= m_SrcList
.end(); ++it
) {
3150 CUpDownClient
* cur_src
= it
->GetClient();
3152 if (!cur_src
->GetFileComment().IsEmpty()) {
3153 if (thePrefs::IsCommentFiltered(cur_src
->GetFileComment())) {
3156 m_hasComment
= true;
3159 uint8 rating
= cur_src
->GetFileRating();
3161 wxASSERT(rating
<= 5);
3164 m_iUserRating
+= rating
;
3169 m_iUserRating
/= ratingCount
;
3170 wxASSERT(m_iUserRating
> 0 && m_iUserRating
<= 5);
3173 if ((prevComment
!= m_hasComment
) || (prevRating
!= m_iUserRating
)) {
3174 UpdateDisplayedInfo();
3179 void CPartFile::SetCategory(uint8 cat
)
3181 wxASSERT( cat
< theApp
->glob_prefs
->GetCatCount() );
3187 bool CPartFile::RemoveSource(CUpDownClient
* toremove
, bool updatewindow
, bool bDoStatsUpdate
)
3189 wxASSERT( toremove
);
3191 bool result
= theApp
->downloadqueue
->RemoveSource( toremove
, updatewindow
, bDoStatsUpdate
);
3193 // Check if the client should be deleted, but not if the client is already dying
3194 if ( !toremove
->GetSocket() && !toremove
->HasBeenDeleted() ) {
3195 if ( toremove
->Disconnected(wxT("RemoveSource - purged")) ) {
3196 toremove
->Safe_Delete();
3203 void CPartFile::AddDownloadingSource(CUpDownClient
* client
)
3205 CClientRefList::iterator it
=
3206 std::find(m_downloadingSourcesList
.begin(), m_downloadingSourcesList
.end(), CCLIENTREF(client
, wxEmptyString
));
3207 if (it
== m_downloadingSourcesList
.end()) {
3208 m_downloadingSourcesList
.push_back(CCLIENTREF(client
, wxT("CPartFile::AddDownloadingSource")));
3213 void CPartFile::RemoveDownloadingSource(CUpDownClient
* client
)
3215 CClientRefList::iterator it
=
3216 std::find(m_downloadingSourcesList
.begin(), m_downloadingSourcesList
.end(), CCLIENTREF(client
, wxEmptyString
));
3217 if (it
!= m_downloadingSourcesList
.end()) {
3218 m_downloadingSourcesList
.erase(it
);
3223 uint64
CPartFile::GetNeededSpace()
3226 uint64 length
= m_hpartfile
.GetLength();
3228 if (length
> GetFileSize()) {
3229 return 0; // Shouldn't happen, but just in case
3232 return GetFileSize() - length
;
3233 } catch (const CIOFailureException
& e
) {
3234 AddDebugLogLineC(logPartFile
,
3235 CFormat(wxT("Error while retrieving file-length (%s): %s"))
3236 % m_PartPath
% e
.what());
3237 SetStatus(PS_ERROR
);
3242 void CPartFile::SetStatus(uint8 in
)
3244 // PAUSED and INSUFFICIENT have extra flag variables m_paused and m_insufficient
3245 // - they are never to be stored in status
3246 wxASSERT( in
!= PS_PAUSED
&& in
!= PS_INSUFFICIENT
);
3250 if (theApp
->IsRunning()) {
3251 UpdateDisplayedInfo( true );
3253 if ( thePrefs::ShowCatTabInfos() ) {
3254 Notify_ShowUpdateCatTabTitles();
3256 Notify_DownloadCtrlSort();
3261 void CPartFile::RequestAICHRecovery(uint16 nPart
)
3264 if ( !m_pAICHHashSet
->HasValidMasterHash() ||
3265 (m_pAICHHashSet
->GetStatus() != AICH_TRUSTED
&& m_pAICHHashSet
->GetStatus() != AICH_VERIFIED
)){
3266 AddDebugLogLineN( logAICHRecovery
, wxT("Unable to request AICH Recoverydata because we have no trusted Masterhash") );
3269 if (GetPartSize(nPart
) <= EMBLOCKSIZE
)
3271 if (CAICHHashSet::IsClientRequestPending(this, nPart
)){
3272 AddDebugLogLineN( logAICHRecovery
, wxT("RequestAICHRecovery: Already a request for this part pending"));
3276 // first check if we have already the recoverydata, no need to rerequest it then
3277 if (m_pAICHHashSet
->IsPartDataAvailable(nPart
*PARTSIZE
)){
3278 AddDebugLogLineN( logAICHRecovery
, wxT("Found PartRecoveryData in memory"));
3279 AICHRecoveryDataAvailable(nPart
);
3283 wxASSERT( nPart
< GetPartCount() );
	// find some random client which supports AICH to ask for the blocks
	// first let's see how many we have at all; we prefer high ID clients very much
3286 uint32 cAICHClients
= 0;
3287 uint32 cAICHLowIDClients
= 0;
3288 for ( SourceSet::iterator it
= m_SrcList
.begin(); it
!= m_SrcList
.end(); ++it
) {
3289 CUpDownClient
* pCurClient
= it
->GetClient();
3290 if ( pCurClient
->IsSupportingAICH() &&
3291 pCurClient
->GetReqFileAICHHash() != NULL
&&
3292 !pCurClient
->IsAICHReqPending()
3293 && (*pCurClient
->GetReqFileAICHHash()) == m_pAICHHashSet
->GetMasterHash())
3295 if (pCurClient
->HasLowID()) {
3296 ++cAICHLowIDClients
;
3302 if ((cAICHClients
| cAICHLowIDClients
) == 0){
3303 AddDebugLogLineN( logAICHRecovery
, wxT("Unable to request AICH Recoverydata because found no client who supports it and has the same hash as the trusted one"));
3306 uint32 nSeclectedClient
;
3307 if (cAICHClients
> 0) {
3308 nSeclectedClient
= (rand() % cAICHClients
) + 1;
3310 nSeclectedClient
= (rand() % cAICHLowIDClients
) + 1;
3312 CUpDownClient
* pClient
= NULL
;
3313 for ( SourceSet::iterator it
= m_SrcList
.begin(); it
!= m_SrcList
.end(); ++it
) {
3314 CUpDownClient
* pCurClient
= it
->GetClient();
3315 if (pCurClient
->IsSupportingAICH() && pCurClient
->GetReqFileAICHHash() != NULL
&& !pCurClient
->IsAICHReqPending()
3316 && (*pCurClient
->GetReqFileAICHHash()) == m_pAICHHashSet
->GetMasterHash())
3318 if (cAICHClients
> 0){
3319 if (!pCurClient
->HasLowID())
3323 wxASSERT( pCurClient
->HasLowID());
3326 if (nSeclectedClient
== 0){
3327 pClient
= pCurClient
;
3332 if (pClient
== NULL
){
	AddDebugLogLineN( logAICHRecovery, CFormat( wxT("Requesting AICH Hash (%s) from client %s") ) % ( cAICHClients ? wxT("HighId") : wxT("LowID") ) % pClient->GetClientFullInfo() );
	pClient->SendAICHRequest(this, nPart);
}
3343 void CPartFile::AICHRecoveryDataAvailable(uint16 nPart
)
3345 if (GetPartCount() < nPart
){
3351 uint32 length
= GetPartSize(nPart
);
3352 // if the part was already ok, it would now be complete
3353 if (IsComplete(nPart
)) {
3354 AddDebugLogLineN(logAICHRecovery
, CFormat(wxT("Processing AICH Recovery data: The part (%u) is already complete, canceling")) % nPart
);
3360 CAICHHashTree
* pVerifiedHash
= m_pAICHHashSet
->m_pHashTree
.FindHash(nPart
*PARTSIZE
, length
);
3361 if (pVerifiedHash
== NULL
|| !pVerifiedHash
->GetHashValid()){
3362 AddDebugLogLineC( logAICHRecovery
, wxT("Processing AICH Recovery data: Unable to get verified hash from hashset (should never happen)") );
3366 CAICHHashTree
htOurHash(pVerifiedHash
->GetNDataSize(), pVerifiedHash
->GetIsLeftBranch(), pVerifiedHash
->GetNBaseSize());
3368 CreateHashFromFile(m_hpartfile
, PARTSIZE
* nPart
, length
, NULL
, &htOurHash
);
3369 } catch (const CIOFailureException
& e
) {
3370 AddDebugLogLineC(logAICHRecovery
,
3371 CFormat(wxT("IO failure while hashing part-file '%s': %s"))
3372 % m_hpartfile
.GetFilePath() % e
.what());
3373 SetStatus(PS_ERROR
);
3377 if (!htOurHash
.GetHashValid()){
3378 AddDebugLogLineN( logAICHRecovery
, wxT("Processing AICH Recovery data: Failed to retrieve AICH Hashset of corrupt part") );
	// now compare the hash we just computed to the verified hash, and re-add all blocks which are OK
3384 uint32 nRecovered
= 0;
3385 for (uint32 pos
= 0; pos
< length
; pos
+= EMBLOCKSIZE
){
3386 const uint32 nBlockSize
= min
<uint32
>(EMBLOCKSIZE
, length
- pos
);
3387 CAICHHashTree
* pVerifiedBlock
= pVerifiedHash
->FindHash(pos
, nBlockSize
);
3388 CAICHHashTree
* pOurBlock
= htOurHash
.FindHash(pos
, nBlockSize
);
3389 if ( pVerifiedBlock
== NULL
|| pOurBlock
== NULL
|| !pVerifiedBlock
->GetHashValid() || !pOurBlock
->GetHashValid()){
3393 if (pOurBlock
->GetHash() == pVerifiedBlock
->GetHash()){
3394 FillGap(PARTSIZE
*nPart
+pos
, PARTSIZE
*nPart
+ pos
+ (nBlockSize
-1));
3395 RemoveBlockFromList(PARTSIZE
*nPart
, PARTSIZE
*nPart
+ (nBlockSize
-1));
3396 nRecovered
+= nBlockSize
;
3397 // tell the blackbox about the verified data
3398 m_CorruptionBlackBox
->VerifiedData(true, nPart
, pos
, pos
+ nBlockSize
- 1);
3400 // inform our "blackbox" about the corrupted block which may ban clients who sent it
3401 m_CorruptionBlackBox
->VerifiedData(false, nPart
, pos
, pos
+ nBlockSize
- 1);
3404 m_CorruptionBlackBox
->EvaluateData();
3406 // ok now some sanity checks
3407 if (IsComplete(nPart
)) {
3408 // this is bad, but it could probably happen under some rare circumstances
3409 // make sure that MD4 agrees to this fact too
3410 if (!HashSinglePart(nPart
)) {
3411 AddDebugLogLineN(logAICHRecovery
,
3412 CFormat(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering - but MD4 says it corrupt! Setting hashset to error state, deleting part")) % nPart
);
3413 // now we are fu... unhappy
3414 m_pAICHHashSet
->SetStatus(AICH_ERROR
);
3419 AddDebugLogLineN(logAICHRecovery
,
3420 CFormat(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering and MD4 agrees")) % nPart
);
3421 if (status
== PS_EMPTY
&& theApp
->IsRunning()) {
3422 if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded
) {
3423 // Successfully recovered part, make it available for sharing
3424 SetStatus(PS_READY
);
3425 theApp
->sharedfiles
->SafeAddKFile(this);
3429 if (theApp
->IsRunning()) {
3430 // Is this file finished?
3431 if (m_gaplist
.IsComplete()) {
3432 CompleteFile(false);
3436 } // end sanity check
3437 // We did the best we could. If it's still incomplete, then no need to keep
3438 // bashing it with ICH. So remove it from the list of corrupted parts.
3439 EraseFirstValue(m_corrupted_list
, nPart
);
3443 // make sure the user appreciates our great recovering work :P
3444 AddDebugLogLineC( logAICHRecovery
, CFormat(
3445 wxT("AICH successfully recovered %s of %s from part %u for %s") )
3446 % CastItoXBytes(nRecovered
)
3447 % CastItoXBytes(length
)
3453 void CPartFile::ClientStateChanged( int oldState
, int newState
)
3455 if ( oldState
== newState
)
3458 // If the state is -1, then it's an entirely new item
3459 if ( oldState
!= -1 ) {
3460 // Was the old state a valid state?
3461 if ( oldState
== DS_ONQUEUE
|| oldState
== DS_DOWNLOADING
) {
3464 if ( oldState
== DS_CONNECTED
/* || oldState == DS_REMOTEQUEUEFULL */ ) {
3468 m_notCurrentSources
--;
3472 // If the state is -1, then the source is being removed
3473 if ( newState
!= -1 ) {
3474 // Was the old state a valid state?
3475 if ( newState
== DS_ONQUEUE
|| newState
== DS_DOWNLOADING
) {
3478 if ( newState
== DS_CONNECTED
/* || newState == DS_REMOTEQUEUEFULL */ ) {
3482 ++m_notCurrentSources
;
3488 bool CPartFile::AddSource( CUpDownClient
* client
)
3490 if (m_SrcList
.insert(CCLIENTREF(client
, wxT("CPartFile::AddSource"))).second
) {
3491 theStats::AddFoundSource();
3492 theStats::AddSourceOrigin(client
->GetSourceFrom());
3500 bool CPartFile::DelSource( CUpDownClient
* client
)
3502 if (m_SrcList
.erase(CCLIENTREF(client
, wxEmptyString
))) {
3503 theStats::RemoveSourceOrigin(client
->GetSourceFrom());
3504 theStats::RemoveFoundSource();
3512 void CPartFile::UpdatePartsFrequency( CUpDownClient
* client
, bool increment
)
3514 const BitVector
& freq
= client
->GetPartStatus();
3516 if ( m_SrcpartFrequency
.size() != GetPartCount() ) {
3517 m_SrcpartFrequency
.clear();
3518 m_SrcpartFrequency
.insert(m_SrcpartFrequency
.begin(), GetPartCount(), 0);
3525 unsigned int size
= freq
.size();
3526 if ( size
!= m_SrcpartFrequency
.size() ) {
3531 for ( unsigned int i
= 0; i
< size
; i
++ ) {
3532 if ( freq
.get(i
) ) {
3533 m_SrcpartFrequency
[i
]++;
3537 for ( unsigned int i
= 0; i
< size
; i
++ ) {
3538 if ( freq
.get(i
) ) {
3539 m_SrcpartFrequency
[i
]--;
3545 void CPartFile::GetRatingAndComments(FileRatingList
& list
) const
3548 // This can be pre-processed, but is it worth the CPU?
3549 CPartFile::SourceSet::const_iterator it
= m_SrcList
.begin();
3550 for ( ; it
!= m_SrcList
.end(); ++it
) {
3551 CUpDownClient
*cur_src
= it
->GetClient();
3552 if (cur_src
->GetFileComment().Length()>0 || cur_src
->GetFileRating()>0) {
3553 // AddDebugLogLineN(logPartFile, wxString(wxT("found a comment for ")) << GetFileName());
3554 list
.push_back(SFileRating(*cur_src
));
3561 CPartFile::CPartFile(CEC_PartFile_Tag
*tag
) : CKnownFile(tag
)
3565 SetFileName(CPath(tag
->FileName()));
3566 m_abyFileHash
= tag
->FileHash();
3567 SetFileSize(tag
->SizeFull());
3568 m_gaplist
.Init(GetFileSize(), true); // Init empty
3569 m_partmetfilename
= CPath(tag
->PartMetName());
3570 m_fullname
= m_partmetfilename
; // We have only the met number, so show it without path in the detail dialog.
3572 m_SrcpartFrequency
.insert(m_SrcpartFrequency
.end(), GetPartCount(), 0);
3574 // these are only in CLIENT_GUI and not covered by Init()
3577 m_iDownPriorityEC
= 0;
3578 m_a4af_source_count
= 0;
3583 * Remote gui specific code
3585 CPartFile::~CPartFile()
3589 void CPartFile::GetRatingAndComments(FileRatingList
& list
) const
3591 list
= m_FileRatingList
;
3594 void CPartFile::SetCategory(uint8 cat
)
3600 bool CPartFile::AddSource(CUpDownClient
* client
)
3602 return m_SrcList
.insert(CCLIENTREF(client
, wxT("CPartFile::AddSource"))).second
!= 0;
3606 bool CPartFile::DelSource(CUpDownClient
* client
)
3608 return m_SrcList
.erase(CCLIENTREF(client
, wxEmptyString
)) != 0;
3612 #endif // !CLIENT_GUI
3615 void CPartFile::UpdateDisplayedInfo(bool force
)
3617 uint32 curTick
= ::GetTickCount();
3619 // Wait 1.5s between each redraw
3620 if (force
|| curTick
-m_lastRefreshedDLDisplay
> MINWAIT_BEFORE_DLDISPLAY_WINDOWUPDATE
) {
3621 Notify_DownloadCtrlUpdateItem(this);
3622 m_lastRefreshedDLDisplay
= curTick
;
3627 void CPartFile::Init()
3629 m_lastsearchtime
= 0;
3630 lastpurgetime
= ::GetTickCount();
3633 m_insufficient
= false;
3638 m_iLastPausePurge
= time(NULL
);
3640 if(thePrefs::GetNewAutoDown()) {
3641 m_iDownPriority
= PR_HIGH
;
3642 m_bAutoDownPriority
= true;
3644 m_iDownPriority
= PR_NORMAL
;
3645 m_bAutoDownPriority
= false;
3648 transferingsrc
= 0; // new
3652 m_hashsetneeded
= true;
3654 percentcompleted
= 0;
3656 m_bPreviewing
= false;
3657 lastseencomplete
= 0;
3658 m_availablePartsCount
=0;
3659 m_ClientSrcAnswered
= 0;
3660 m_LastNoNeededCheck
= 0;
3662 m_nTotalBufferData
= 0;
3663 m_nLastBufferFlushTime
= 0;
3664 m_bPercentUpdated
= false;
3665 m_bRecoveringArchive
= false;
3666 m_iGainDueToCompression
= 0;
3667 m_iLostDueToCorruption
= 0;
3668 m_iTotalPacketsSavedDueToICH
= 0;
3670 m_lastRefreshedDLDisplay
= 0;
3671 m_nDlActiveTime
= 0;
3673 m_is_A4AF_auto
= false;
3674 m_localSrcReqQueued
= false;
3675 m_nCompleteSourcesTime
= time(NULL
);
3676 m_nCompleteSourcesCount
= 0;
3677 m_nCompleteSourcesCountLo
= 0;
3678 m_nCompleteSourcesCountHi
= 0;
3681 m_notCurrentSources
= 0;
3684 m_LastSearchTimeKad
= 0;
3685 m_TotalSearchesKad
= 0;
3688 m_CorruptionBlackBox
= new CCorruptionBlackBox();
3692 wxString
CPartFile::getPartfileStatus() const
3697 if ((status
== PS_HASHING
) || (status
== PS_WAITINGFORHASH
)) {
3698 mybuffer
=_("Hashing");
3699 } else if (status
== PS_ALLOCATING
) {
3700 mybuffer
= _("Allocating");
3702 switch (GetStatus()) {
3704 mybuffer
=_("Completing");
3707 mybuffer
=_("Complete");
3710 mybuffer
=_("Paused");
3713 mybuffer
=_("Erroneous");
3715 case PS_INSUFFICIENT
:
3716 mybuffer
= _("Insufficient disk space");
3719 if (GetTransferingSrcCount()>0) {
3720 mybuffer
=_("Downloading");
3722 mybuffer
=_("Waiting");
3726 if (m_stopped
&& (GetStatus()!=PS_COMPLETE
)) {
3727 mybuffer
=_("Stopped");
3734 int CPartFile::getPartfileStatusRang() const
3738 if (GetTransferingSrcCount()==0) tempstatus
=1;
3739 switch (GetStatus()) {
3741 case PS_WAITINGFORHASH
:
3761 wxString
CPartFile::GetFeedback() const
3763 wxString retval
= CKnownFile::GetFeedback();
3764 if (GetStatus() != PS_COMPLETE
) {
3765 retval
+= CFormat(wxT("%s: %s (%.2f%%)\n%s: %u\n"))
3766 % _("Downloaded") % CastItoXBytes(GetCompletedSize()) % GetPercentCompleted() % _("Sources") % GetSourceCount();
3768 return retval
+ _("Status") + wxT(": ") + getPartfileStatus() + wxT("\n");
sint32 CPartFile::getTimeRemaining() const
{
	if (GetKBpsDown() < 0.001)

	return((GetFileSize()-GetCompletedSize()) / ((int)(GetKBpsDown()*1024.0)));
}
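
// Worked example for the estimate above (illustrative numbers): with 100 MiB still
// missing and a download rate of 50 kB/s, the result is
//   104857600 bytes / (50 * 1024 bytes/s) = 2048 seconds, i.e. roughly 34 minutes.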
bool CPartFile::PreviewAvailable()
{
	const uint64 minSizeForPreview = 256 * 1024;
	FileType type = GetFiletype(GetFileName());

	return (type == ftVideo || type == ftAudio) &&
		GetFileSize() >= minSizeForPreview &&
		IsComplete(0, minSizeForPreview);
}
3790 bool CPartFile::CheckShowItemInGivenCat(int inCategory
)
3792 // first check if item belongs in this cat in principle
3793 if (inCategory
> 0 && inCategory
!= GetCategory()) {
3797 // if yes apply filter
3800 switch (thePrefs::GetAllcatFilter()) {
3802 show
= GetCategory() == 0 || inCategory
> 0;
3805 show
= IsPartFile();
3808 show
= !IsPartFile();
3812 (GetStatus() == PS_READY
|| GetStatus() == PS_EMPTY
) &&
3813 GetTransferingSrcCount() == 0;
3815 case acfDownloading
:
3817 (GetStatus() == PS_READY
|| GetStatus() == PS_EMPTY
) &&
3818 GetTransferingSrcCount() > 0;
3821 show
= GetStatus() == PS_ERROR
;
3824 show
= GetStatus() == PS_PAUSED
&& !IsStopped();
3830 show
= GetFiletype(GetFileName()) == ftVideo
;
3833 show
= GetFiletype(GetFileName()) == ftAudio
;
3836 show
= GetFiletype(GetFileName()) == ftArchive
;
3839 show
= GetFiletype(GetFileName()) == ftCDImage
;
3842 show
= GetFiletype(GetFileName()) == ftPicture
;
3845 show
= GetFiletype(GetFileName()) == ftText
;
3848 show
= !IsStopped() && GetStatus() != PS_PAUSED
;
3859 void CPartFile::RemoveCategory(uint8 cat
)
3861 if (m_category
== cat
) {
3862 // Reset the category
3864 } else if (m_category
> cat
) {
3865 // Set to the new position of the original category
3871 void CPartFile::SetActive(bool bActive
)
3873 time_t tNow
= time(NULL
);
3875 if (theApp
->IsConnected()) {
3876 if (m_tActivated
== 0) {
3877 m_tActivated
= tNow
;
3881 if (m_tActivated
!= 0) {
3882 m_nDlActiveTime
+= tNow
- m_tActivated
;
3889 uint32
CPartFile::GetDlActiveTime() const
3891 uint32 nDlActiveTime
= m_nDlActiveTime
;
3892 if (m_tActivated
!= 0) {
3893 nDlActiveTime
+= time(NULL
) - m_tActivated
;
3895 return nDlActiveTime
;
3899 uint16
CPartFile::GetPartMetNumber() const
3902 return m_partmetfilename
.RemoveAllExt().GetRaw().ToLong(&nr
) ? nr
: 0;
3908 uint8
CPartFile::GetStatus(bool ignorepause
) const
3910 if ( (!m_paused
&& !m_insufficient
) ||
3911 status
== PS_ERROR
||
3912 status
== PS_COMPLETING
||
3913 status
== PS_COMPLETE
||
3916 } else if ( m_insufficient
) {
3917 return PS_INSUFFICIENT
;
3923 void CPartFile::AddDeadSource(const CUpDownClient
* client
)
3925 m_deadSources
.AddDeadSource( client
);
3929 bool CPartFile::IsDeadSource(const CUpDownClient
* client
)
3931 return m_deadSources
.IsDeadSource( client
);
3934 void CPartFile::SetFileName(const CPath
& fileName
)
3936 CKnownFile
* pFile
= theApp
->sharedfiles
->GetFileByID(GetFileHash());
3938 bool is_shared
= (pFile
&& pFile
== this);
3941 // The file is shared, we must clear the search keywords so we don't
3942 // publish the old name anymore.
3943 theApp
->sharedfiles
->RemoveKeywords(this);
3946 CKnownFile::SetFileName(fileName
);
3949 // And of course, we must advertise the new name if the file is shared.
3950 theApp
->sharedfiles
->AddKeywords(this);
3953 UpdateDisplayedInfo(true);
uint16 CPartFile::GetMaxSources() const
{
	// Stays like this until we import the per-file ("private") max sources setting
	return thePrefs::GetMaxSourcePerFile();
}
3964 uint16
CPartFile::GetMaxSourcePerFileSoft() const
3966 unsigned int temp
= ((unsigned int)GetMaxSources() * 9L) / 10;
3967 if (temp
> MAX_SOURCES_FILE_SOFT
) {
3968 return MAX_SOURCES_FILE_SOFT
;
3973 uint16
CPartFile::GetMaxSourcePerFileUDP() const
3975 unsigned int temp
= ((unsigned int)GetMaxSources() * 3L) / 4;
3976 if (temp
> MAX_SOURCES_FILE_UDP
) {
3977 return MAX_SOURCES_FILE_UDP
;
#define DROP_FACTOR 2

CUpDownClient* CPartFile::GetSlowerDownloadingClient(uint32 speed, CUpDownClient* caller) {
	// printf("Start slower source calculation\n");
	for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = it++->GetClient();
		if ((cur_src->GetDownloadState() == DS_DOWNLOADING) && (cur_src != caller)) {
			uint32 factored_bytes_per_second = static_cast<uint32>(
				(cur_src->GetKBpsDown() * 1024) * DROP_FACTOR);
			if (factored_bytes_per_second < speed) {
				// printf("Selecting source %p to drop: %d < %d\n", cur_src, factored_bytes_per_second, speed);
				// printf("End slower source calculation\n");

			} else {
				// printf("Not selecting source %p to drop: %d > %d\n", cur_src, factored_bytes_per_second, speed);
			}
		}
	}
	// printf("End slower source calculation\n");
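
	// Reading of DROP_FACTOR above: a downloading source is offered for dropping when
	// its current rate, scaled by DROP_FACTOR, is still below the given speed - e.g.
	// with DROP_FACTOR 2 and speed = 100 * 1024 B/s, only sources below ~50 kB/s qualify.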
void CPartFile::AllocationFinished()
{
	// see if it can be opened
	if (!m_hpartfile.Open(m_PartPath, CFile::read_write)) {
		AddLogLineN(CFormat(_("ERROR: Failed to open partfile '%s'")) % GetFullName());
		SetStatus(PS_ERROR);
	}

	// then close the handle again
	m_hpartfile.Release(true);
}

// File_checked_for_headers