// This file is part of the aMule Project.
//
// Copyright (c) 2003-2011 aMule Team ( admin@amule.org / http://www.amule.org )
// Copyright (c) 2002-2011 Merkur ( devs@emule-project.net / http://www.emule-project.net )
//
// Any parts of this program derived from the xMule, lMule or eMule project,
// or contributed by third-party developers are copyrighted by their
// respective authors.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
//
#include "PartFile.h"           // Interface declarations.

#include "config.h"             // Needed for VERSION

#include <protocol/kad/Constants.h>
#include <protocol/ed2k/Client2Client/TCP.h>
#include <protocol/Protocols.h>
#include <common/DataFileVersion.h>
#include <common/Constants.h>
#include <tags/FileTags.h>

#include <wx/tokenzr.h>         // Needed for wxStringTokenizer

#include "KnownFileList.h"      // Needed for CKnownFileList
#include "CanceledFileList.h"
#include "UploadQueue.h"        // Needed for CFileHash
#include "IPFilter.h"           // Needed for CIPFilter
#include "Server.h"             // Needed for CServer
#include "ServerConnect.h"      // Needed for CServerConnect
#ifdef CLIENT_GUI
#include "UpDownClientEC.h"     // Needed for CUpDownClient
#else
#include "updownclient.h"       // Needed for CUpDownClient
#endif
#include "MemFile.h"            // Needed for CMemFile
#include "Preferences.h"        // Needed for CPreferences
#include "DownloadQueue.h"      // Needed for CDownloadQueue
#include "amule.h"              // Needed for theApp
#include "ED2KLink.h"           // Needed for CED2KLink
#include "Packet.h"             // Needed for CTag
#include "SearchList.h"         // Needed for CSearchFile
#include "ClientList.h"         // Needed for clientlist
#include "Statistics.h"         // Needed for theStats

#include <common/Format.h>      // Needed for CFormat
#include <common/FileFunctions.h>       // Needed for GetLastModificationTime

#include "ThreadTasks.h"        // Needed for CHashingTask/CCompletionTask/CAllocateFileTask
#include "GuiEvents.h"          // Needed for Notify_*
#include "DataToText.h"         // Needed for OriginToText()
#include "PlatformSpecific.h"   // Needed for CreateSparseFile()
#include "FileArea.h"           // Needed for CFileArea
#include "ScopedPtr.h"          // Needed for CScopedArray
#include "CorruptionBlackBox.h"

#include "kademlia/kademlia/Kademlia.h"
#include "kademlia/kademlia/Search.h"
SFileRating::SFileRating(const wxString &u, const wxString &f, sint16 r, const wxString &c)


SFileRating::SFileRating(const SFileRating &fr)
:
    UserName(fr.UserName),
    FileName(fr.FileName),


SFileRating::SFileRating(const CUpDownClient &client)
:
    UserName(client.GetUserName()),
    FileName(client.GetClientFilename()),
    Rating(client.GetFileRating()),
    Comment(client.GetFileComment())


SFileRating::~SFileRating()
class PartFileBufferedData
{
public:
    CFileArea area;                         // File area to be written
    uint64 start;                           // This is the start offset of the data
    uint64 end;                             // This is the end offset of the data
    Requested_Block_Struct *block;          // This is the requested block that this data relates to

    PartFileBufferedData(CFileAutoClose& file, byte* data, uint64 _start, uint64 _end, Requested_Block_Struct* _block)
        : start(_start), end(_end), block(_block)
    {
        area.StartWriteAt(file, start, end - start + 1);
        memcpy(area.GetBuffer(), data, end - start + 1);
    }
};


typedef std::list<Chunk> ChunkList;
CPartFile::CPartFile()


CPartFile::CPartFile(CSearchFile* searchresult)

    m_abyFileHash = searchresult->GetFileHash();
    SetFileName(searchresult->GetFileName());
    SetFileSize(searchresult->GetFileSize());

    for (unsigned int i = 0; i < searchresult->m_taglist.size(); ++i){
        const CTag& pTag = searchresult->m_taglist[i];

        bool bTagAdded = false;
        if (pTag.GetNameID() == 0 && !pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
            static const struct {
                    { wxT(FT_ED2K_MEDIA_ARTIST),  2 },
                    { wxT(FT_ED2K_MEDIA_ALBUM),   2 },
                    { wxT(FT_ED2K_MEDIA_TITLE),   2 },
                    { wxT(FT_ED2K_MEDIA_LENGTH),  2 },
                    { wxT(FT_ED2K_MEDIA_BITRATE), 3 },
                    { wxT(FT_ED2K_MEDIA_CODEC),   2 }

            for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
                if ( pTag.GetType() == _aMetaTags[t].nType &&
                     (pTag.GetName() == _aMetaTags[t].pszName)) {
                    // skip string tags with empty string values
                    if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

                    // skip "length" tags with "0: 0" values
                    if (pTag.GetName() == wxT(FT_ED2K_MEDIA_LENGTH)) {
                        if (pTag.GetStr().IsSameAs(wxT("0: 0")) ||
                            pTag.GetStr().IsSameAs(wxT("0:0"))) {

                    // skip "bitrate" tags with '0' values
                    if ((pTag.GetName() == wxT(FT_ED2K_MEDIA_BITRATE)) && !pTag.GetInt()) {

                    AddDebugLogLineN( logPartFile,
                        wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
                        pTag.GetFullInfo() );
                    m_taglist.push_back(pTag);
        } else if (pTag.GetNameID() != 0 && pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
            static const struct {
            for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
                if (pTag.GetType() == _aMetaTags[t].nType && pTag.GetNameID() == _aMetaTags[t].nID) {
                    // skip string tags with empty string values
                    if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

                    AddDebugLogLineN( logPartFile,
                        wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
                        pTag.GetFullInfo() );
                    m_taglist.push_back(pTag);

            AddDebugLogLineN( logPartFile,
                wxT("CPartFile::CPartFile(CSearchFile*): ignored tag ") +
                pTag.GetFullInfo() );
CPartFile::CPartFile(const CED2KFileLink* fileLink)

    SetFileName(CPath(fileLink->GetName()));
    SetFileSize(fileLink->GetSize());
    m_abyFileHash = fileLink->GetHashKey();

    if (fileLink->m_hashset) {
        if (!LoadHashsetFromFile(fileLink->m_hashset, true)) {
            AddDebugLogLineC(logPartFile, wxT("eD2K link contained invalid hashset: ") + fileLink->GetLink());


CPartFile::~CPartFile()
    // if it's not opened, it was completed or deleted
    if (m_hpartfile.IsOpened()) {

    // Update met file (with current directory entry)

    DeleteContents(m_BufferedData_list);
    delete m_CorruptionBlackBox;

    wxASSERT(m_SrcList.empty());
    wxASSERT(m_A4AFsrclist.empty());


void CPartFile::CreatePartFile()
    // use lowest free partfilenumber for free file (InterCeptor)
        m_partmetfilename = CPath(CFormat(wxT("%03i.part.met")) % i);
        m_fullname = thePrefs::GetTempDir().JoinPaths(m_partmetfilename);
    } while (m_fullname.FileExists());

    m_CorruptionBlackBox->SetPartFileInfo(GetFileName().GetPrintable(), m_partmetfilename.RemoveAllExt().GetPrintable());

    wxString strPartName = m_partmetfilename.RemoveExt().GetRaw();
    m_taglist.push_back(CTagString(FT_PARTFILENAME, strPartName));

    m_gaplist.Init(GetFileSize(), true);        // Init empty

    m_PartPath = m_fullname.RemoveExt();
    if (thePrefs::GetAllocFullFile()) {
        fileCreated = m_hpartfile.Create(m_PartPath, true);
        fileCreated = PlatformSpecific::CreateSparseFile(m_PartPath, GetFileSize());
        AddLogLineN(_("ERROR: Failed to create partfile"));

    SetFilePath(thePrefs::GetTempDir());

    if (thePrefs::GetAllocFullFile()) {
        SetStatus(PS_ALLOCATING);
        CThreadScheduler::AddTask(new CAllocateFileTask(this, thePrefs::AddNewFilesPaused()));
        AllocationFinished();

    m_hashsetneeded = (GetED2KPartHashCount() > 0);

    SetActive(theApp->IsConnected());


uint8 CPartFile::LoadPartFile(const CPath& in_directory, const CPath& filename, bool from_backup, bool getsizeonly)
    bool isnewstyle = false;
    uint8 version, partmettype = PMT_UNKNOWN;

    std::map<uint16, Gap_Struct*> gap_map; // Slugfiller

    m_partmetfilename = filename;
    m_CorruptionBlackBox->SetPartFileInfo(GetFileName().GetPrintable(), m_partmetfilename.RemoveAllExt().GetPrintable());
    m_filePath = in_directory;
    m_fullname = m_filePath.JoinPaths(m_partmetfilename);
    m_PartPath = m_fullname.RemoveExt();
    // read file data from the part.met file
    CPath curMetFilename = m_fullname;
        curMetFilename = curMetFilename.AppendExt(PARTMET_BAK_EXT);
        AddLogLineN(CFormat( _("Trying to load backup of met-file from %s") )

    CFile metFile(curMetFilename, CFile::read);
    if (!metFile.IsOpened()) {
        AddLogLineN(CFormat( _("ERROR: Failed to open part.met file: %s ==> %s") )
    } else if (metFile.GetLength() == 0) {
        AddLogLineN(CFormat( _("ERROR: part.met file is 0 size: %s ==> %s") )

        version = metFile.ReadUInt8();
        if (version != PARTFILE_VERSION && version != PARTFILE_SPLITTEDVERSION && version != PARTFILE_VERSION_LARGEFILE){
            //if (version == 83) return ImportShareazaTempFile(...)
            AddLogLineN(CFormat( _("ERROR: Invalid part.met file version: %s ==> %s") )

        isnewstyle = (version == PARTFILE_SPLITTEDVERSION);
        partmettype = isnewstyle ? PMT_SPLITTED : PMT_DEFAULTOLD;

        if (version == PARTFILE_VERSION) {// Do we still need this check ?
            uint8 test[4];      // It will fail for certain files.
            metFile.Seek(24, wxFromStart);
            metFile.Read(test,4);

            metFile.Seek(1, wxFromStart);
            if (test[0]==0 && test[1]==0 && test[2]==2 && test[3]==1) {
                isnewstyle=true;        // edonkey's so called "old part style"
                partmettype=PMT_NEWOLD;

            uint32 temp = metFile.ReadUInt32();
            if (temp==0) {      // 0.48 partmets - different again
                LoadHashsetFromFile(&metFile, false);
                metFile.Seek(2, wxFromStart);
                LoadDateFromFile(&metFile);
                m_abyFileHash = metFile.ReadHash();

            LoadDateFromFile(&metFile);
            LoadHashsetFromFile(&metFile, false);
        uint32 tagcount = metFile.ReadUInt32();

        for (uint32 j = 0; j < tagcount; ++j) {
            CTag newtag(metFile,true);
                    (newtag.GetNameID() == FT_FILESIZE ||
                     newtag.GetNameID() == FT_FILENAME))) {
                switch(newtag.GetNameID()) {
                        if (!GetFileName().IsOk()) {
                            // If it's not empty, we already loaded the unicoded one
                            SetFileName(CPath(newtag.GetStr()));
                    case FT_LASTSEENCOMPLETE: {
                        lastseencomplete = newtag.GetInt();
                        SetFileSize(newtag.GetInt());
                    case FT_TRANSFERRED: {
                        transferred = newtag.GetInt();
                        //#warning needs setfiletype string
                        //SetFileType(newtag.GetStr());
                        m_category = newtag.GetInt();
                        if (m_category > theApp->glob_prefs->GetCatCount() - 1 ) {
                    case FT_OLDDLPRIORITY:
                    case FT_DLPRIORITY: {
                            m_iDownPriority = newtag.GetInt();
                            if( m_iDownPriority == PR_AUTO ){
                                m_iDownPriority = PR_HIGH;
                                SetAutoDownPriority(true);
                                if ( m_iDownPriority != PR_LOW &&
                                     m_iDownPriority != PR_NORMAL &&
                                     m_iDownPriority != PR_HIGH )
                                    m_iDownPriority = PR_NORMAL;
                                SetAutoDownPriority(false);
                        m_paused = (newtag.GetInt() == 1);
                        m_stopped = m_paused;
                    case FT_OLDULPRIORITY:
                    case FT_ULPRIORITY: {
                            SetUpPriority(newtag.GetInt(), false);
                            if( GetUpPriority() == PR_AUTO ){
                                SetUpPriority(PR_HIGH, false);
                                SetAutoUpPriority(true);
                                SetAutoUpPriority(false);
                    case FT_KADLASTPUBLISHSRC:{
                        SetLastPublishTimeKadSrc(newtag.GetInt(), 0);
                        if(GetLastPublishTimeKadSrc() > (uint32)time(NULL)+KADEMLIAREPUBLISHTIMES) {
                            //There may be a possibility that an older client saved a random number here.. This will check for that..
                            SetLastPublishTimeKadSrc(0,0);
                    case FT_KADLASTPUBLISHNOTES:{
                        SetLastPublishTimeKadNotes(newtag.GetInt());
                    // old tags: as long as they are not needed, take the chance to purge them
                    case FT_KADLASTPUBLISHKEY:
                    case FT_DL_ACTIVE_TIME:
                        if (newtag.IsInt()) {
                            m_nDlActiveTime = newtag.GetInt();
                    case FT_CORRUPTEDPARTS: {
                        wxASSERT(m_corrupted_list.empty());
                        wxString strCorruptedParts(newtag.GetStr());
                        wxStringTokenizer tokenizer(strCorruptedParts, wxT(","));
                        while ( tokenizer.HasMoreTokens() ) {
                            wxString token = tokenizer.GetNextToken();
                            if (token.ToULong(&uPart)) {
                                if (uPart < GetPartCount() && !IsCorruptedPart(uPart)) {
                                    m_corrupted_list.push_back(uPart);
                            hash.DecodeBase32(newtag.GetStr()) == CAICHHash::GetHashSize();
                        wxASSERT(hashSizeOk);
                            m_pAICHHashSet->SetMasterHash(hash, AICH_VERIFIED);
                    case FT_ATTRANSFERRED:{
                        statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (uint64)newtag.GetInt());
                    case FT_ATTRANSFERREDHI:{
                        statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (((uint64)newtag.GetInt()) << 32));
                    case FT_ATREQUESTED:{
                        statistic.SetAllTimeRequests(newtag.GetInt());
                        statistic.SetAllTimeAccepts(newtag.GetInt());
                        // Start Changes by Slugfiller for better exception handling

                        wxCharBuffer tag_ansi_name = newtag.GetName().ToAscii();
                        char gap_mark = tag_ansi_name ? tag_ansi_name[0u] : 0;
                        if ( newtag.IsInt() && (newtag.GetName().Length() > 1) &&
                             ((gap_mark == FT_GAPSTART) ||
                              (gap_mark == FT_GAPEND))) {
                            Gap_Struct *gap = NULL;
                            unsigned long int gapkey;
                            if (newtag.GetName().Mid(1).ToULong(&gapkey)) {
                                if ( gap_map.find( gapkey ) == gap_map.end() ) {
                                    gap = new Gap_Struct;
                                    gap_map[gapkey] = gap;
                                    gap->start = (uint64)-1;
                                    gap->end = (uint64)-1;
                                    gap = gap_map[ gapkey ];
                                if (gap_mark == FT_GAPSTART) {
                                    gap->start = newtag.GetInt();
                                if (gap_mark == FT_GAPEND) {
                                    gap->end = newtag.GetInt()-1;
                                AddDebugLogLineN(logPartFile, wxT("Wrong gap map key while reading met file!"));
                        // End Changes by Slugfiller for better exception handling
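                        // On-disk encoding of gaps in part.met: each gap is stored as a
                        // pair of integer tags whose name is the gap's index number
                        // prefixed with a one-character marker (FT_GAPSTART for the first
                        // missing byte, FT_GAPEND for one past the last missing byte);
                        // the pairs are matched up again through gap_map above.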
                        m_taglist.push_back(newtag);
                        // Nothing. Else, nothing.

        // load the hashsets from the hybridstylepartmet
        if (isnewstyle && !getsizeonly && (metFile.GetPosition()<metFile.GetLength()) ) {
            metFile.Seek(1, wxFromCurrent);

            uint16 parts=GetPartCount();        // assuming we will get all hashsets

            for (uint16 i = 0; i < parts && (metFile.GetPosition()+16<metFile.GetLength()); ++i){
                CMD4Hash cur_hash = metFile.ReadHash();
                m_hashlist.push_back(cur_hash);

            if (!m_hashlist.empty()) {
                CreateHashFromHashlist(m_hashlist, &checkhash);
                if (m_abyFileHash != checkhash) {

    } catch (const CInvalidPacket& e) {
        AddLogLineC(CFormat(_("Error: %s (%s) is corrupt (bad tags: %s), unable to load file."))
    } catch (const CIOFailureException& e) {
        AddDebugLogLineC(logPartFile, CFormat( wxT("IO failure while loading '%s': %s") )
    } catch (const CEOFException& WXUNUSED(e)) {
        AddLogLineC(CFormat( _("ERROR: %s (%s) is corrupt (wrong tagcount), unable to load file.") )
        AddLogLineC(_("Trying to recover file info..."));

        // A file is safe to recover if it has:
        // We have the filesize, try to recover the other needed info

        // Do we need to check gaps? I think not,
        // because they are checked below. Worst
        // scenario will only mark file as 0 bytes downloaded.

        if (!GetFileName().IsOk()) {
            // Not critical, let's put a random filename.
                "Recovering no-named file - will try to recover it as RecoveredFile.dat"));
            SetFileName(CPath(wxT("RecoveredFile.dat")));

        AddLogLineC(_("Recovered all available file info :D - Trying to use it..."));
        AddLogLineC(_("Unable to recover file info :("));

    m_gaplist.Init(GetFileSize(), false);       // Init full, then add gaps
    // Now to flush the map into the list (Slugfiller)
    std::map<uint16, Gap_Struct*>::iterator it = gap_map.begin();
    for ( ; it != gap_map.end(); ++it ) {
        Gap_Struct* gap = it->second;
        // SLUGFILLER: SafeHash - revised code, and extra safety
        if ( (gap->start != (uint64)-1) &&
             (gap->end != (uint64)-1) &&
             gap->start <= gap->end &&
             gap->start < GetFileSize()) {
            if (gap->end >= GetFileSize()) {
                gap->end = GetFileSize()-1; // Clipping
            m_gaplist.AddGap(gap->start, gap->end); // All tags accounted for, use safe adding
    // SLUGFILLER: SafeHash

    //check if this is a backup
    if ( m_fullname.GetExt().MakeLower() == wxT("backup" )) {
        m_fullname = m_fullname.RemoveExt();

    // open permanent handle
    if ( !m_hpartfile.Open(m_PartPath, CFile::read_write)) {
        AddLogLineN(CFormat( _("Failed to open %s (%s)") )

        // SLUGFILLER: SafeHash - final safety, make sure any missing part of the file is gap
        if (m_hpartfile.GetLength() < GetFileSize())
            AddGap(m_hpartfile.GetLength(), GetFileSize()-1);
        // Goes both ways - Partfile should never be too large
        if (m_hpartfile.GetLength() > GetFileSize()) {
            AddDebugLogLineC(logPartFile, CFormat( wxT("Partfile \"%s\" is too large! Truncating %llu bytes.") ) % GetFileName() % (m_hpartfile.GetLength() - GetFileSize()));
            m_hpartfile.SetLength(GetFileSize());
        // SLUGFILLER: SafeHash
    } catch (const CIOFailureException& e) {
        AddDebugLogLineC(logPartFile, CFormat( wxT("Error while accessing partfile \"%s\": %s") ) % GetFileName() % e.what());

    // now close the file again until needed
    m_hpartfile.Release(true);

    // check hashcount, file status etc
    if (GetHashCount() != GetED2KPartHashCount()){
        m_hashsetneeded = true;
        m_hashsetneeded = false;
        for (size_t i = 0; i < m_hashlist.size(); ++i) {

    if (m_gaplist.IsComplete()) { // is this file complete already?

    if (!isnewstyle) { // not for importing
        const time_t file_date = CPath::GetModificationTime(m_PartPath);
        if (m_lastDateChanged != file_date) {
            // It's pointless to rehash an empty file, since the case
            // where a user has zero'd a file is handled above ...
            if (m_hpartfile.GetLength()) {
                AddLogLineN(CFormat( _("WARNING: %s might be corrupted (%i)") )
                    % (m_lastDateChanged - file_date) );
                SetStatus(PS_WAITINGFORHASH);

                CPath partFileName = m_partmetfilename.RemoveExt();
                CThreadScheduler::AddTask(new CHashingTask(m_filePath, partFileName, this));

    UpdateCompletedInfos();
    if (completedsize > transferred) {
        m_iGainDueToCompression = completedsize - transferred;
    } else if (completedsize != transferred) {
        m_iLostDueToCorruption = transferred - completedsize;
bool CPartFile::SavePartFile(bool Initial)
        case PS_WAITINGFORHASH:

    /* Don't write anything to disk if less than 100 KB of free space is left. */
    sint64 free = CPath::GetFreeSpaceAt(GetFilePath());
    if ((free != wxInvalidOffset) && (free < (100 * 1024))) {

        if (!m_PartPath.FileExists()) {
            throw wxString(wxT(".part file not found"));

        uint32 lsc = lastseencomplete;

        CPath::BackupFile(m_fullname, wxT(".backup"));
        CPath::RemoveFile(m_fullname);

        file.Open(m_fullname, CFile::write);
        if (!file.IsOpened()) {
            throw wxString(wxT("Failed to open part.met file"));

        file.WriteUInt8(IsLargeFile() ? PARTFILE_VERSION_LARGEFILE : PARTFILE_VERSION);

        file.WriteUInt32(CPath::GetModificationTime(m_PartPath));

        file.WriteHash(m_abyFileHash);
        uint16 parts = m_hashlist.size();
        file.WriteUInt16(parts);
        for (int x = 0; x < parts; ++x) {
            file.WriteHash(m_hashlist[x]);

#define FIXED_TAGS 15
        uint32 tagcount = m_taglist.size() + FIXED_TAGS + (m_gaplist.size()*2);
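        // Tag budget: FIXED_TAGS counts the tags that are always written below
        // (file name twice, size, transferred, status, priorities, last-seen-complete,
        // category and the statistics tags), every gap adds a start tag and an end tag
        // (hence m_gaplist.size()*2), and the optional tags counted in the checks that
        // follow (corrupted parts, AICH hash, Kad publish times, active time) are
        // added on top.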
        if (!m_corrupted_list.empty()) {

        if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){

        if (GetLastPublishTimeKadSrc()){

        if (GetLastPublishTimeKadNotes()){

        if (GetDlActiveTime()){

        file.WriteUInt32(tagcount);

        //#warning Kry - Where are lost by corruption and gained by compression?

        // 0 (unicoded part file name)
        // We write it with BOM to keep eMule compatibility. Note that the 'printable' filename is saved,
        // as presently the filename does not represent an actual file.
        CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file, utf8strOptBOM );
        CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file );        // 1

        CTagIntSized( FT_FILESIZE, GetFileSize(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );     // 2
        CTagIntSized( FT_TRANSFERRED, transferred, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );    // 3
        CTagInt32( FT_STATUS, (m_paused?1:0)).WriteTagToFile( &file );  // 4

        if ( IsAutoDownPriority() ) {
            CTagInt32( FT_DLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file ); // 5
            CTagInt32( FT_OLDDLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );      // 6
            CTagInt32( FT_DLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );        // 5
            CTagInt32( FT_OLDDLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );     // 6

        CTagInt32( FT_LASTSEENCOMPLETE, lsc ).WriteTagToFile( &file );  // 7

        if ( IsAutoUpPriority() ) {
            CTagInt32( FT_ULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file ); // 8
            CTagInt32( FT_OLDULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );      // 9
            CTagInt32( FT_ULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );        // 8
            CTagInt32( FT_OLDULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );     // 9

        CTagInt32(FT_CATEGORY, m_category).WriteTagToFile( &file );     // 10
        CTagInt32(FT_ATTRANSFERRED, statistic.GetAllTimeTransferred() & 0xFFFFFFFF).WriteTagToFile( &file );    // 11
        CTagInt32(FT_ATTRANSFERREDHI, statistic.GetAllTimeTransferred() >> 32).WriteTagToFile( &file );         // 12
        CTagInt32(FT_ATREQUESTED, statistic.GetAllTimeRequests()).WriteTagToFile( &file );      // 13
        CTagInt32(FT_ATACCEPTED, statistic.GetAllTimeAccepts()).WriteTagToFile( &file );        // 14

        // corrupted parts info
        if (!m_corrupted_list.empty()) {
            wxString strCorruptedParts;
            std::list<uint16>::iterator it = m_corrupted_list.begin();
            for (; it != m_corrupted_list.end(); ++it) {
                uint16 uCorruptedPart = *it;
                if (!strCorruptedParts.IsEmpty()) {
                    strCorruptedParts += wxT(",");
                strCorruptedParts += CFormat(wxT("%u")) % uCorruptedPart;
            wxASSERT( !strCorruptedParts.IsEmpty() );

            CTagString( FT_CORRUPTEDPARTS, strCorruptedParts ).WriteTagToFile( &file); // 11?

        if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
            CTagString aichtag(FT_AICH_HASH, m_pAICHHashSet->GetMasterHash().GetString() );
            aichtag.WriteTagToFile(&file); // 12?

        if (GetLastPublishTimeKadSrc()){
            CTagInt32(FT_KADLASTPUBLISHSRC, GetLastPublishTimeKadSrc()).WriteTagToFile(&file); // 15?

        if (GetLastPublishTimeKadNotes()){
            CTagInt32(FT_KADLASTPUBLISHNOTES, GetLastPublishTimeKadNotes()).WriteTagToFile(&file); // 16?

        if (GetDlActiveTime()){
            CTagInt32(FT_DL_ACTIVE_TIME, GetDlActiveTime()).WriteTagToFile(&file); // 17

        for (uint32 j = 0; j < (uint32)m_taglist.size();++j) {
            m_taglist[j].WriteTagToFile(&file);

        for (CGapList::const_iterator it = m_gaplist.begin(); it != m_gaplist.end(); ++it) {
            wxString tagName = CFormat(wxT(" %u")) % i_pos;

            // gap start = first missing byte but gap end = first non-missing byte
            // in edonkey, but I think it's easier to use the real limits
            tagName[0] = FT_GAPSTART;
            CTagIntSized(tagName, it.start(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

            tagName[0] = FT_GAPEND;
            CTagIntSized(tagName, it.end() + 1, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

    } catch (const wxString& error) {
        AddLogLineNS(CFormat( _("ERROR while saving partfile: %s (%s ==> %s)") )
    } catch (const CIOFailureException& e) {
        AddLogLineCS(_("IO failure while saving partfile: ") + e.what());

        CPath::RemoveFile(m_fullname.AppendExt(wxT(".backup")));

    sint64 metLength = m_fullname.GetFileSize();
    if (metLength == wxInvalidOffset) {
        theApp->ShowAlert( CFormat( _("Could not retrieve length of '%s' - using %s file.") )
        CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
    } else if (metLength == 0) {
        // Don't backup if it's 0 size but raise a warning!!!
        theApp->ShowAlert( CFormat( _("'%s' is 0 size somehow - using %s file.") )
        CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
        // no error, just backup
        CPath::BackupFile(m_fullname, PARTMET_BAK_EXT);
void CPartFile::SaveSourceSeeds()
#define MAX_SAVED_SOURCES 10
    // Kry - Sources seeds
    // Based on a Feature request, this saves the last MAX_SAVED_SOURCES
    // sources of the file, giving a 'seed' for the next run.
    // We save the last sources because:
    // 1 - They could be the hardest to get
    // 2 - They will more probably be available
    // However, if we have downloading sources, they have preference because
    // we probably have more credits on them.
    // Anyway, source exchange will get us the rest of the sources
    // This feature is currently used only on rare files (< 20 sources)

    if (GetSourceCount()>20) {

    CClientRefList source_seeds;

    CClientRefList::iterator it = m_downloadingSourcesList.begin();
    for( ; it != m_downloadingSourcesList.end() && n_sources < MAX_SAVED_SOURCES; ++it) {
        if (!it->HasLowID()) {
            source_seeds.push_back(*it);

    if (n_sources < MAX_SAVED_SOURCES) {
        // Not enough downloading sources to fill the list, going to sources list
        if (GetSourceCount() > 0) {
            SourceSet::reverse_iterator rit = m_SrcList.rbegin();
            for ( ; ((rit != m_SrcList.rend()) && (n_sources<MAX_SAVED_SOURCES)); ++rit) {
                if (!rit->HasLowID()) {
                    source_seeds.push_back(*rit);

    const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));

    file.Create(seedsPath, true);
    if (!file.IsOpened()) {
        AddLogLineN(CFormat( _("Failed to save part.met.seeds file for %s") )

        file.WriteUInt8(0); // v3, to avoid v2 clients choking on it.
        file.WriteUInt8(source_seeds.size());

        CClientRefList::iterator it2 = source_seeds.begin();
        for (; it2 != source_seeds.end(); ++it2) {
            CUpDownClient* cur_src = it2->GetClient();
            file.WriteUInt32(cur_src->GetUserIDHybrid());
            file.WriteUInt16(cur_src->GetUserPort());
            file.WriteHash(cur_src->GetUserHash());
            // CryptSettings - See SourceExchange V4
            const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
            const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
            const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
            const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
            file.WriteUInt8(byCryptOptions);

        /* v2: Added to keep track of too old seeds */
        file.WriteUInt32(wxDateTime::Now().GetTicks());
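        // Layout of the .seeds file as written above: a leading 0 byte (marks the
        // v3 format so old clients skip it), one byte with the number of seeds,
        // then per seed the hybrid user ID (4 bytes), TCP port (2 bytes), user
        // hash (16 bytes) and one crypt-options byte (bit 0 = supports,
        // bit 1 = requests, bit 2 = requires obfuscation), followed by a final
        // 32-bit save timestamp used to expire old seeds on load.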
        AddLogLineN(CFormat( wxPLURAL("Saved %i source seed for partfile: %s (%s)", "Saved %i source seeds for partfile: %s (%s)", n_sources) )
    } catch (const CIOFailureException& e) {
        AddDebugLogLineC( logPartFile, CFormat( wxT("Error saving partfile's seeds file (%s - %s): %s") )
        CPath::RemoveFile(seedsPath);


void CPartFile::LoadSourceSeeds()
    CMemFile sources_data;

    bool valid_sources = false;

    const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));
    if (!seedsPath.FileExists()) {

    CFile file(seedsPath, CFile::read);
    if (!file.IsOpened()) {
        AddLogLineN(CFormat( _("Partfile %s (%s) has no seeds file") )

        if (file.GetLength() <= 1) {
            AddLogLineN(CFormat( _("Partfile %s (%s) has a void seeds file") )

        uint8 src_count = file.ReadUInt8();

        bool bUseSX2Format = (src_count == 0);
) {
1070 src_count
= file
.ReadUInt8();
1073 sources_data
.WriteUInt16(src_count
);
1075 for (int i
= 0; i
< src_count
; ++i
) {
1076 uint32 dwID
= file
.ReadUInt32();
1077 uint16 nPort
= file
.ReadUInt16();
1079 sources_data
.WriteUInt32(bUseSX2Format
? dwID
: wxUINT32_SWAP_ALWAYS(dwID
));
1080 sources_data
.WriteUInt16(nPort
);
1081 sources_data
.WriteUInt32(0);
1082 sources_data
.WriteUInt16(0);
1084 if (bUseSX2Format
) {
1085 sources_data
.WriteHash(file
.ReadHash());
1086 sources_data
.WriteUInt8(file
.ReadUInt8());
1093 // v2: Added to keep track of too old seeds
1094 time_t time
= (time_t)file
.ReadUInt32();
1096 // Time frame is 2 hours. More than enough to compile
1097 // your new aMule version!.
1098 if ((time
+ MIN2S(120)) >= wxDateTime::Now().GetTicks()) {
1099 valid_sources
= true;
1103 // v1 has no time data. We can safely use
1104 // the sources, next time will be saved.
1105 valid_sources
= true;
1108 if (valid_sources
) {
1109 sources_data
.Seek(0);
1110 AddClientSources(&sources_data
, SF_SOURCE_SEEDS
, bUseSX2Format
? 4 : 1, bUseSX2Format
);
1113 } catch (const CSafeIOException
& e
) {
1114 AddLogLineN(CFormat( _("Error reading partfile's seeds file (%s - %s): %s") )
1123 void CPartFile::PartFileHashFinished(CKnownFile
* result
)
1125 m_lastDateChanged
= result
->m_lastDateChanged
;
1126 bool errorfound
= false;
1127 if (GetED2KPartHashCount() == 0){
1128 if (IsComplete(0, GetFileSize()-1)){
1129 if (result
->GetFileHash() != GetFileHash()){
1130 // cppcheck-suppress zerodiv
1131 AddLogLineN(CFormat(wxPLURAL(
1132 "Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
1133 "Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
1139 % result
->GetFileHash().Encode()
1140 % GetFileHash().Encode() );
1141 AddGap(0, GetFileSize()-1);
1147 for (size_t i
= 0; i
< m_hashlist
.size(); ++i
){
1148 // Kry - trel_ar's completed parts check on rehashing.
1149 // Very nice feature, if a file is completed but .part.met don't believe it,
1152 uint64 partStart
= i
* PARTSIZE
;
1153 uint64 partEnd
= partStart
+ GetPartSize(i
) - 1;
1154 if (!( i
< result
->GetHashCount() && (result
->GetPartHash(i
) == GetPartHash(i
)))){
1155 if (IsComplete(i
)) {
1157 if ( i
< result
->GetHashCount() )
1158 wronghash
= result
->GetPartHash(i
);
1160 AddLogLineN(CFormat(wxPLURAL(
1161 "Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
1162 "Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
1163 GetED2KPartHashCount())
1166 % GetED2KPartHashCount()
1168 % wronghash
.Encode()
1169 % GetPartHash(i
).Encode() );
1175 if (!IsComplete(i
)){
1176 AddLogLineN(CFormat( _("Found completed part (%i) in %s") )
1181 RemoveBlockFromList(partStart
, partEnd
);
1188 result
->GetAICHHashset()->GetStatus() == AICH_HASHSETCOMPLETE
&&
1189 status
== PS_COMPLETING
) {
1190 delete m_pAICHHashSet
;
1191 m_pAICHHashSet
= result
->GetAICHHashset();
1192 result
->SetAICHHashset(NULL
);
1193 m_pAICHHashSet
->SetOwner(this);
1195 else if (status
== PS_COMPLETING
) {
1196 AddDebugLogLineN(logPartFile
,
1197 CFormat(wxT("Failed to store new AICH Hashset for completed file: %s"))
1204 if (status
== PS_COMPLETING
){
1209 AddLogLineN(CFormat( _("Finished rehashing %s") ) % GetFileName());
1213 SetStatus(PS_READY
);
1217 SetStatus(PS_READY
);
1219 theApp
->sharedfiles
->SafeAddKFile(this);
void CPartFile::AddGap(uint64 start, uint64 end)
    m_gaplist.AddGap(start, end);
    UpdateDisplayedInfo();


void CPartFile::AddGap(uint16 part)
    m_gaplist.AddGap(part);
    UpdateDisplayedInfo();


bool CPartFile::IsAlreadyRequested(uint64 start, uint64 end)
    std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
    for (; it != m_requestedblocks_list.end(); ++it) {
        Requested_Block_Struct* cur_block = *it;

        if ((start <= cur_block->EndOffset) && (end >= cur_block->StartOffset)) {


bool CPartFile::GetNextEmptyBlockInPart(uint16 partNumber, Requested_Block_Struct* result)
    // Find start of this part
    uint64 partStart = (PARTSIZE * partNumber);
    uint64 start = partStart;

    // What is the end limit of this block, i.e. can't go outside part (or filesize)
    uint64 partEnd = partStart + GetPartSize(partNumber) - 1;
    // Loop until we find a suitable gap and return true, or run out of gaps and return false
    CGapList::const_iterator it = m_gaplist.begin();
        uint64 gapStart, end;

        // Find the first gap from the start position
        for (; it != m_gaplist.end(); ++it) {
            gapStart = it.start();

            // Want gaps that overlap start<->partEnd
            if (gapStart <= partEnd && end >= start) {
            } else if (gapStart > partEnd) {

        // If no gaps after start, exit

        // Update start position if gap starts after current pos
        if (start < gapStart) {

        // Find end, keeping within the max block size and the part limit
        uint64 blockLimit = partStart + (BLOCKSIZE * (((start - partStart) / BLOCKSIZE) + 1)) - 1;
        if (end > blockLimit) {
        if (end > partEnd) {

        // If this gap has not already been requested, we have found a valid entry
        if (!IsAlreadyRequested(start, end)) {
            // Was a result block requested?
            if (result != NULL) {
                result->StartOffset = start;
                result->EndOffset = end;
                md4cpy(result->FileID, GetFileHash().GetHash());
                result->transferred = 0;

        // Reposition to end of that gap

        // If tried all gaps then break out of the loop
        if (end == partEnd) {

    // No suitable gap found


void CPartFile::FillGap(uint64 start, uint64 end)
    m_gaplist.FillGap(start, end);
    UpdateCompletedInfos();
    UpdateDisplayedInfo();


void CPartFile::FillGap(uint16 part)
    m_gaplist.FillGap(part);
    UpdateCompletedInfos();
    UpdateDisplayedInfo();


void CPartFile::UpdateCompletedInfos()
    uint64 allgaps = m_gaplist.GetGapSize();

    percentcompleted = (1.0 - (double)allgaps/GetFileSize()) * 100.0;
    completedsize = GetFileSize() - allgaps;


void CPartFile::WritePartStatus(CMemFile* file)
    uint16 parts = GetED2KPartCount();
    file->WriteUInt16(parts);
    while (done != parts){
        for (uint32 i = 0;i != 8;++i) {
            if (IsComplete(done)) {
            if (done == parts) {
        file->WriteUInt8(towrite);
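        // The part status is sent as a bitfield: after the 16-bit part count,
        // each byte written here carries the completion state of up to eight
        // parts (one bit per part), with the final byte left padded when the
        // part count is not a multiple of eight.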
void CPartFile::WriteCompleteSourcesCount(CMemFile* file)
    file->WriteUInt16(m_nCompleteSourcesCount);


uint32 CPartFile::Process(uint32 reducedownload /*in percent*/, uint8 m_icounter)
    uint32 dwCurTick = ::GetTickCount();

    // If buffer size exceeds limit, or if not written within time limit, flush data
    if ( (m_nTotalBufferData > thePrefs::GetFileBufferSize()) ||
         (dwCurTick > (m_nLastBufferFlushTime + BUFFER_TIME_LIMIT))) {

    // check if we want new sources from server --> MOVED for 16.40 version
    old_trans=transferingsrc;

    if (m_icounter < 10) {
        // Update only downloading sources.
        CClientRefList::iterator it = m_downloadingSourcesList.begin();
        for( ; it != m_downloadingSourcesList.end(); ) {
            CUpDownClient *cur_src = it++->GetClient();
            if(cur_src->GetDownloadState() == DS_DOWNLOADING) {
                kBpsDown += cur_src->SetDownloadLimit(reducedownload);

        // Update all sources (including downloading sources)
        for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
            CUpDownClient* cur_src = it++->GetClient();
            switch (cur_src->GetDownloadState()) {
                case DS_DOWNLOADING: {
                    kBpsDown += cur_src->SetDownloadLimit(reducedownload);
                case DS_LOWTOLOWIP: {
                    if (cur_src->HasLowID() && !theApp->CanDoCallback(cur_src->GetServerIP(), cur_src->GetServerPort())) {
                        // If we are almost maxed on sources,
                        // slowly remove these clients to see
                        // if we can find a better source.
                        if (((dwCurTick - lastpurgetime) > 30000) &&
                            (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8))) {
                            RemoveSource(cur_src);
                            lastpurgetime = dwCurTick;
                        cur_src->SetDownloadState(DS_ONQUEUE);
                case DS_NONEEDEDPARTS: {
                    // we try to purge no-needed sources, even without reaching the limit
                    if((dwCurTick - lastpurgetime) > 40000) {
                        if(!cur_src->SwapToAnotherFile(false , false, false , NULL)) {
                            //however we only delete them if reaching the limit
                            if (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) {
                                RemoveSource(cur_src);
                                lastpurgetime = dwCurTick;
                                break; //Johnny-B - nothing more to do here (good eye!)
                        lastpurgetime = dwCurTick;
                    // doubled reasktime for no needed parts - save connections and traffic
                    if ( !((!cur_src->GetLastAskedTime()) ||
                           (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME*2)) {

                    // Recheck this client to see if still NNP..
                    // Set to DS_NONE so that we force a TCP reask next time..
                    cur_src->SetDownloadState(DS_NONE);
                    if( cur_src->IsRemoteQueueFull()) {
                        if( ((dwCurTick - lastpurgetime) > 60000) &&
                            (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) ) {
                            RemoveSource( cur_src );
                            lastpurgetime = dwCurTick;
                            break; //Johnny-B - nothing more to do here (good eye!)
                    // Give up to 1 min for UDP to respond..
                    // If we are within one min on TCP, do not try..
                    if ( theApp->IsConnected() &&
                         ( (!cur_src->GetLastAskedTime()) ||
                           (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME-20000)) {
                        cur_src->UDPReaskForDownload();
                    // No break here, since the next case takes care of asking for downloads.
                case DS_TOOMANYCONNS:
                case DS_WAITCALLBACK:
                case DS_WAITCALLBACKKAD: {
                    if ( theApp->IsConnected() &&
                         ( (!cur_src->GetLastAskedTime()) ||
                           (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME)) {
                        if (!cur_src->AskForDownload()) {
                            // I left this break here just as a reminder
                            // just in case we rearrange things..

        /* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
        if (IsA4AFAuto() && ((!m_LastNoNeededCheck) || (dwCurTick - m_LastNoNeededCheck > 900000))) {
            m_LastNoNeededCheck = dwCurTick;
            for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
                CUpDownClient *cur_source = it++->GetClient();
                uint8 download_state=cur_source->GetDownloadState();
                if( download_state != DS_DOWNLOADING
                    && cur_source->GetRequestFile()
                    && ((!cur_source->GetRequestFile()->IsA4AFAuto()) || download_state == DS_NONEEDEDPARTS))
                    cur_source->SwapToAnotherFile(false, false, false, this);
        /* eMule 0.30c implementation, i give it a try (Creteil) END ... */

        // swap no-needed partfiles if possible

        if (((old_trans==0) && (transferingsrc>0)) || ((old_trans>0) && (transferingsrc==0))) {

        // Kad source search
        if( GetMaxSourcePerFileUDP() > GetSourceCount()){
            //Once we can handle lowID users in Kad, we can remove the second IsConnected
            if (theApp->downloadqueue->DoKademliaFileRequest() && (Kademlia::CKademlia::GetTotalFile() < KADEMLIATOTALFILE) && (dwCurTick > m_LastSearchTimeKad) && Kademlia::CKademlia::IsConnected() && theApp->IsConnected() && !IsStopped()){
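                // A new Kad lookup is started only when all of the above hold: the
                // download queue grants another Kad file request, the global limit
                // of concurrent Kad file searches (KADEMLIATOTALFILE) has not been
                // reached, this file's reask timer has expired, Kad and the client
                // itself are both connected, and the file is not stopped.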
                theApp->downloadqueue->SetLastKademliaFileRequest();

                if (GetKadFileSearchID()) {
                    /* This will never happen anyway. We're talking a
                       1h timespan and searches are at max 45secs */
                    Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);

                Kademlia::CUInt128 kadFileID(GetFileHash().GetHash());
                Kademlia::CSearch* pSearch = Kademlia::CSearchManager::PrepareLookup(Kademlia::CSearch::FILE, true, kadFileID);
                AddDebugLogLineN(logKadSearch, CFormat(wxT("Preparing a Kad Search for '%s'")) % GetFileName());
                    AddDebugLogLineN(logKadSearch, CFormat(wxT("Kad lookup started for '%s'")) % GetFileName());
                    if(m_TotalSearchesKad < 7) {
                        m_TotalSearchesKad++;
                    m_LastSearchTimeKad = dwCurTick + (KADEMLIAREASKTIME*m_TotalSearchesKad);
                    SetKadFileSearchID(pSearch->GetSearchID());
            if(GetKadFileSearchID()) {
                Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);

        // check if we want new sources from server
        if ( !m_localSrcReqQueued &&
             ( (!m_lastsearchtime) ||
               (dwCurTick - m_lastsearchtime) > SERVERREASKTIME) &&
             theApp->IsConnectedED2K() &&
             thePrefs::GetMaxSourcePerFileSoft() > GetSourceCount() &&
            m_localSrcReqQueued = true;
            theApp->downloadqueue->SendLocalSrcRequest(this);

    // calculate datarate, set limit etc.

    // Kry - does the 3 / 30 difference produce too much flickering or CPU?
    if (m_count >= 30) {
        UpdateAutoDownPriority();
        UpdateDisplayedInfo();
        if(m_bPercentUpdated == false) {
            UpdateCompletedInfos();
        m_bPercentUpdated = false;

    // release file handle if unused for some time
    m_hpartfile.Release();

    return (uint32)(kBpsDown*1024.0);


bool CPartFile::CanAddSource(uint32 userid, uint16 port, uint32 serverip, uint16 serverport, uint8* pdebug_lowiddropped, bool ed2kID)
    //The incoming ID could have the userid in the Hybrid format..
    uint32 hybridID = 0;
        if (IsLowID(userid)) {
            hybridID = wxUINT32_SWAP_ALWAYS(userid);
        if (!IsLowID(userid)) {
            userid = wxUINT32_SWAP_ALWAYS(userid);
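    // A high ID is the client's IPv4 address; eD2K and Kad ("hybrid") notation
    // store it with opposite byte order, which is what the swaps above convert
    // between. Low IDs are plain server-assigned numbers and are used unchanged.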
    // MOD Note: Do not change this part - Merkur
    if (theApp->IsConnectedED2K()) {
        if(::IsLowID(theApp->GetED2KID())) {
            if(theApp->GetED2KID() == userid && theApp->serverconnect->GetCurrentServer()->GetIP() == serverip && theApp->serverconnect->GetCurrentServer()->GetPort() == serverport) {
            if(theApp->GetPublicIP() == userid) {
            if(theApp->GetED2KID() == userid && thePrefs::GetPort() == port) {

    if (Kademlia::CKademlia::IsConnected()) {
        if(!Kademlia::CKademlia::IsFirewalled()) {
            if(Kademlia::CKademlia::GetIPAddress() == hybridID && thePrefs::GetPort() == port) {

    //This allows *.*.*.0 clients to not be removed if Ed2kID == false
    if ( IsLowID(hybridID) && theApp->IsFirewalled()) {
        if (pdebug_lowiddropped) {
            (*pdebug_lowiddropped)++;


void CPartFile::AddSources(CMemFile& sources, uint32 serverip, uint16 serverport, unsigned origin, bool bWithObfuscationAndHash)
    uint8 count = sources.ReadUInt8();
    uint8 debug_lowiddropped = 0;
    uint8 debug_possiblesources = 0;
    CMD4Hash achUserHash;

        // since we may receive multiple search source UDP results we have to "consume" all data of that packet
        AddDebugLogLineN(logPartFile, wxT("Trying to add sources for a stopped file"));
        sources.Seek(count*(4+2), wxFromCurrent);

    for (int i = 0;i != count;++i) {
        uint32 userid = sources.ReadUInt32();
        uint16 port = sources.ReadUInt16();

        uint8 byCryptOptions = 0;
        if (bWithObfuscationAndHash){
            byCryptOptions = sources.ReadUInt8();
            if ((byCryptOptions & 0x80) > 0) {
                achUserHash = sources.ReadHash();
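            // Crypt-options byte from the server: bit 0 = the source supports
            // protocol obfuscation, bit 1 = it requests it, bit 7 = the source's
            // user hash follows in the packet (read above).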
            if ((thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x01/*supported*/) > 0 && (byCryptOptions & 0x80) == 0)
                || (thePrefs::IsClientCryptLayerSupported() && (byCryptOptions & 0x02/*requested*/) > 0 && (byCryptOptions & 0x80) == 0)) {
                AddDebugLogLineN(logPartFile, CFormat(wxT("Server didn't provide UserHash for source %u, even if it was expected to (or local obfuscationsettings changed during serverconnect")) % userid);
            } else if (!thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x02/*requested*/) == 0 && (byCryptOptions & 0x80) != 0) {
                AddDebugLogLineN(logPartFile, CFormat(wxT("Server provided UserHash for source %u, even if it wasn't expected to (or local obfuscationsettings changed during serverconnect")) % userid);

        // "Filter LAN IPs" and "IPfilter" the received sources IP addresses
        if (!IsLowID(userid)) {
            // check for 0-IP, localhost and optionally for LAN addresses
            if ( !IsGoodIP(userid, thePrefs::FilterLanIPs()) ) {
            if (theApp->ipfilter->IsFiltered(userid)) {

        if (!CanAddSource(userid, port, serverip, serverport, &debug_lowiddropped)) {

        if(thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
            ++debug_possiblesources;
            CUpDownClient* newsource = new CUpDownClient(port,userid,serverip,serverport,this, true, true);

            newsource->SetSourceFrom((ESourceFrom)origin);
            newsource->SetConnectOptions(byCryptOptions, true, false);

            if ((byCryptOptions & 0x80) != 0) {
                newsource->SetUserHash(achUserHash);

            theApp->downloadqueue->CheckAndAddSource(this,newsource);
            AddDebugLogLineN(logPartFile, wxT("Consuming a packet because of max sources reached"));
            // Since we may receive multiple search source UDP results we have to "consume" all data of that packet
            // This '+1' is added because 'i' counts from 0.
            sources.Seek((count-(i+1))*(4+2), wxFromCurrent);
            if (GetKadFileSearchID()) {
                Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);


void CPartFile::UpdatePartsInfo()
    if( !IsPartFile() ) {
        CKnownFile::UpdatePartsInfo();

    uint16 partcount = GetPartCount();
    bool flag = (time(NULL) - m_nCompleteSourcesTime > 0);

    // Ensure the frequency-list is ready
    if ( m_SrcpartFrequency.size() != GetPartCount() ) {
        m_SrcpartFrequency.clear();
        m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

    // Find number of available parts
    uint16 availablecounter = 0;
    for ( uint16 i = 0; i < partcount; ++i ) {
        if ( m_SrcpartFrequency[i] )

    if ( ( availablecounter == partcount ) && ( m_availablePartsCount < partcount ) ) {
        lastseencomplete = time(NULL);

    m_availablePartsCount = availablecounter;

        ArrayOfUInts16 count;

        count.reserve(GetSourceCount());

        for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it ) {
            CUpDownClient* client = it->GetClient();
            if ( !client->GetUpPartStatus().empty() && client->GetUpPartCount() == partcount ) {
                count.push_back(client->GetUpCompleteSourcesCount());

        m_nCompleteSourcesCount = m_nCompleteSourcesCountLo = m_nCompleteSourcesCountHi = 0;

        for (uint16 i = 0; i < partcount; ++i) {
                m_nCompleteSourcesCount = m_SrcpartFrequency[i];
            else if( m_nCompleteSourcesCount > m_SrcpartFrequency[i]) {
                m_nCompleteSourcesCount = m_SrcpartFrequency[i];
        count.push_back(m_nCompleteSourcesCount);

        int32 n = count.size();

        std::sort(count.begin(), count.end(), std::less<uint16>());

        int32 i = n >> 1;       // (n / 2)
        int32 j = (n * 3) >> 2; // (n * 3) / 4
        int32 k = (n * 7) >> 3; // (n * 7) / 8
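        // i, j and k index the median, the 3rd quartile and the 7th octile of
        // the sorted per-source guesses; they feed the normal, low and high
        // complete-source estimates blended below.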
        //When still a part file, adjust your guesses by 20% to what you see..
            //Not many sources, so just use what you see..
            // welcome to 'plain stupid code'
            // m_nCompleteSourcesCount;
            m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
            m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
        } else if (n < 20) {
            // For the low guess and the normal guess:
            // If we see more sources than the guessed low and normal, use what we see.
            // If we see fewer sources than the guessed low, the network accounts for 80%,
            // we account for 20% with what we see and make sure we are still above the normal.

            // Adjust 80% network and 20% what we see.
            if ( count[i] < m_nCompleteSourcesCount ) {
                m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
                m_nCompleteSourcesCountLo =
                    (uint16)((float)(count[i]*.8) +
                             (float)(m_nCompleteSourcesCount*.2));
            m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
            m_nCompleteSourcesCountHi =
                (uint16)((float)(count[j]*.8) +
                         (float)(m_nCompleteSourcesCount*.2));
            if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
                m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

            // The network accounts for 80%, we account for 20% with what
            // we see and make sure we are still above the low.

            // The network accounts for 80%, we account for 20% with what
            // we see and make sure we are still above the normal.

            m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
            m_nCompleteSourcesCount = (uint16)((float)(count[j]*.8)+(float)(m_nCompleteSourcesCount*.2));
            if( m_nCompleteSourcesCount < m_nCompleteSourcesCountLo ) {
                m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
            m_nCompleteSourcesCountHi = (uint16)((float)(count[k]*.8)+(float)(m_nCompleteSourcesCount*.2));
            if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
                m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

    m_nCompleteSourcesTime = time(NULL) + (60);
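    // These estimates are refreshed at most once per minute; see the 'flag'
    // value computed from m_nCompleteSourcesTime at the top of this function.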
    UpdateDisplayedInfo();


// [Maella -Enhanced Chunk Selection- (based on jicxicmic)]
bool CPartFile::GetNextRequestedBlock(CUpDownClient* sender,
                                      std::vector<Requested_Block_Struct*>& toadd, uint16& count)

    // The purpose of this function is to return a list of blocks (~180KB) to
    // download. To avoid a premature stop of the downloading, all blocks that
    // are requested from the same source must be located within the same
    // chunk (=> part ~9MB).
    //
    // The selection of the chunk to download is one of the CRITICAL parts of the
    // edonkey network. The selection algorithm must ensure the best spreading
    //
    // The selection is based on 4 criteria:
    //  1. Frequency of the chunk (availability), very rare chunks must be downloaded
    //     as quickly as possible to become a new available source.
    //  2. Parts used for preview (first + last chunk), preview or check a
    //     file (e.g. movie, mp3)
    //  3. Request state (download in progress), try to ask each source for another
    //     chunk. Spread the requests between all sources.
    //  4. Completion (shortest-to-complete), partially retrieved chunks should be
    //     completed before starting to download others.
    //
    // The frequency criterion defines three zones: very rare (<10%), rare (<50%)
    // and common (>50%). Inside each zone, the criteria have a specific weight, used
    // to calculate the priority of chunks. The chunk(s) with the highest
    // priority (highest=0, lowest=0xffff) is/are selected first.
    //
    //          very rare (preview)            rare                     common
    // 0% <---- +0 pt ----> 10% <----- +10000 pt -----> 50% <---- +20000 pt ----> 100%
    // 1.  <------- frequency: +25*frequency pt ----------->
    // 2.  <- preview: +1 pt --><-------------- preview: set to 10000 pt ------------->
    // 3.      <------ request: download in progress +20000 pt ------>
    // 4a. <- completion: 0% +100, 25% +75 .. 100% +0 pt --><-- !req => completion --->
    // 4b.                                                  <--- req => !completion -->
    //
    // Unrolled, the priority scale is:
    //
    // 0..xxxx       unrequested and requested very rare chunks
    // 10000..1xxxx  unrequested rare chunks + unrequested preview chunks
    // 20000..2xxxx  unrequested common chunks (priority to the most complete)
    // 30000..3xxxx  requested rare chunks + requested preview chunks
    // 40000..4xxxx  requested common chunks (priority to the least complete)
    //
    // This algorithm usually selects the rarest chunk(s) first. However, partially
    // complete chunk(s) that is/are close to completion may overtake the priority
    // (priority inversion).
    // For the common chunks, the algorithm tries to spread the download between the sources.
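    // Worked example of the ranking implemented below: a chunk seen at 4 sources
    // inside the very-rare zone, 40% complete and not used for preview gets
    // rank = 25*4 + 1 + (100 - 40) = 161, so it ranks ahead of all rare and
    // common chunks (which start at 10000 and 20000). An unrequested common
    // chunk at 70% completion gets 20000 + 30 = 20030, while the same chunk
    // already requested from another source gets 40000 + 70 = 40070; the
    // completion weight is inverted there so new sources are spread over
    // several partially complete common chunks.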
1875 // Check input parameters
1876 if ( sender
->GetPartStatus().empty() ) {
1879 // Define and create the list of the chunks to download
1880 const uint16 partCount
= GetPartCount();
1881 ChunkList chunksList
;
1884 uint16 newBlockCount
= 0;
1885 while(newBlockCount
!= count
) {
1886 // Create a request block stucture if a chunk has been previously selected
1887 if(sender
->GetLastPartAsked() != 0xffff) {
1888 Requested_Block_Struct
* pBlock
= new Requested_Block_Struct
;
1889 if(GetNextEmptyBlockInPart(sender
->GetLastPartAsked(), pBlock
) == true) {
1890 // Keep a track of all pending requested blocks
1891 m_requestedblocks_list
.push_back(pBlock
);
1892 // Update list of blocks to return
1893 toadd
.push_back(pBlock
);
1895 // Skip end of loop (=> CPU load)
1898 // All blocks for this chunk have been already requested
1900 // => Try to select another chunk
1901 sender
->SetLastPartAsked(0xffff);
1905 // Check if a new chunk must be selected (e.g. download starting, previous chunk complete)
1906 if(sender
->GetLastPartAsked() == 0xffff) {
1907 // Quantify all chunks (create list of chunks to download)
1908 // This is done only one time and only if it is necessary (=> CPU load)
1909 if(chunksList
.empty()) {
1910 // Indentify the locally missing part(s) that this source has
1911 for(uint16 i
=0; i
< partCount
; ++i
) {
1912 if(sender
->IsPartAvailable(i
) == true && GetNextEmptyBlockInPart(i
, NULL
) == true) {
1913 // Create a new entry for this chunk and add it to the list
1916 newEntry
.frequency
= m_SrcpartFrequency
[i
];
1917 chunksList
.push_back(newEntry
);
1921 // Check if any bloks(s) could be downloaded
		if (chunksList.empty()) {
			break;	// Exit main loop while()
		}

		// Define the bounds of the three zones (very rare, rare)
		// more depending on available sources
		uint8 modif = 10;
		if (GetSourceCount() > 800) {
			modif = 2;
		} else if (GetSourceCount() > 200) {
			modif = 5;
		}
		uint16 limit = modif * GetSourceCount() / 100;
		if (limit == 0) {
			limit = 1;
		}
		const uint16 veryRareBound = limit;
		const uint16 rareBound = 2 * limit;

		// Cache Preview state (Criterion 2)
		FileType type = GetFiletype(GetFileName());
		const bool isPreviewEnable =
			thePrefs::GetPreviewPrio() &&
			(type == ftArchive || type == ftVideo);

		// Collect and calculate criteria for all chunks
		for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
			Chunk& cur_chunk = *it;

			const uint64 uStart = cur_chunk.part * PARTSIZE;
			const uint64 uEnd   = uStart + GetPartSize(cur_chunk.part) - 1;

			// Criterion 2. Parts used for preview
			// Remark: - We need to download the first part and the last part(s).
			//         - When the last part is very small, it's necessary to
			//           download the two last parts.
			bool critPreview = false;
			if (isPreviewEnable == true) {
				if (cur_chunk.part == 0) {
					critPreview = true;	// First chunk
				} else if (cur_chunk.part == partCount - 1) {
					critPreview = true;	// Last chunk
				} else if (cur_chunk.part == partCount - 2) {
					// Last chunk - 1 (only if last chunk is too small)
					const uint32 sizeOfLastChunk = GetFileSize() - uEnd;
					if (sizeOfLastChunk < PARTSIZE/3) {
						critPreview = true;	// Last chunk - 1
					}
				}
			}

			// Criterion 3. Request state (downloading in process from other source(s))
			const bool critRequested =
				cur_chunk.frequency > veryRareBound &&
				IsAlreadyRequested(uStart, uEnd);

			// Criterion 4. Completion
			// PARTSIZE instead of GetPartSize() favours the last chunk - but that may be intentional
			uint32 partSize = PARTSIZE - m_gaplist.GetGapSize(cur_chunk.part);
			const uint16 critCompletion = (uint16)(partSize/(PARTSIZE/100));	// in [%]

			// Calculate priority with all criteria
			if (cur_chunk.frequency <= veryRareBound) {
				// 0..xxxx unrequested + requested very rare chunks
				cur_chunk.rank = (25 * cur_chunk.frequency) +		// Criterion 1
					((critPreview == true) ? 0 : 1) +		// Criterion 2
					(100 - critCompletion);				// Criterion 4
			} else if (critPreview == true) {
				// 10000..10100  unrequested preview chunks
				// 30000..30100  requested preview chunks
				cur_chunk.rank = ((critRequested == false) ? 10000 : 30000) +	// Criterion 3
					(100 - critCompletion);				// Criterion 4
			} else if (cur_chunk.frequency <= rareBound) {
				// 10101..1xxxx  unrequested rare chunks
				// 30101..3xxxx  requested rare chunks
				cur_chunk.rank = (25 * cur_chunk.frequency) +		// Criterion 1
					((critRequested == false) ? 10101 : 30101) +	// Criterion 3
					(100 - critCompletion);				// Criterion 4
			} else {
				if (critRequested == false) {				// Criterion 3
					// 20000..2xxxx  unrequested common chunks
					cur_chunk.rank = 20000 +			// Criterion 3
						(100 - critCompletion);			// Criterion 4
				} else {
					// 40000..4xxxx  requested common chunks
					// Remark: The weight of the completion criterion is inverted
					//         to spread the requests over the completing chunks.
					//         Without this, the chunk closest to completion would
					//         receive every new source.
					cur_chunk.rank = 40000 +			// Criterion 3
						(critCompletion);			// Criterion 4
				}
			}
		}

		// Select the next chunk to download
		if (!chunksList.empty()) {
			// Find and count the chunk(s) with the highest priority
			uint16 chunkCount = 0;	// Number of found chunks with same priority
			uint16 rank = 0xffff;	// Highest priority found

			// Find the chunk(s) with the best (lowest) rank
			for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
				const Chunk& cur_chunk = *it;
				if (cur_chunk.rank < rank) {
					chunkCount = 1;
					rank = cur_chunk.rank;
				} else if (cur_chunk.rank == rank) {
					++chunkCount;
				}
			}

			// Use a random access to avoid that everybody tries to download the
			// same chunks at the same time (=> spread the selected chunk among clients)
			uint16 randomness = 1 + (int) (((float)(chunkCount-1))*rand()/(RAND_MAX+1.0));

			for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
				const Chunk& cur_chunk = *it;
				if (cur_chunk.rank == rank) {
					randomness--;
					if (randomness == 0) {
						// Selection process is over
						sender->SetLastPartAsked(cur_chunk.part);
						// Remark: this list might be reused up to *count times
						chunksList.erase(it);
						break;	// exit loop for()
					}
				}
			}
		} else {
			// There is no remaining chunk to download
			break;	// Exit main loop while()
		}
	}

	// Return the number of blocks
	count = newBlockCount;

	return (newBlockCount > 0);
}
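
// Minimal sketch of the ranking scheme used above, assuming the same band
// constants; RankChunk() is an illustrative helper, not a member of CPartFile.
// Lower rank means the chunk is picked earlier.
//
//	uint16 RankChunk(uint16 freq, bool preview, bool requested, uint16 completion,
//			 uint16 veryRareBound, uint16 rareBound)
//	{
//		if (freq <= veryRareBound)	// 0..xxxx: very rare chunks
//			return 25 * freq + (preview ? 0 : 1) + (100 - completion);
//		if (preview)			// 10000..10100 / 30000..30100: preview chunks
//			return (requested ? 30000 : 10000) + (100 - completion);
//		if (freq <= rareBound)		// 10101..1xxxx / 30101..3xxxx: rare chunks
//			return 25 * freq + (requested ? 30101 : 10101) + (100 - completion);
//		return requested ? (40000 + completion)			// requested common chunks
//				 : (20000 + (100 - completion));	// unrequested common chunks
//	}
//
// Ties inside the winning band are then broken by the random index
// 1 + (int)(((float)(chunkCount - 1)) * rand() / (RAND_MAX + 1.0)),
// which spreads equally ranked chunks across different clients.
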
void CPartFile::RemoveBlockFromList(uint64 start, uint64 end)
{
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	while (it != m_requestedblocks_list.end()) {
		std::list<Requested_Block_Struct*>::iterator it2 = it++;

		if ((*it2)->StartOffset <= start && (*it2)->EndOffset >= end) {
			m_requestedblocks_list.erase(it2);
		}
	}
}


void CPartFile::RemoveAllRequestedBlocks(void)
{
	m_requestedblocks_list.clear();
}
void CPartFile::CompleteFile(bool bIsHashingDone)
{
	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
	}

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	AddDebugLogLineN( logPartFile, wxString( wxT("CPartFile::CompleteFile: Hash ") ) + ( bIsHashingDone ? wxT("done") : wxT("not done") ) );

	if (!bIsHashingDone) {
		SetStatus(PS_COMPLETING);

		CPath partFile = m_partmetfilename.RemoveExt();
		CThreadScheduler::AddTask(new CHashingTask(GetFilePath(), partFile, this));
		return;
	} else {
		m_is_A4AF_auto = false;
		SetStatus(PS_COMPLETING);
		// guess I was wrong about not needing to spawn a thread ...
		// It is if the temp and incoming dirs are on different
		// partitions/drives and the file is large...[oz]
		PerformFileComplete();
	}

	if (thePrefs::ShowCatTabInfos()) {
		Notify_ShowUpdateCatTabTitles();
	}
	UpdateDisplayedInfo(true);
}
void CPartFile::CompleteFileEnded(bool errorOccured, const CPath& newname)
{
	if (errorOccured) {
		SetStatus(PS_ERROR);
		AddLogLineC(CFormat( _("Unexpected error while completing %s. File paused") ) % GetFileName() );
	} else {
		m_fullname = newname;

		SetFilePath(m_fullname.GetPath());
		SetFileName(m_fullname.GetFullName());
		m_lastDateChanged = CPath::GetModificationTime(m_fullname);

		SetStatus(PS_COMPLETE);

		// Remove from list of canceled files in case it was canceled once upon a time
		if (theApp->canceledfiles->Remove(GetFileHash())) {
			theApp->canceledfiles->Save();
		}

		// Mark as known (checks if it's already known),
		// also updates search files
		theApp->knownfiles->SafeAddKFile(this);

		// remove the file from the suspended uploads list
		theApp->uploadqueue->ResumeUpload(GetFileHash());
		theApp->downloadqueue->RemoveFile(this, true);
		theApp->sharedfiles->SafeAddKFile(this);
		UpdateDisplayedInfo(true);

		// republish that file to the ed2k-server to update the 'FT_COMPLETE_SOURCES' counter on the server.
		theApp->sharedfiles->RepublishFile(this);

		// Ensure that completed shows the correct value
		completedsize = GetFileSize();

		// clear the blackbox to free up memory
		m_CorruptionBlackBox->Free();

		AddLogLineC(CFormat( _("Finished downloading: %s") ) % GetFileName() );
	}

	theApp->downloadqueue->StartNextFile(this);
}
void CPartFile::PerformFileComplete()
{
	// add this file to the suspended uploads list
	theApp->uploadqueue->SuspendUpload(GetFileHash(), false);

	// close permanent handle
	if (m_hpartfile.IsOpened()) {
		m_hpartfile.Close();
	}

	// Schedule task for completion of the file
	CThreadScheduler::AddTask(new CCompletionTask(this));
}
void CPartFile::RemoveAllSources(bool bTryToSwap)
{
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = it++->GetClient();
		if (bTryToSwap) {
			if (!cur_src->SwapToAnotherFile(true, true, true, NULL)) {
				RemoveSource(cur_src, true, false);
				// If it was not swapped, it's not on any file anymore, and should die
			}
		} else {
			RemoveSource(cur_src, true, false);
		}
	}

	/* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
	// remove all A4AF links in sources to this file
	if (!m_A4AFsrclist.empty()) {
		for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
			CUpDownClient* cur_src = it++->GetClient();
			if ( cur_src->DeleteFileRequest( this ) ) {
				Notify_SourceCtrlRemoveSource(cur_src->ECID(), this);
			}
		}
		m_A4AFsrclist.clear();
	}
	/* eMule 0.30c implementation, i give it a try (Creteil) END ... */
	UpdateFileRatingCommentAvail();
}
void CPartFile::Delete()
{
	AddLogLineN(CFormat(_("Deleting file: %s")) % GetFileName());
	// Barry - Need to tell any connected clients to stop sending the file
	AddDebugLogLineN(logPartFile, wxT("\tStopped"));

	uint16 removed = theApp->uploadqueue->SuspendUpload(GetFileHash(), true);
	AddDebugLogLineN(logPartFile, CFormat(wxT("\tSuspended upload to %d clients")) % removed);
	theApp->sharedfiles->RemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from shared"));
	theApp->downloadqueue->RemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from download queue"));
	Notify_DownloadCtrlRemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from transferwnd"));
	if (theApp->canceledfiles->Add(GetFileHash())) {
		theApp->canceledfiles->Save();
	}
	AddDebugLogLineN(logPartFile, wxT("\tAdded to canceled file list"));
	theApp->searchlist->UpdateSearchFileByHash(GetFileHash());	// Update file in the search dialog if it's still open

	if (m_hpartfile.IsOpened()) {
		m_hpartfile.Close();
	}

	AddDebugLogLineN(logPartFile, wxT("\tClosed"));

	if (!CPath::RemoveFile(m_fullname)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("\tFailed to delete '%s'")) % m_fullname);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .part.met"));
	}

	if (!CPath::RemoveFile(m_PartPath)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % m_PartPath);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .part"));
	}

	CPath BAKName = m_fullname.AppendExt(PARTMET_BAK_EXT);
	if (!CPath::RemoveFile(BAKName)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % BAKName);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .bak"));
	}

	CPath SEEDSName = m_fullname.AppendExt(wxT(".seeds"));
	if (SEEDSName.FileExists()) {
		if (CPath::RemoveFile(SEEDSName)) {
			AddDebugLogLineN(logPartFile, wxT("\tRemoved .seeds"));
		} else {
			AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % SEEDSName);
		}
	}

	AddDebugLogLineN(logPartFile, wxT("Done"));
}
bool CPartFile::HashSinglePart(uint16 partnumber)
{
	if ((GetHashCount() <= partnumber) && (GetPartCount() > 1)) {
		AddLogLineC(CFormat( _("WARNING: Unable to hash downloaded part - hashset incomplete for '%s'") )
				% GetFileName() );
		m_hashsetneeded = true;
		return true;
	} else if ((GetHashCount() <= partnumber) && GetPartCount() != 1) {
		AddLogLineC(CFormat( _("ERROR: Unable to hash downloaded part - hashset incomplete (%s). This should never happen")) % GetFileName() );
		m_hashsetneeded = true;
		return true;
	} else {
		CMD4Hash hashresult;
		uint64 offset = PARTSIZE * partnumber;
		uint32 length = GetPartSize(partnumber);
		try {
			CreateHashFromFile(m_hpartfile, offset, length, &hashresult, NULL);
		} catch (const CIOFailureException& e) {
			AddLogLineC(CFormat( _("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
				% partnumber % length % (offset+length) % GetFileName() % GetFileSize() % e.what());
			SetStatus(PS_ERROR);
			return false;
		} catch (const CEOFException& e) {
			AddLogLineC(CFormat( _("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
				% partnumber % length % (offset+length) % GetFileName() % GetFileSize() % e.what());
			SetStatus(PS_ERROR);
			return false;
		}

		if (GetPartCount() > 1) {
			if (hashresult != GetPartHash(partnumber)) {
				AddDebugLogLineN(logPartFile, CFormat( wxT("%s: Expected hash of part %d: %s")) % GetFileName() % partnumber % GetPartHash(partnumber).Encode() );
				AddDebugLogLineN(logPartFile, CFormat( wxT("%s: Actual   hash of part %d: %s")) % GetFileName() % partnumber % hashresult.Encode() );
				return false;
			} else {
				return true;
			}
		} else {
			if (hashresult != m_abyFileHash) {
				return false;
			} else {
				return true;
			}
		}
	}
}
bool CPartFile::IsCorruptedPart(uint16 partnumber)
{
	return std::find(m_corrupted_list.begin(), m_corrupted_list.end(), partnumber)
		!= m_corrupted_list.end();
}
void CPartFile::SetDownPriority(uint8 np, bool bSave, bool bRefresh)
{
	if ( m_iDownPriority != np ) {
		m_iDownPriority = np;
		UpdateDisplayedInfo(true);
	}
}
void CPartFile::StopFile(bool bCancel)
{
	// Kry - Need to set it here to get into SetStatus(status) correctly
	m_stopped = true;

	// Barry - Need to tell any connected clients to stop sending the file
	PauseFile();

	m_LastSearchTimeKad = 0;
	m_TotalSearchesKad = 0;

	RemoveAllSources(true);

	UpdateDisplayedInfo(true);
}
void CPartFile::StopPausedFile()
{
	// Once an hour, remove any sources for files which are no longer active downloads
	switch (GetStatus()) {
		case PS_PAUSED:
		case PS_INSUFFICIENT:
		case PS_ERROR:
			if (time(NULL) - m_iLastPausePurge > (60*60)) {
				m_iLastPausePurge = time(NULL);
				StopFile();
			}
			break;
		default:
			break;
	}

	// release file handle if unused for some time
	m_hpartfile.Release();
}
void CPartFile::PauseFile(bool bInsufficient)
{
	if ( status == PS_COMPLETE || status == PS_COMPLETING ) {
		return;
	}

	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);
		// If we were in the middle of searching, reset timer so they can resume searching.
		m_LastSearchTimeKad = 0;
	}

	m_iLastPausePurge = time(NULL);

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	CPacket packet( OP_CANCELTRANSFER, 0, OP_EDONKEYPROT );
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = it++->GetClient();
		if (cur_src->GetDownloadState() == DS_DOWNLOADING) {
			if (!cur_src->GetSentCancelTransfer()) {
				theStats::AddUpOverheadOther( packet.GetPacketSize() );
				AddDebugLogLineN( logLocalClient, wxT("Local Client: OP_CANCELTRANSFER to ") + cur_src->GetFullIP() );
				cur_src->SendPacket( &packet, false, true );
				cur_src->SetSentCancelTransfer( true );
			}
			cur_src->SetDownloadState(DS_ONQUEUE);
			// Allow immediate reconnect on resume
			cur_src->ResetLastAskedTime();
		}
	}

	m_insufficient = bInsufficient;
}
void CPartFile::ResumeFile()
{
	if ( status == PS_COMPLETE || status == PS_COMPLETING ) {
		return;
	}

	if ( m_insufficient && !CheckFreeDiskSpace() ) {
		// Still not enough free disk space
		return;
	}

	m_insufficient = false;

	m_lastsearchtime = 0;
	SetActive(theApp->IsConnected());

	if (m_gaplist.IsComplete() && (GetStatus() == PS_ERROR)) {
		// The file has already been hashed at this point
		CompleteFile(true);
	}

	UpdateDisplayedInfo(true);
}
bool CPartFile::CheckFreeDiskSpace( uint64 neededSpace )
{
	uint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if (free == static_cast<uint64>(wxInvalidOffset)) {
		// If GetFreeSpaceAt() fails, then the path probably does not exist.
		return false;
	}

	// The very least acceptable diskspace is a single PART
	if ( free < PARTSIZE ) {
		// Always fail in this case, since we risk losing data if we try to
		// write on a full partition.
		return false;
	}

	// All other checks are only made if the user has enabled them
	if ( thePrefs::IsCheckDiskspaceEnabled() ) {
		neededSpace += thePrefs::GetMinFreeDiskSpace();

		// Due to the existence of sparse files, we cannot assume that
		// writes within the file don't cause new blocks to be allocated.
		// Therefore, we have to simply stop writing the moment the limit has
		// been exceeded.
		return free >= neededSpace;
	}

	return true;
}
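
// Worked example of the check above, assuming PARTSIZE (~9.28 MB) and a hypothetical
// 100 MiB minimum-free-space preference; the numbers are illustrative only.
//
//	uint64 free   = 50ULL * 1024 * 1024;	// 50 MiB reported free
//	uint64 needed = 2ULL * 1024 * 1024;	// caller wants to write 2 MiB
//	// Hard floor: refuse whenever free < PARTSIZE, regardless of settings.
//	// With the user check enabled and GetMinFreeDiskSpace() == 100 MiB:
//	//	needed += 100 MiB  ->  free (50 MiB) < needed (102 MiB)  ->  returns false
//	// With the check disabled the call returns true, since 50 MiB >= PARTSIZE.
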
void CPartFile::SetLastAnsweredTime()
{
	m_ClientSrcAnswered = ::GetTickCount();
}


void CPartFile::SetLastAnsweredTimeTimeout()
{
	m_ClientSrcAnswered = 2 * CONNECTION_LATENCY + ::GetTickCount() - SOURCECLIENTREASKS;
}
CPacket* CPartFile::CreateSrcInfoPacket(const CUpDownClient* forClient, uint8 byRequestedVersion, uint16 nRequestedOptions)
{
	if ( m_SrcList.empty() ) {
		return NULL;
	}

	if (!IsPartFile()) {
		return CKnownFile::CreateSrcInfoPacket(forClient, byRequestedVersion, nRequestedOptions);
	}

	if (((forClient->GetRequestFile() != this)
		&& (forClient->GetUploadFile() != this)) || forClient->GetUploadFileID() != GetFileHash()) {
		wxString file1 = _("Unknown");
		if (forClient->GetRequestFile() && forClient->GetRequestFile()->GetFileName().IsOk()) {
			file1 = forClient->GetRequestFile()->GetFileName().GetPrintable();
		} else if (forClient->GetUploadFile() && forClient->GetUploadFile()->GetFileName().IsOk()) {
			file1 = forClient->GetUploadFile()->GetFileName().GetPrintable();
		}
		wxString file2 = _("Unknown");
		if (GetFileName().IsOk()) {
			file2 = GetFileName().GetPrintable();
		}
		AddDebugLogLineN(logPartFile, wxT("File mismatch on source packet (P) Sending: ") + file1 + wxT("  From: ") + file2);
		return NULL;
	}

	if ( !(GetStatus() == PS_READY || GetStatus() == PS_EMPTY)) {
		return NULL;
	}

	const BitVector& reqstatus = forClient->GetPartStatus();
	bool KnowNeededParts = !reqstatus.empty();
	//wxASSERT(rcvstatus.size() == GetPartCount()); // Obviously!
	if (KnowNeededParts && (reqstatus.size() != GetPartCount())) {
		// Yuck. Same file but different part count? Seriously fucked up.
		// This happens rather often with reqstatus.size() == 0. Don't log then.
		if (reqstatus.size()) {
			AddDebugLogLineN(logKnownFiles, CFormat(wxT("Impossible situation: different partcounts: %i (client) and %i (file) for %s")) % reqstatus.size() % GetPartCount() % GetFileName());
		}
		return NULL;
	}

	CMemFile data(1024);

	uint8 byUsedVersion;
	bool bIsSX2Packet;
	if (forClient->SupportsSourceExchange2() && byRequestedVersion > 0) {
		// the client uses SourceExchange2 and requested the highest version he knows,
		// and we send the highest version we know, but of course not higher than his request
		byUsedVersion = std::min(byRequestedVersion, (uint8)SOURCEEXCHANGE2_VERSION);
		bIsSX2Packet = true;
		data.WriteUInt8(byUsedVersion);

		// we don't support any special SX2 options yet, reserved for later use
		if (nRequestedOptions != 0) {
			AddDebugLogLineN(logKnownFiles, CFormat(wxT("Client requested unknown options for SourceExchange2: %u")) % nRequestedOptions);
		}
	} else {
		byUsedVersion = forClient->GetSourceExchange1Version();
		bIsSX2Packet = false;
		if (forClient->SupportsSourceExchange2()) {
			AddDebugLogLineN(logKnownFiles, wxT("Client which announced to support SX2 sent SX1 packet instead"));
		}
	}

	uint16 nCount = 0;

	data.WriteHash(m_abyFileHash);
	data.WriteUInt16(nCount);

	for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		CUpDownClient* cur_src = it->GetClient();
		bool bNeeded = false;

		int state = cur_src->GetDownloadState();
		int valid = ( state == DS_DOWNLOADING ) || ( state == DS_ONQUEUE && !cur_src->IsRemoteQueueFull() );

		if ( cur_src->HasLowID() || !valid ) {
			continue;
		}

		// only send sources which have needed parts for this client if possible
		const BitVector& srcstatus = cur_src->GetPartStatus();
		if ( !srcstatus.empty() ) {
			//wxASSERT(srcstatus.size() == GetPartCount()); // Obviously!
			if (srcstatus.size() != GetPartCount()) {
				continue;
			}

			if ( KnowNeededParts ) {
				// only send sources which have needed parts for this client
				for (int x = 0; x < GetPartCount(); ++x) {
					if (srcstatus.get(x) && !reqstatus.get(x)) {
						bNeeded = true;
						break;
					}
				}
			} else {
				// if we don't know the needed parts for this client, return any source;
				// currently a client sends its file status only after it has at least
				// one complete part
				if (srcstatus.size() != GetPartCount()) {
					continue;
				}

				for (int x = 0; x < GetPartCount(); ++x) {
					if (srcstatus.get(x)) {
						bNeeded = true;
						break;
					}
				}
			}
		}

		if (bNeeded) {
			++nCount;

			uint32 dwID;
			if (forClient->GetSourceExchange1Version() > 2) {
				dwID = cur_src->GetUserIDHybrid();
			} else {
				dwID = wxUINT32_SWAP_ALWAYS(cur_src->GetUserIDHybrid());
			}
			data.WriteUInt32(dwID);
			data.WriteUInt16(cur_src->GetUserPort());
			data.WriteUInt32(cur_src->GetServerIP());
			data.WriteUInt16(cur_src->GetServerPort());

			if (byUsedVersion >= 2) {
				data.WriteHash(cur_src->GetUserHash());
			}

			if (byUsedVersion >= 4) {
				// CryptSettings - SourceExchange V4
				// 1 CryptLayer Required
				// 1 CryptLayer Requested
				// 1 CryptLayer Supported
				const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
				const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
				const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
				const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
				data.WriteUInt8(byCryptOptions);
			}
		}
	}

	data.Seek(bIsSX2Packet ? 17 : 16, wxFromStart);
	data.WriteUInt16(nCount);

	CPacket* result = new CPacket(data, OP_EMULEPROT, bIsSX2Packet ? OP_ANSWERSOURCES2 : OP_ANSWERSOURCES);

	// 16+2+501*(4+2+4+2+16) = 14046 bytes max.
	if (result->GetPacketSize() > 354) {
		result->PackPacket();
	}

	return result;
}
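
// Sketch of the SourceExchange V4 crypt byte written above; the helper below is
// illustrative only. Three flags share one byte, the remaining upper bits stay reserved.
//
//	uint8 PackCryptOptions(bool required, bool requested, bool supported)
//	{
//		return (uint8)(((required ? 1 : 0) << 2) |
//			       ((requested ? 1 : 0) << 1) |
//			       ((supported ? 1 : 0) << 0));
//	}
//
// The receiving side (see AddClientSources() below) hands the byte unchanged to
// SetConnectOptions(), so bit 0 = supported, bit 1 = requested, bit 2 = required.
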
void CPartFile::AddClientSources(CMemFile* sources, unsigned nSourceFrom, uint8 uClientSXVersion, bool bSourceExchange2, const CUpDownClient* /*pClient*/)
{
	uint16 nCount = 0;
	uint8 uPacketSXVersion = 0;
	if (!bSourceExchange2) {
		nCount = sources->ReadUInt16();

		// Check if the data size matches the 'nCount' for v1 or v2 and, if necessary, correct the source
		// exchange version while reading the packet data. Otherwise we could experience a higher
		// chance of dealing with wrong source data, userhashes and finally duplicate sources.
		uint32 uDataSize = sources->GetLength() - sources->GetPosition();

		if ((uint32)(nCount*(4+2+4+2)) == uDataSize) {	// Checks if version 1 packet is correct size
			if (uClientSXVersion != 1) {
				return;
			}
			uPacketSXVersion = 1;
		} else if ((uint32)(nCount*(4+2+4+2+16)) == uDataSize) {	// Checks if version 2&3 packet is correct size
			if (uClientSXVersion == 2) {
				uPacketSXVersion = 2;
			} else if (uClientSXVersion > 2) {
				uPacketSXVersion = 3;
			} else {
				return;
			}
		} else if (nCount*(4+2+4+2+16+1) == uDataSize) {
			if (uClientSXVersion != 4 ) {
				return;
			}
			uPacketSXVersion = 4;
		} else {
			// If v5 inserts additional data (like v2), the above code will correctly filter those packets.
			// If v5 appends additional data after <count>(<Sources>)[count], we are in trouble with the
			// above code. Though a client which does not understand v5+ should never receive such a packet.
			AddDebugLogLineN(logClient, CFormat(wxT("Received invalid source exchange packet (v%u) of data size %u for %s")) % uClientSXVersion % uDataSize % GetFileName());
			return;
		}
	} else {
		// We only check if the version is known by us and do a quick sanity check on known versions
		// other than SX1; the packet will be ignored if any error appears, since it can't be a "misunderstanding" anymore
		if (uClientSXVersion > SOURCEEXCHANGE2_VERSION || uClientSXVersion == 0 ) {
			AddDebugLogLineN(logPartFile, CFormat(wxT("Invalid source exchange type version: %i")) % uClientSXVersion);
			return;
		}

		// all known versions use the first 2 bytes as count and unknown versions are already filtered above
		nCount = sources->ReadUInt16();
		uint32 uDataSize = (uint32)(sources->GetLength() - sources->GetPosition());
		bool bError = false;
		switch (uClientSXVersion) {
			case 1:
				bError = nCount*(4+2+4+2) != uDataSize;
				break;
			case 2:
			case 3:
				bError = nCount*(4+2+4+2+16) != uDataSize;
				break;
			case 4:
				bError = nCount*(4+2+4+2+16+1) != uDataSize;
				break;
		}

		if (bError) {
			AddDebugLogLineN(logPartFile, wxT("Invalid source exchange data size."));
			return;
		}
		uPacketSXVersion = uClientSXVersion;
	}

	for (uint16 i = 0; i != nCount; ++i) {

		uint32 dwID = sources->ReadUInt32();
		uint16 nPort = sources->ReadUInt16();
		uint32 dwServerIP = sources->ReadUInt32();
		uint16 nServerPort = sources->ReadUInt16();

		CMD4Hash userHash;
		if (uPacketSXVersion > 1) {
			userHash = sources->ReadHash();
		}

		uint8 byCryptOptions = 0;
		if (uPacketSXVersion >= 4) {
			byCryptOptions = sources->ReadUInt8();
		}

		// Clients send IDs in the Hybrid format so highID clients with *.*.*.0 won't be falsely switched to a lowID.
		uint32 dwIDED2K;
		if (uPacketSXVersion >= 3) {
			dwIDED2K = wxUINT32_SWAP_ALWAYS(dwID);
		} else {
			dwIDED2K = dwID;
		}

		// check the HighID(IP) - "Filter LAN IPs" and "IPfilter" the received sources IP addresses
		if (!IsLowID(dwID)) {
			if (!IsGoodIP(dwIDED2K, thePrefs::FilterLanIPs())) {
				// check for 0-IP, localhost and optionally for LAN addresses
				AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - bad IP")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
				continue;
			}
			if (theApp->ipfilter->IsFiltered(dwIDED2K)) {
				AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - IPFilter")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
				continue;
			}
			if (theApp->clientlist->IsBannedClient(dwIDED2K)) {
				continue;
			}
		}

		// additionally check for LowID and own IP
		if (!CanAddSource(dwID, nPort, dwServerIP, nServerPort, NULL, false)) {
			AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via source exchange")) % Uint32toStringIP(dwIDED2K));
			continue;
		}

		if (thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
			CUpDownClient* newsource = new CUpDownClient(nPort, dwID, dwServerIP, nServerPort, this, (uPacketSXVersion < 3), true);
			if (uPacketSXVersion > 1) {
				newsource->SetUserHash(userHash);
			}

			if (uPacketSXVersion >= 4) {
				newsource->SetConnectOptions(byCryptOptions, true, false);
			}

			newsource->SetSourceFrom((ESourceFrom)nSourceFrom);
			theApp->downloadqueue->CheckAndAddSource(this, newsource);
		}
	}
}
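
// Summary of the size validation above: every SourceExchange version uses a fixed
// per-source record, so the remaining payload must be an exact multiple of it.
//
//	SX1     : 4 (ID) + 2 (port) + 4 (server IP) + 2 (server port)	= 12 bytes
//	SX2/SX3 : 12 + 16 (user hash)					= 28 bytes
//	SX4     : 28 + 1 (crypt options byte)				= 29 bytes
//
// For example, a v4 packet announcing nCount == 10 is only parsed when exactly
// 10 * 29 == 290 bytes follow the count; any other size is logged and discarded.
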
void CPartFile::UpdateAutoDownPriority()
{
	if (!IsAutoDownPriority()) {
		return;
	}
	if (GetSourceCount() <= theApp->downloadqueue->GetRareFileThreshold()) {
		if ( GetDownPriority() != PR_HIGH )
			SetDownPriority(PR_HIGH, false, false);
	} else if (GetSourceCount() < theApp->downloadqueue->GetCommonFileThreshold()) {
		if ( GetDownPriority() != PR_NORMAL )
			SetDownPriority(PR_NORMAL, false, false);
	} else {
		if ( GetDownPriority() != PR_LOW )
			SetDownPriority(PR_LOW, false, false);
	}
}


// making this function return a higher value when more sources have the extended
// protocol will force you to ask a larger variety of people for sources

int CPartFile::GetCommonFilePenalty()
{
	//TODO: implement, but never return less than MINCOMMONPENALTY!
	return MINCOMMONPENALTY;
}
/* Barry - Replaces BlockReceived()

	Originally this only wrote to disk when a full 180k block
	had been received from a client, and only asked for data in
	180k blocks.

	This meant that on average 90k was lost for every connection
	to a client data source. That is a lot of wasted data.

	To reduce the lost data, packets are now written to a buffer
	and flushed to disk regularly regardless of size downloaded.
	This includes compressed packets.

	Data is also requested only where gaps are, not in 180k blocks.
	The requests will still not exceed 180k, but may be smaller to
	fill a gap.
*/

// Kry - transize is 32bits, no packet can be more than that (this is
// compressed size). Even 32bits is too much imho. As for the return size,
// look at the lenData below.
uint32 CPartFile::WriteToBuffer(uint32 transize, byte* data, uint64 start, uint64 end, Requested_Block_Struct* block, const CUpDownClient* client)
{
	// Increment transferred bytes counter for this file
	transferred += transize;

	// This is needed a few times
	// Kry - should not need a uint64 here - no block is larger than
	// 2GB even after being uncompressed.
	uint32 lenData = (uint32) (end - start + 1);

	if (lenData > transize) {
		m_iGainDueToCompression += lenData - transize;
	}

	// Occasionally packets are duplicated, no point writing it twice
	if (IsComplete(start, end)) {
		AddDebugLogLineN(logPartFile,
			CFormat(wxT("File '%s' has already been written from %u to %u"))
				% GetFileName() % start % end);
		return 0;
	}

	// security sanity check to make sure we do not write anything into an already hashed complete chunk
	const uint64 nStartChunk = start / PARTSIZE;
	const uint64 nEndChunk = end / PARTSIZE;
	if (IsComplete(nStartChunk)) {
		AddDebugLogLineN(logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (start): %u-%u; File=%s")) % start % end % GetFileName());
		return 0;
	} else if (nStartChunk != nEndChunk) {
		if (IsComplete(nEndChunk)) {
			AddDebugLogLineN(logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (end): %u-%u; File=%s")) % start % end % GetFileName());
			return 0;
		} else {
			AddDebugLogLineN(logPartFile, CFormat(wxT("Received data crosses chunk boundaries: %u-%u; File=%s")) % start % end % GetFileName());
		}
	}

	// log transfer information in our "blackbox"
	m_CorruptionBlackBox->TransferredData(start, end, client->GetIP());

	// Create a new buffered queue entry
	PartFileBufferedData *item = new PartFileBufferedData(m_hpartfile, data, start, end, block);

	// Add to the queue in the correct position (most likely the end)
	bool added = false;

	std::list<PartFileBufferedData*>::iterator it = m_BufferedData_list.begin();
	for (; it != m_BufferedData_list.end(); ++it) {
		PartFileBufferedData* queueItem = *it;

		if (item->end <= queueItem->end) {
			if (it != m_BufferedData_list.begin()) {
				added = true;
				m_BufferedData_list.insert(--it, item);
			}
			break;
		}
	}

	if (!added) {
		m_BufferedData_list.push_front(item);
	}

	// Increment buffer size marker
	m_nTotalBufferData += lenData;

	// Mark this small section of the file as filled
	FillGap(item->start, item->end);

	// Update the flushed mark on the requested block
	// The loop here is unfortunate but necessary to detect deleted blocks.
	std::list<Requested_Block_Struct*>::iterator it2 = m_requestedblocks_list.begin();
	for (; it2 != m_requestedblocks_list.end(); ++it2) {
		if (*it2 == item->block) {
			item->block->transferred += lenData;
		}
	}

	if (m_gaplist.IsComplete()) {
		FlushBuffer();
	}

	// Return the length of data written to the buffer
	return lenData;
}
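
// Worked example of the compression accounting above: transize is what travelled
// over the wire, lenData is what it expands to in the file. Figures are illustrative.
//
//	uint64 start = 0, end = 184320 - 1;		// one 180 KB block
//	uint32 transize = 150 * 1024;			// received as 150 KiB of packed data
//	uint32 lenData  = (uint32)(end - start + 1);	// 184320 bytes written to the file
//	// lenData > transize, so m_iGainDueToCompression grows by 30720 bytes.
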
void CPartFile::FlushBuffer(bool fromAICHRecoveryDataAvailable)
{
	m_nLastBufferFlushTime = GetTickCount();

	if (m_BufferedData_list.empty()) {
		return;
	}

	uint32 partCount = GetPartCount();
	// Remember which parts need to be checked at the end of the flush
	std::vector<bool> changedPart(partCount, false);

	// Ensure file is big enough to write data to (the last item will be the furthest from the start)
	if (!CheckFreeDiskSpace(m_nTotalBufferData)) {
		// Not enough free space to write the last item, bail
		AddLogLineC(CFormat( _("WARNING: Not enough free disk-space! Pausing file: %s") ) % GetFileName());

		PauseFile( true );
		return;
	}

	// Loop through queue
	while ( !m_BufferedData_list.empty() ) {
		// Get top item and remove it from the queue
		CScopedPtr<PartFileBufferedData> item(m_BufferedData_list.front());
		m_BufferedData_list.pop_front();

		// This is needed a few times
		wxASSERT((item->end - item->start) < 0xFFFFFFFF);
		uint32 lenData = (uint32)(item->end - item->start + 1);

		// SLUGFILLER: SafeHash - could be more than one part
		for (uint32 curpart = (item->start/PARTSIZE); curpart <= (item->end/PARTSIZE); ++curpart) {
			wxASSERT(curpart < partCount);
			changedPart[curpart] = true;
		}
		// SLUGFILLER: SafeHash

		// Go to the correct position in file and write block of data
		try {
			item->area.FlushAt(m_hpartfile, item->start, lenData);
			// Decrease buffer size
			m_nTotalBufferData -= lenData;
		} catch (const CIOFailureException& e) {
			AddDebugLogLineC(logPartFile, wxT("Error while saving part-file: ") + e.what());
			SetStatus(PS_ERROR);
			// No need to bang your head against it again and again if it has already failed.
			DeleteContents(m_BufferedData_list);
			m_nTotalBufferData = 0;
			return;
		}
	}

	// Update last-changed date
	m_lastDateChanged = wxDateTime::GetTimeNow();

	try {
		// Partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			// it's a "last chance" correction. the real bugfix has to be applied 'somewhere' else
			m_hpartfile.SetLength(GetFileSize());
		}
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile,
			CFormat(wxT("Error while truncating part-file (%s): %s"))
				% m_PartPath % e.what());
		SetStatus(PS_ERROR);
	}

	// Check each part of the file
	for (uint16 partNumber = 0; partNumber < partCount; ++partNumber) {
		if (changedPart[partNumber] == false) {
			continue;
		}

		uint32 partRange = GetPartSize(partNumber) - 1;

		// Is this 9MB part complete
		if (IsComplete(partNumber)) {
			if (!HashSinglePart(partNumber)) {
				AddLogLineC(CFormat(
					_("Downloaded part %i is corrupt in file: %s") ) % partNumber % GetFileName() );

				// add part to corrupted list, if not already there
				if (!IsCorruptedPart(partNumber)) {
					m_corrupted_list.push_back(partNumber);
				}
				// request AICH recovery data
				// Don't if called from the AICHRecovery. It's already there and would lead to an infinite recursion.
				if (!fromAICHRecoveryDataAvailable) {
					RequestAICHRecovery(partNumber);
				}
				// Reduce transferred amount by corrupt amount
				m_iLostDueToCorruption += (partRange + 1);
			} else {
				if (!m_hashsetneeded) {
					AddDebugLogLineN(logPartFile, CFormat(
						wxT("Finished part %u of '%s'")) % partNumber % GetFileName());
				}

				// tell the blackbox about the verified data
				m_CorruptionBlackBox->VerifiedData(true, partNumber, 0, partRange);

				// if this part was successfully completed (although ICH is active), remove from corrupted list
				EraseFirstValue(m_corrupted_list, partNumber);

				if (status == PS_EMPTY) {
					if (theApp->IsRunning()) { // may be called during shutdown!
						if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
							// Successfully completed part, make it available for sharing
							SetStatus(PS_READY);
							theApp->sharedfiles->SafeAddKFile(this);
						}
					}
				}
			}
		} else if ( IsCorruptedPart(partNumber) &&	// corrupted part:
				(thePrefs::IsICHEnabled()	// old ICH:  rehash whenever we have new data hoping it will be good now
				|| fromAICHRecoveryDataAvailable)) {	// new AICH: one rehash right before performing it (maybe it's already good)
			// Try to recover with minimal loss
			if (HashSinglePart(partNumber)) {
				++m_iTotalPacketsSavedDueToICH;

				uint64 uMissingInPart = m_gaplist.GetGapSize(partNumber);
				FillGap(partNumber);
				RemoveBlockFromList(PARTSIZE*partNumber, (PARTSIZE*partNumber + partRange));

				// tell the blackbox about the verified data
				m_CorruptionBlackBox->VerifiedData(true, partNumber, 0, partRange);

				// remove from corrupted list
				EraseFirstValue(m_corrupted_list, partNumber);

				AddLogLineC(CFormat( _("ICH: Recovered corrupted part %i for %s -> Saved bytes: %s") )
					% partNumber
					% GetFileName()
					% CastItoXBytes(uMissingInPart));

				if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
					if (status == PS_EMPTY) {
						// Successfully recovered part, make it available for sharing
						SetStatus(PS_READY);
						if (theApp->IsRunning()) // may be called during shutdown!
							theApp->sharedfiles->SafeAddKFile(this);
					}
				}
			}
		}
	}

	if (theApp->IsRunning()) { // may be called during shutdown!
		// Is this file finished?
		if (m_gaplist.IsComplete()) {
			CompleteFile(false);
		}
	}
}
// read data for upload, return false on error
bool CPartFile::ReadData(CFileArea & area, uint64 offset, uint32 toread)
{
	if (offset + toread > GetFileSize()) {
		AddDebugLogLineN(logPartFile, CFormat(wxT("tried to read %d bytes past eof of %s"))
			% (offset + toread - GetFileSize()) % GetFileName());
		return false;
	}

	area.ReadAt(m_hpartfile, offset, toread);
	// if it fails it throws (which the caller should catch)
	return true;
}
void CPartFile::UpdateFileRatingCommentAvail()
{
	bool prevComment = m_hasComment;
	int prevRating = m_iUserRating;

	m_hasComment = false;
	m_iUserRating = 0;
	int ratingCount = 0;

	SourceSet::iterator it = m_SrcList.begin();
	for (; it != m_SrcList.end(); ++it) {
		CUpDownClient* cur_src = it->GetClient();

		if (!cur_src->GetFileComment().IsEmpty()) {
			if (thePrefs::IsCommentFiltered(cur_src->GetFileComment())) {
				continue;
			}
			m_hasComment = true;
		}

		uint8 rating = cur_src->GetFileRating();
		if (rating) {
			wxASSERT(rating <= 5);

			ratingCount++;
			m_iUserRating += rating;
		}
	}

	if (ratingCount) {
		m_iUserRating /= ratingCount;
		wxASSERT(m_iUserRating > 0 && m_iUserRating <= 5);
	}

	if ((prevComment != m_hasComment) || (prevRating != m_iUserRating)) {
		UpdateDisplayedInfo();
	}
}
void CPartFile::SetCategory(uint8 cat)
{
	wxASSERT( cat < theApp->glob_prefs->GetCatCount() );

	m_category = cat;
}
bool CPartFile::RemoveSource(CUpDownClient* toremove, bool updatewindow, bool bDoStatsUpdate)
{
	wxASSERT( toremove );

	bool result = theApp->downloadqueue->RemoveSource( toremove, updatewindow, bDoStatsUpdate );

	// Check if the client should be deleted, but not if the client is already dying
	if ( !toremove->GetSocket() && !toremove->HasBeenDeleted() ) {
		if ( toremove->Disconnected(wxT("RemoveSource - purged")) ) {
			toremove->Safe_Delete();
		}
	}

	return result;
}
void CPartFile::AddDownloadingSource(CUpDownClient* client)
{
	CClientRefList::iterator it =
		std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), CCLIENTREF(client, wxEmptyString));
	if (it == m_downloadingSourcesList.end()) {
		m_downloadingSourcesList.push_back(CCLIENTREF(client, wxT("CPartFile::AddDownloadingSource")));
	}
}


void CPartFile::RemoveDownloadingSource(CUpDownClient* client)
{
	CClientRefList::iterator it =
		std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), CCLIENTREF(client, wxEmptyString));
	if (it != m_downloadingSourcesList.end()) {
		m_downloadingSourcesList.erase(it);
	}
}
uint64 CPartFile::GetNeededSpace()
{
	try {
		uint64 length = m_hpartfile.GetLength();

		if (length > GetFileSize()) {
			return 0;	// Shouldn't happen, but just in case
		}

		return GetFileSize() - length;
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile,
			CFormat(wxT("Error while retrieving file-length (%s): %s"))
				% m_PartPath % e.what());
		SetStatus(PS_ERROR);
		return 0;
	}
}
void CPartFile::SetStatus(uint8 in)
{
	// PAUSED and INSUFFICIENT have extra flag variables m_paused and m_insufficient
	// - they are never to be stored in status
	wxASSERT( in != PS_PAUSED && in != PS_INSUFFICIENT );

	status = in;

	if (theApp->IsRunning()) {
		UpdateDisplayedInfo( true );

		if ( thePrefs::ShowCatTabInfos() ) {
			Notify_ShowUpdateCatTabTitles();
		}
		Notify_DownloadCtrlSort();
	}
}
void CPartFile::RequestAICHRecovery(uint16 nPart)
{
	if (	!m_pAICHHashSet->HasValidMasterHash() ||
		(m_pAICHHashSet->GetStatus() != AICH_TRUSTED && m_pAICHHashSet->GetStatus() != AICH_VERIFIED)) {
		AddDebugLogLineN( logAICHRecovery, wxT("Unable to request AICH Recoverydata because we have no trusted Masterhash") );
		return;
	}
	if (GetPartSize(nPart) <= EMBLOCKSIZE)
		return;
	if (CAICHHashSet::IsClientRequestPending(this, nPart)) {
		AddDebugLogLineN( logAICHRecovery, wxT("RequestAICHRecovery: Already a request for this part pending"));
		return;
	}

	// first check if we already have the recovery data, no need to re-request it then
	if (m_pAICHHashSet->IsPartDataAvailable(nPart*PARTSIZE)) {
		AddDebugLogLineN( logAICHRecovery, wxT("Found PartRecoveryData in memory"));
		AICHRecoveryDataAvailable(nPart);
		return;
	}

	wxASSERT( nPart < GetPartCount() );
	// find some random client which supports AICH to ask for the blocks
	// first let's see how many we have at all; we strongly prefer high-ID clients
	uint32 cAICHClients = 0;
	uint32 cAICHLowIDClients = 0;
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		CUpDownClient* pCurClient = it->GetClient();
		if (	pCurClient->IsSupportingAICH() &&
			pCurClient->GetReqFileAICHHash() != NULL &&
			!pCurClient->IsAICHReqPending()
			&& (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
		{
			if (pCurClient->HasLowID()) {
				++cAICHLowIDClients;
			} else {
				++cAICHClients;
			}
		}
	}
	if ((cAICHClients | cAICHLowIDClients) == 0) {
		AddDebugLogLineN( logAICHRecovery, wxT("Unable to request AICH Recoverydata because found no client who supports it and has the same hash as the trusted one"));
		return;
	}

	uint32 nSeclectedClient;
	if (cAICHClients > 0) {
		nSeclectedClient = (rand() % cAICHClients) + 1;
	} else {
		nSeclectedClient = (rand() % cAICHLowIDClients) + 1;
	}
	CUpDownClient* pClient = NULL;
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		CUpDownClient* pCurClient = it->GetClient();
		if (pCurClient->IsSupportingAICH() && pCurClient->GetReqFileAICHHash() != NULL && !pCurClient->IsAICHReqPending()
			&& (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
		{
			if (cAICHClients > 0) {
				if (!pCurClient->HasLowID())
					nSeclectedClient--;
			} else {
				wxASSERT( pCurClient->HasLowID());
				nSeclectedClient--;
			}
			if (nSeclectedClient == 0) {
				pClient = pCurClient;
				break;
			}
		}
	}
	if (pClient == NULL) {
		return;
	}

	AddDebugLogLineN( logAICHRecovery, CFormat( wxT("Requesting AICH Hash (%s) from client %s") ) % ( cAICHClients ? wxT("HighId") : wxT("LowID") ) % pClient->GetClientFullInfo() );
	pClient->SendAICHRequest(this, nPart);
}
void CPartFile::AICHRecoveryDataAvailable(uint16 nPart)
{
	if (GetPartCount() < nPart) {
		return;
	}

	uint32 length = GetPartSize(nPart);
	// if the part was already ok, it would now be complete
	if (IsComplete(nPart)) {
		AddDebugLogLineN(logAICHRecovery, CFormat(wxT("Processing AICH Recovery data: The part (%u) is already complete, canceling")) % nPart);
		return;
	}

	CAICHHashTree* pVerifiedHash = m_pAICHHashSet->m_pHashTree.FindHash(nPart*PARTSIZE, length);
	if (pVerifiedHash == NULL || !pVerifiedHash->GetHashValid()) {
		AddDebugLogLineC( logAICHRecovery, wxT("Processing AICH Recovery data: Unable to get verified hash from hashset (should never happen)") );
		return;
	}
	CAICHHashTree htOurHash(pVerifiedHash->GetNDataSize(), pVerifiedHash->GetIsLeftBranch(), pVerifiedHash->GetNBaseSize());
	try {
		CreateHashFromFile(m_hpartfile, PARTSIZE * nPart, length, NULL, &htOurHash);
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logAICHRecovery,
			CFormat(wxT("IO failure while hashing part-file '%s': %s"))
				% m_hpartfile.GetFilePath() % e.what());
		SetStatus(PS_ERROR);
		return;
	}

	if (!htOurHash.GetHashValid()) {
		AddDebugLogLineN( logAICHRecovery, wxT("Processing AICH Recovery data: Failed to retrieve AICH Hashset of corrupt part") );
		return;
	}

	// now compare the hash we just computed to the verified hash and re-add all blocks which are ok
	uint32 nRecovered = 0;
	for (uint32 pos = 0; pos < length; pos += EMBLOCKSIZE) {
		const uint32 nBlockSize = min<uint32>(EMBLOCKSIZE, length - pos);
		CAICHHashTree* pVerifiedBlock = pVerifiedHash->FindHash(pos, nBlockSize);
		CAICHHashTree* pOurBlock = htOurHash.FindHash(pos, nBlockSize);
		if ( pVerifiedBlock == NULL || pOurBlock == NULL || !pVerifiedBlock->GetHashValid() || !pOurBlock->GetHashValid()) {
			continue;
		}
		if (pOurBlock->GetHash() == pVerifiedBlock->GetHash()) {
			FillGap(PARTSIZE*nPart+pos, PARTSIZE*nPart + pos + (nBlockSize-1));
			RemoveBlockFromList(PARTSIZE*nPart, PARTSIZE*nPart + (nBlockSize-1));
			nRecovered += nBlockSize;
			// tell the blackbox about the verified data
			m_CorruptionBlackBox->VerifiedData(true, nPart, pos, pos + nBlockSize - 1);
		} else {
			// inform our "blackbox" about the corrupted block which may ban clients who sent it
			m_CorruptionBlackBox->VerifiedData(false, nPart, pos, pos + nBlockSize - 1);
		}
	}
	m_CorruptionBlackBox->EvaluateData();

	// ok now some sanity checks
	if (IsComplete(nPart)) {
		// this is bad, but it could probably happen under some rare circumstances
		// make sure that MD4 agrees to this fact too
		if (!HashSinglePart(nPart)) {
			AddDebugLogLineN(logAICHRecovery,
				CFormat(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering - but MD4 says it's corrupt! Setting hashset to error state, deleting part")) % nPart);
			// now we are fu... unhappy
			m_pAICHHashSet->SetStatus(AICH_ERROR);
		} else {
			AddDebugLogLineN(logAICHRecovery,
				CFormat(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering and MD4 agrees")) % nPart);
			if (status == PS_EMPTY && theApp->IsRunning()) {
				if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
					// Successfully recovered part, make it available for sharing
					SetStatus(PS_READY);
					theApp->sharedfiles->SafeAddKFile(this);
				}
			}

			if (theApp->IsRunning()) {
				// Is this file finished?
				if (m_gaplist.IsComplete()) {
					CompleteFile(false);
				}
			}
		}
	} // end sanity check

	// We did the best we could. If it's still incomplete, then no need to keep
	// bashing it with ICH. So remove it from the list of corrupted parts.
	EraseFirstValue(m_corrupted_list, nPart);

	// make sure the user appreciates our great recovering work :P
	AddDebugLogLineC( logAICHRecovery, CFormat(
		wxT("AICH successfully recovered %s of %s from part %u for %s") )
		% CastItoXBytes(nRecovered)
		% CastItoXBytes(length)
		% nPart
		% GetFileName() );
}
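
// Note on the recovery granularity used above: the comparison runs per EMBLOCKSIZE
// (180 KB) block inside the ~9.28 MB part, so only mismatching blocks stay missing.
// Schematically (illustrative, mirrors the loop above):
//
//	for (uint32 pos = 0; pos < length; pos += EMBLOCKSIZE) {
//		uint32 nBlockSize = min<uint32>(EMBLOCKSIZE, length - pos);
//		// Blocks whose fresh hash matches the verified tree are re-marked as
//		// filled (FillGap) and counted in nRecovered; mismatching blocks are
//		// left as gaps and reported to the CorruptionBlackBox.
//	}
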
void CPartFile::ClientStateChanged( int oldState, int newState )
{
	if ( oldState == newState )
		return;

	// If the state is -1, then it's an entirely new item
	if ( oldState != -1 ) {
		// Was the old state a valid state?
		if ( oldState == DS_ONQUEUE || oldState == DS_DOWNLOADING ) {
			m_validSources--;
		} else {
			if ( oldState == DS_CONNECTED /* || oldState == DS_REMOTEQUEUEFULL */ ) {
				m_validSources--;
			}

			m_notCurrentSources--;
		}
	}

	// If the state is -1, then the source is being removed
	if ( newState != -1 ) {
		// Is the new state a valid state?
		if ( newState == DS_ONQUEUE || newState == DS_DOWNLOADING ) {
			++m_validSources;
		} else {
			if ( newState == DS_CONNECTED /* || newState == DS_REMOTEQUEUEFULL */ ) {
				++m_validSources;
			}

			++m_notCurrentSources;
		}
	}
}
bool CPartFile::AddSource( CUpDownClient* client )
{
	if (m_SrcList.insert(CCLIENTREF(client, wxT("CPartFile::AddSource"))).second) {
		theStats::AddFoundSource();
		theStats::AddSourceOrigin(client->GetSourceFrom());
		return true;
	} else {
		return false;
	}
}


bool CPartFile::DelSource( CUpDownClient* client )
{
	if (m_SrcList.erase(CCLIENTREF(client, wxEmptyString))) {
		theStats::RemoveSourceOrigin(client->GetSourceFrom());
		theStats::RemoveFoundSource();
		return true;
	} else {
		return false;
	}
}
void CPartFile::UpdatePartsFrequency( CUpDownClient* client, bool increment )
{
	const BitVector& freq = client->GetPartStatus();

	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

		if ( !increment ) {
			return;
		}
	}

	unsigned int size = freq.size();
	if ( size != m_SrcpartFrequency.size() ) {
		return;
	}

	if ( increment ) {
		for ( unsigned int i = 0; i < size; i++ ) {
			if ( freq.get(i) ) {
				m_SrcpartFrequency[i]++;
			}
		}
	} else {
		for ( unsigned int i = 0; i < size; i++ ) {
			if ( freq.get(i) ) {
				m_SrcpartFrequency[i]--;
			}
		}
	}
}
void CPartFile::GetRatingAndComments(FileRatingList& list) const
{
	list.clear();
	// This can be pre-processed, but is it worth the CPU?
	CPartFile::SourceSet::const_iterator it = m_SrcList.begin();
	for ( ; it != m_SrcList.end(); ++it ) {
		CUpDownClient *cur_src = it->GetClient();
		if (cur_src->GetFileComment().Length() > 0 || cur_src->GetFileRating() > 0) {
			// AddDebugLogLineN(logPartFile, wxString(wxT("found a comment for ")) << GetFileName());
			list.push_back(SFileRating(*cur_src));
		}
	}
}
#else	// CLIENT_GUI

CPartFile::CPartFile(CEC_PartFile_Tag *tag) : CKnownFile(tag)
{
	Init();

	SetFileName(CPath(tag->FileName()));
	m_abyFileHash = tag->FileHash();
	SetFileSize(tag->SizeFull());
	m_gaplist.Init(GetFileSize(), true);	// Init empty
	m_partmetfilename = CPath(tag->PartMetName());
	m_fullname = m_partmetfilename;		// We have only the met number, so show it without path in the detail dialog.

	m_SrcpartFrequency.insert(m_SrcpartFrequency.end(), GetPartCount(), 0);

	// these are only in CLIENT_GUI and not covered by Init()
	m_iDownPriorityEC = 0;
	m_a4af_source_count = 0;
}

/*
 * Remote gui specific code
 */
CPartFile::~CPartFile()
{
}


void CPartFile::GetRatingAndComments(FileRatingList& list) const
{
	list = m_FileRatingList;
}


void CPartFile::SetCategory(uint8 cat)
{
	m_category = cat;
}


bool CPartFile::AddSource(CUpDownClient* client)
{
	return m_SrcList.insert(CCLIENTREF(client, wxT("CPartFile::AddSource"))).second != 0;
}


bool CPartFile::DelSource(CUpDownClient* client)
{
	return m_SrcList.erase(CCLIENTREF(client, wxEmptyString)) != 0;
}

#endif // !CLIENT_GUI
void CPartFile::UpdateDisplayedInfo(bool force)
{
	uint32 curTick = ::GetTickCount();

	// Wait 1.5s between each redraw
	if (force || curTick - m_lastRefreshedDLDisplay > MINWAIT_BEFORE_DLDISPLAY_WINDOWUPDATE ) {
		Notify_DownloadCtrlUpdateItem(this);
		m_lastRefreshedDLDisplay = curTick;
	}
}
void CPartFile::Init()
{
	m_lastsearchtime = 0;
	lastpurgetime = ::GetTickCount();
	m_insufficient = false;

	m_iLastPausePurge = time(NULL);

	if (thePrefs::GetNewAutoDown()) {
		m_iDownPriority = PR_HIGH;
		m_bAutoDownPriority = true;
	} else {
		m_iDownPriority = PR_NORMAL;
		m_bAutoDownPriority = false;
	}

	transferingsrc = 0;	// new

	m_hashsetneeded = true;
	percentcompleted = 0;
	lastseencomplete = 0;
	m_availablePartsCount = 0;
	m_ClientSrcAnswered = 0;
	m_LastNoNeededCheck = 0;
	m_nTotalBufferData = 0;
	m_nLastBufferFlushTime = 0;
	m_bPercentUpdated = false;
	m_iGainDueToCompression = 0;
	m_iLostDueToCorruption = 0;
	m_iTotalPacketsSavedDueToICH = 0;
	m_lastRefreshedDLDisplay = 0;
	m_nDlActiveTime = 0;
	m_is_A4AF_auto = false;
	m_localSrcReqQueued = false;
	m_nCompleteSourcesTime = time(NULL);
	m_nCompleteSourcesCount = 0;
	m_nCompleteSourcesCountLo = 0;
	m_nCompleteSourcesCountHi = 0;
	m_notCurrentSources = 0;
	m_LastSearchTimeKad = 0;
	m_TotalSearchesKad = 0;

	m_CorruptionBlackBox = new CCorruptionBlackBox();
}
wxString CPartFile::getPartfileStatus() const
{
	wxString mybuffer;

	if ((status == PS_HASHING) || (status == PS_WAITINGFORHASH)) {
		mybuffer = _("Hashing");
	} else if (status == PS_ALLOCATING) {
		mybuffer = _("Allocating");
	} else {
		switch (GetStatus()) {
			case PS_COMPLETING:
				mybuffer = _("Completing");
				break;
			case PS_COMPLETE:
				mybuffer = _("Complete");
				break;
			case PS_PAUSED:
				mybuffer = _("Paused");
				break;
			case PS_ERROR:
				mybuffer = _("Erroneous");
				break;
			case PS_INSUFFICIENT:
				mybuffer = _("Insufficient disk space");
				break;
			default:
				if (GetTransferingSrcCount() > 0) {
					mybuffer = _("Downloading");
				} else {
					mybuffer = _("Waiting");
				}
				break;
		}
	}

	if (m_stopped && (GetStatus() != PS_COMPLETE)) {
		mybuffer = _("Stopped");
	}

	return mybuffer;
}
int CPartFile::getPartfileStatusRang() const
{
	int tempstatus = 0;
	if (GetTransferingSrcCount() == 0) tempstatus = 1;
	switch (GetStatus()) {
		case PS_HASHING:
		case PS_WAITINGFORHASH:
		default:
			break;
	}
	return tempstatus;
}
wxString CPartFile::GetFeedback() const
{
	wxString retval = CKnownFile::GetFeedback();
	if (GetStatus() != PS_COMPLETE) {
		retval += CFormat(wxT("%s: %s (%.2f%%)\n%s: %u\n"))
			% _("Downloaded") % CastItoXBytes(GetCompletedSize()) % GetPercentCompleted() % _("Sources") % GetSourceCount();
	}
	return retval + _("Status") + wxT(": ") + getPartfileStatus() + wxT("\n");
}
sint32 CPartFile::getTimeRemaining() const
{
	if (GetKBpsDown() < 0.001) {
		return -1;
	} else {
		return((GetFileSize()-GetCompletedSize()) / ((int)(GetKBpsDown()*1024.0)));
	}
}
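
// Worked example of the estimate above (figures illustrative): remaining bytes
// divided by the current rate in bytes per second; no estimate is produced while
// the rate is below 0.001 kB/s.
//
//	uint64 remaining = 450ULL * 1024 * 1024;	// 450 MiB still missing
//	double kbps      = 120.0;			// GetKBpsDown()
//	sint32 seconds   = (sint32)(remaining / (int)(kbps * 1024.0));	// 3840 s = 64 min
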
bool CPartFile::PreviewAvailable()
{
	const uint64 minSizeForPreview = 256 * 1024;
	FileType type = GetFiletype(GetFileName());

	return	(type == ftVideo || type == ftAudio) &&
		GetFileSize() >= minSizeForPreview &&
		IsComplete(0, minSizeForPreview);
}
bool CPartFile::CheckShowItemInGivenCat(int inCategory)
{
	// first check if item belongs in this cat in principle
	if (inCategory > 0 && inCategory != GetCategory()) {
		return false;
	}

	// if yes apply filter
	bool show = true;

	switch (thePrefs::GetAllcatFilter()) {
		case acfAllOthers:
			show = GetCategory() == 0 || inCategory > 0;
			break;
		case acfIncomplete:
			show = IsPartFile();
			break;
		case acfCompleted:
			show = !IsPartFile();
			break;
		case acfWaiting:
			show =
				(GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
				GetTransferingSrcCount() == 0;
			break;
		case acfDownloading:
			show =
				(GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
				GetTransferingSrcCount() > 0;
			break;
		case acfErroneous:
			show = GetStatus() == PS_ERROR;
			break;
		case acfPaused:
			show = GetStatus() == PS_PAUSED && !IsStopped();
			break;
		case acfVideo:
			show = GetFiletype(GetFileName()) == ftVideo;
			break;
		case acfAudio:
			show = GetFiletype(GetFileName()) == ftAudio;
			break;
		case acfArchive:
			show = GetFiletype(GetFileName()) == ftArchive;
			break;
		case acfCDImages:
			show = GetFiletype(GetFileName()) == ftCDImage;
			break;
		case acfPictures:
			show = GetFiletype(GetFileName()) == ftPicture;
			break;
		case acfText:
			show = GetFiletype(GetFileName()) == ftText;
			break;
		case acfActive:
			show = !IsStopped() && GetStatus() != PS_PAUSED;
			break;
		default:
			break;
	}

	return show;
}
void CPartFile::RemoveCategory(uint8 cat)
{
	if (m_category == cat) {
		// Reset the category
		m_category = 0;
	} else if (m_category > cat) {
		// Set to the new position of the original category
		m_category--;
	}
}
void CPartFile::SetActive(bool bActive)
{
	time_t tNow = time(NULL);
	if (bActive) {
		if (theApp->IsConnected()) {
			if (m_tActivated == 0) {
				m_tActivated = tNow;
			}
		}
	} else {
		if (m_tActivated != 0) {
			m_nDlActiveTime += tNow - m_tActivated;
			m_tActivated = 0;
		}
	}
}
uint32 CPartFile::GetDlActiveTime() const
{
	uint32 nDlActiveTime = m_nDlActiveTime;
	if (m_tActivated != 0) {
		nDlActiveTime += time(NULL) - m_tActivated;
	}
	return nDlActiveTime;
}
uint16 CPartFile::GetPartMetNumber() const
{
	long nr;
	return m_partmetfilename.RemoveAllExt().GetRaw().ToLong(&nr) ? nr : 0;
}
void CPartFile::SetHashingProgress(uint16 part) const
{
	m_hashingProgress = part;
	Notify_DownloadCtrlUpdateItem(this);
}
uint8 CPartFile::GetStatus(bool ignorepause) const
{
	if (	(!m_paused && !m_insufficient) ||
		status == PS_ERROR ||
		status == PS_COMPLETING ||
		status == PS_COMPLETE ||
		ignorepause) {
		return status;
	} else if ( m_insufficient ) {
		return PS_INSUFFICIENT;
	} else {
		return PS_PAUSED;
	}
}
void CPartFile::AddDeadSource(const CUpDownClient* client)
{
	m_deadSources.AddDeadSource( client );
}


bool CPartFile::IsDeadSource(const CUpDownClient* client)
{
	return m_deadSources.IsDeadSource( client );
}
void CPartFile::SetFileName(const CPath& fileName)
{
	CKnownFile* pFile = theApp->sharedfiles->GetFileByID(GetFileHash());

	bool is_shared = (pFile && pFile == this);

	if (is_shared) {
		// The file is shared, we must clear the search keywords so we don't
		// publish the old name anymore.
		theApp->sharedfiles->RemoveKeywords(this);
	}

	CKnownFile::SetFileName(fileName);

	if (is_shared) {
		// And of course, we must advertise the new name if the file is shared.
		theApp->sharedfiles->AddKeywords(this);
	}

	UpdateDisplayedInfo(true);
}
uint16 CPartFile::GetMaxSources() const
{
	// As long as we don't import the per-file private max-sources setting,
	// this is simply the global preference.
	return thePrefs::GetMaxSourcePerFile();
}


uint16 CPartFile::GetMaxSourcePerFileSoft() const
{
	unsigned int temp = ((unsigned int)GetMaxSources() * 9L) / 10;
	if (temp > MAX_SOURCES_FILE_SOFT) {
		return MAX_SOURCES_FILE_SOFT;
	}
	return temp;
}


uint16 CPartFile::GetMaxSourcePerFileUDP() const
{
	unsigned int temp = ((unsigned int)GetMaxSources() * 3L) / 4;
	if (temp > MAX_SOURCES_FILE_UDP) {
		return MAX_SOURCES_FILE_UDP;
	}
	return temp;
}
#define DROP_FACTOR 2

CUpDownClient* CPartFile::GetSlowerDownloadingClient(uint32 speed, CUpDownClient* caller) {
//	printf("Start slower source calculation\n");
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = it++->GetClient();
		if ((cur_src->GetDownloadState() == DS_DOWNLOADING) && (cur_src != caller)) {
			uint32 factored_bytes_per_second = static_cast<uint32>(
				(cur_src->GetKBpsDown() * 1024) * DROP_FACTOR);
			if ( factored_bytes_per_second < speed ) {
//				printf("Selecting source %p to drop: %d < %d\n", cur_src, factored_bytes_per_second, speed);
//				printf("End slower source calculation\n");
				return cur_src;
			} else {
//				printf("Not selecting source %p to drop: %d > %d\n", cur_src, factored_bytes_per_second, speed);
			}
		}
	}
//	printf("End slower source calculation\n");
	return NULL;
}
void CPartFile::AllocationFinished()
{
	// see if it can be opened
	if (!m_hpartfile.Open(m_PartPath, CFile::read_write)) {
		AddLogLineN(CFormat(_("ERROR: Failed to open partfile '%s'")) % GetFullName());
		SetStatus(PS_ERROR);
	} else {
		// then close the handle again
		m_hpartfile.Release(true);
	}
}

// File_checked_for_headers