//
// This file is part of the aMule Project.
//
// Copyright (c) 2003-2011 aMule Team ( admin@amule.org / http://www.amule.org )
// Copyright (c) 2002-2011 Merkur ( devs@emule-project.net / http://www.emule-project.net )
//
// Any parts of this program derived from the xMule, lMule or eMule project,
// or contributed by third-party developers are copyrighted by their
// respective authors.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
//
#include "PartFile.h"		// Interface declarations.

#include "config.h"		// Needed for VERSION

#include <protocol/kad/Constants.h>
#include <protocol/ed2k/Client2Client/TCP.h>
#include <protocol/Protocols.h>
#include <common/DataFileVersion.h>
#include <common/Constants.h>
#include <tags/FileTags.h>

#include <wx/tokenzr.h>		// Needed for wxStringTokenizer

#include "KnownFileList.h"	// Needed for CKnownFileList
#include "CanceledFileList.h"
#include "UploadQueue.h"	// Needed for CFileHash
#include "IPFilter.h"		// Needed for CIPFilter
#include "Server.h"		// Needed for CServer
#include "ServerConnect.h"	// Needed for CServerConnect
#include "UpDownClientEC.h"	// Needed for CUpDownClient
#include "updownclient.h"	// Needed for CUpDownClient
#include "MemFile.h"		// Needed for CMemFile
#include "Preferences.h"	// Needed for CPreferences
#include "DownloadQueue.h"	// Needed for CDownloadQueue
#include "amule.h"		// Needed for theApp
#include "ED2KLink.h"		// Needed for CED2KLink
#include "Packet.h"		// Needed for CTag
#include "SearchList.h"		// Needed for CSearchFile
#include "ClientList.h"		// Needed for clientlist
#include "Statistics.h"		// Needed for theStats
#include <common/Format.h>	// Needed for CFormat
#include <common/FileFunctions.h>	// Needed for GetLastModificationTime
#include "ThreadTasks.h"	// Needed for CHashingTask/CCompletionTask/CAllocateFileTask
#include "GuiEvents.h"		// Needed for Notify_*
#include "DataToText.h"		// Needed for OriginToText()
#include "PlatformSpecific.h"	// Needed for CreateSparseFile()
#include "FileArea.h"		// Needed for CFileArea
#include "ScopedPtr.h"		// Needed for CScopedArray
#include "CorruptionBlackBox.h"

#include "kademlia/kademlia/Kademlia.h"
#include "kademlia/kademlia/Search.h"
SFileRating::SFileRating(const wxString &u, const wxString &f, sint16 r, const wxString &c)

SFileRating::SFileRating(const SFileRating &fr)
	UserName(fr.UserName),
	FileName(fr.FileName),

SFileRating::SFileRating(const CUpDownClient &client)
	UserName(client.GetUserName()),
	FileName(client.GetClientFilename()),
	Rating(client.GetFileRating()),
	Comment(client.GetFileComment())

SFileRating::~SFileRating()
class PartFileBufferedData

	CFileArea area;				// File area to be written
	uint64 start;				// This is the start offset of the data
	uint64 end;				// This is the end offset of the data
	Requested_Block_Struct *block;		// This is the requested block that this data relates to

	PartFileBufferedData(CFileAutoClose& file, byte* data, uint64 _start, uint64 _end, Requested_Block_Struct* _block)
		: start(_start), end(_end), block(_block)
		area.StartWriteAt(file, start, end - start + 1);
		memcpy(area.GetBuffer(), data, end - start + 1);

typedef std::list<Chunk> ChunkList;
CPartFile::CPartFile()

CPartFile::CPartFile(CSearchFile* searchresult)
	m_abyFileHash = searchresult->GetFileHash();
	SetFileName(searchresult->GetFileName());
	SetFileSize(searchresult->GetFileSize());

	for (unsigned int i = 0; i < searchresult->m_taglist.size(); ++i) {
		const CTag& pTag = searchresult->m_taglist[i];

		bool bTagAdded = false;
		if (pTag.GetNameID() == 0 && !pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {
				{ wxT(FT_ED2K_MEDIA_ARTIST),	2 },
				{ wxT(FT_ED2K_MEDIA_ALBUM),	2 },
				{ wxT(FT_ED2K_MEDIA_TITLE),	2 },
				{ wxT(FT_ED2K_MEDIA_LENGTH),	2 },
				{ wxT(FT_ED2K_MEDIA_BITRATE),	3 },
				{ wxT(FT_ED2K_MEDIA_CODEC),	2 }

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if ( pTag.GetType() == _aMetaTags[t].nType &&
				     (pTag.GetName() == _aMetaTags[t].pszName)) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

					// skip "length" tags with "0: 0" values
					if (pTag.GetName() == wxT(FT_ED2K_MEDIA_LENGTH)) {
						if (pTag.GetStr().IsSameAs(wxT("0: 0")) ||
						    pTag.GetStr().IsSameAs(wxT("0:0"))) {

					// skip "bitrate" tags with '0' values
					if ((pTag.GetName() == wxT(FT_ED2K_MEDIA_BITRATE)) && !pTag.GetInt()) {

					AddDebugLogLineN( logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);

		} else if (pTag.GetNameID() != 0 && pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if (pTag.GetType() == _aMetaTags[t].nType && pTag.GetNameID() == _aMetaTags[t].nID) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

					AddDebugLogLineN( logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);

			AddDebugLogLineN( logPartFile,
				wxT("CPartFile::CPartFile(CSearchFile*): ignored tag ") +
				pTag.GetFullInfo() );
CPartFile::CPartFile(const CED2KFileLink* fileLink)
	SetFileName(CPath(fileLink->GetName()));
	SetFileSize(fileLink->GetSize());
	m_abyFileHash = fileLink->GetHashKey();

	if (fileLink->m_hashset) {
		if (!LoadHashsetFromFile(fileLink->m_hashset, true)) {
			AddDebugLogLineC(logPartFile, wxT("eD2K link contained invalid hashset: ") + fileLink->GetLink());
CPartFile::~CPartFile()
	// if it's not opened, it was completed or deleted
	if (m_hpartfile.IsOpened()) {

	// Update met file (with current directory entry)

	DeleteContents(m_BufferedData_list);
	delete m_CorruptionBlackBox;

	wxASSERT(m_SrcList.empty());
	wxASSERT(m_A4AFsrclist.empty());
void CPartFile::CreatePartFile(bool isImporting)
	// use lowest free partfilenumber for free file (InterCeptor)
		m_partmetfilename = CPath(CFormat(wxT("%03i.part.met")) % i);
		m_fullname = thePrefs::GetTempDir().JoinPaths(m_partmetfilename);
	} while (m_fullname.FileExists());

	m_CorruptionBlackBox->SetPartFileInfo(GetFileName().GetPrintable(), m_partmetfilename.RemoveAllExt().GetPrintable());

	m_gaplist.Init(GetFileSize(), true);	// Init empty

	m_PartPath = m_fullname.RemoveExt();

	if (thePrefs::GetAllocFullFile() || !thePrefs::CreateFilesSparse()) {
		fileCreated = m_hpartfile.Create(m_PartPath, true);
		fileCreated = PlatformSpecific::CreateSparseFile(m_PartPath, GetFileSize());
		AddLogLineN(_("ERROR: Failed to create partfile"));

	SetFilePath(thePrefs::GetTempDir());

	if (!isImporting && thePrefs::GetAllocFullFile()) {
		SetStatus(PS_ALLOCATING);
		CThreadScheduler::AddTask(new CAllocateFileTask(this, thePrefs::AddNewFilesPaused()));
		AllocationFinished();

	m_hashsetneeded = (GetED2KPartHashCount() > 0);

	SetActive(theApp->IsConnected());
uint8 CPartFile::LoadPartFile(const CPath& in_directory, const CPath& filename, bool from_backup, bool getsizeonly)
	bool isnewstyle = false;
	uint8 version, partmettype = PMT_UNKNOWN;

	std::map<uint16, Gap_Struct*> gap_map;	// Slugfiller

	m_partmetfilename = filename;
	m_CorruptionBlackBox->SetPartFileInfo(GetFileName().GetPrintable(), m_partmetfilename.RemoveAllExt().GetPrintable());
	m_filePath = in_directory;
	m_fullname = m_filePath.JoinPaths(m_partmetfilename);
	m_PartPath = m_fullname.RemoveExt();

	// read file data from the part.met file
	CPath curMetFilename = m_fullname;
		curMetFilename = curMetFilename.AppendExt(PARTMET_BAK_EXT);
		AddLogLineN(CFormat( _("Trying to load backup of met-file from %s") )

	CFile metFile(curMetFilename, CFile::read);
	if (!metFile.IsOpened()) {
		AddLogLineN(CFormat( _("ERROR: Failed to open part.met file: %s ==> %s") )
	} else if (metFile.GetLength() == 0) {
		AddLogLineN(CFormat( _("ERROR: part.met file is 0 size: %s ==> %s") )

		version = metFile.ReadUInt8();
		if (version != PARTFILE_VERSION && version != PARTFILE_SPLITTEDVERSION && version != PARTFILE_VERSION_LARGEFILE){
			//if (version == 83) return ImportShareazaTempFile(...)
			AddLogLineN(CFormat( _("ERROR: Invalid part.met file version: %s ==> %s") )

		isnewstyle = (version == PARTFILE_SPLITTEDVERSION);
		partmettype = isnewstyle ? PMT_SPLITTED : PMT_DEFAULTOLD;

		if (version == PARTFILE_VERSION) {	// Do we still need this check?
			uint8 test[4];	// It will fail for certain files.
			metFile.Seek(24, wxFromStart);
			metFile.Read(test, 4);

			metFile.Seek(1, wxFromStart);
			if (test[0]==0 && test[1]==0 && test[2]==2 && test[3]==1) {
				isnewstyle = true;	// edonkey's so called "old part style"
				partmettype = PMT_NEWOLD;

			uint32 temp = metFile.ReadUInt32();

			if (temp == 0) {	// 0.48 partmets - different again
				LoadHashsetFromFile(&metFile, false);

				metFile.Seek(2, wxFromStart);
				LoadDateFromFile(&metFile);
				m_abyFileHash = metFile.ReadHash();

			LoadDateFromFile(&metFile);
			LoadHashsetFromFile(&metFile, false);

		uint32 tagcount = metFile.ReadUInt32();

		for (uint32 j = 0; j < tagcount; ++j) {
			CTag newtag(metFile, true);
				(newtag.GetNameID() == FT_FILESIZE ||
				 newtag.GetNameID() == FT_FILENAME))) {
				switch (newtag.GetNameID()) {
						if (!GetFileName().IsOk()) {
							// If it's not empty, we already loaded the unicoded one
							SetFileName(CPath(newtag.GetStr()));

					case FT_LASTSEENCOMPLETE: {
						lastseencomplete = newtag.GetInt();

						SetFileSize(newtag.GetInt());

					case FT_TRANSFERRED: {
						transferred = newtag.GetInt();

						//#warning needs setfiletype string
						//SetFileType(newtag.GetStr());

						m_category = newtag.GetInt();
						if (m_category > theApp->glob_prefs->GetCatCount() - 1) {

					case FT_OLDDLPRIORITY:
					case FT_DLPRIORITY: {
							m_iDownPriority = newtag.GetInt();
							if (m_iDownPriority == PR_AUTO) {
								m_iDownPriority = PR_HIGH;
								SetAutoDownPriority(true);

								if ( m_iDownPriority != PR_LOW &&
								     m_iDownPriority != PR_NORMAL &&
								     m_iDownPriority != PR_HIGH )
									m_iDownPriority = PR_NORMAL;
								SetAutoDownPriority(false);

						m_paused = (newtag.GetInt() == 1);
						m_stopped = m_paused;

					case FT_OLDULPRIORITY:
					case FT_ULPRIORITY: {
							SetUpPriority(newtag.GetInt(), false);
							if (GetUpPriority() == PR_AUTO) {
								SetUpPriority(PR_HIGH, false);
								SetAutoUpPriority(true);
								SetAutoUpPriority(false);

					case FT_KADLASTPUBLISHSRC: {
						SetLastPublishTimeKadSrc(newtag.GetInt(), 0);
						if (GetLastPublishTimeKadSrc() > (uint32)time(NULL) + KADEMLIAREPUBLISHTIMES) {
							// There may be a possibility of an older client that saved a random number here.. This will check for that..
							SetLastPublishTimeKadSrc(0, 0);

					case FT_KADLASTPUBLISHNOTES: {
						SetLastPublishTimeKadNotes(newtag.GetInt());

					// old tags: as long as they are not needed, take the chance to purge them
					case FT_KADLASTPUBLISHKEY:
					case FT_PARTFILENAME:

					case FT_DL_ACTIVE_TIME:
						if (newtag.IsInt()) {
							m_nDlActiveTime = newtag.GetInt();

					case FT_CORRUPTEDPARTS: {
						wxASSERT(m_corrupted_list.empty());
						wxString strCorruptedParts(newtag.GetStr());
						wxStringTokenizer tokenizer(strCorruptedParts, wxT(","));
						while ( tokenizer.HasMoreTokens() ) {
							wxString token = tokenizer.GetNextToken();
							if (token.ToULong(&uPart)) {
								if (uPart < GetPartCount() && !IsCorruptedPart(uPart)) {
									m_corrupted_list.push_back(uPart);

							hash.DecodeBase32(newtag.GetStr()) == CAICHHash::GetHashSize();
						wxASSERT(hashSizeOk);
							m_pAICHHashSet->SetMasterHash(hash, AICH_VERIFIED);

					case FT_ATTRANSFERRED: {
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (uint64)newtag.GetInt());

					case FT_ATTRANSFERREDHI: {
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (((uint64)newtag.GetInt()) << 32));

					case FT_ATREQUESTED: {
						statistic.SetAllTimeRequests(newtag.GetInt());

						statistic.SetAllTimeAccepts(newtag.GetInt());

						// Start Changes by Slugfiller for better exception handling

						wxCharBuffer tag_ansi_name = newtag.GetName().ToAscii();
						char gap_mark = tag_ansi_name.data() ? tag_ansi_name[0u] : 0;
						if ( newtag.IsInt() && (newtag.GetName().Length() > 1) &&
						     ((gap_mark == FT_GAPSTART) ||
						      (gap_mark == FT_GAPEND))) {
							Gap_Struct* gap = NULL;
							unsigned long int gapkey;
							if (newtag.GetName().Mid(1).ToULong(&gapkey)) {
								if ( gap_map.find( gapkey ) == gap_map.end() ) {
									gap = new Gap_Struct;
									gap_map[gapkey] = gap;
									gap->start = (uint64)-1;
									gap->end = (uint64)-1;
									gap = gap_map[gapkey];
								if (gap_mark == FT_GAPSTART) {
									gap->start = newtag.GetInt();
								if (gap_mark == FT_GAPEND) {
									gap->end = newtag.GetInt() - 1;
								AddDebugLogLineN(logPartFile, wxT("Wrong gap map key while reading met file!"));
						// End Changes by Slugfiller for better exception handling

							m_taglist.push_back(newtag);

				// Nothing. Else, nothing.

		// load the hashsets from the hybridstylepartmet
		if (isnewstyle && !getsizeonly && (metFile.GetPosition() < metFile.GetLength()) ) {
			metFile.Seek(1, wxFromCurrent);

			uint16 parts = GetPartCount();	// assuming we will get all hashsets

			for (uint16 i = 0; i < parts && (metFile.GetPosition()+16 < metFile.GetLength()); ++i){
				CMD4Hash cur_hash = metFile.ReadHash();
				m_hashlist.push_back(cur_hash);

			if (!m_hashlist.empty()) {
				CreateHashFromHashlist(m_hashlist, &checkhash);

			if (m_abyFileHash != checkhash) {

	} catch (const CInvalidPacket& e) {
		AddLogLineC(CFormat(_("Error: %s (%s) is corrupt (bad tags: %s), unable to load file."))
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile, CFormat( wxT("IO failure while loading '%s': %s") )
	} catch (const CEOFException& WXUNUSED(e)) {
		AddLogLineC(CFormat( _("ERROR: %s (%s) is corrupt (wrong tagcount), unable to load file.") )
		AddLogLineC(_("Trying to recover file info..."));

		// Safe file is that who have

		// We have filesize, try other needed info

		// Do we need to check gaps? I think not,
		// because they are checked below. Worst
		// scenario will only mark file as 0 bytes downloaded.

			if (!GetFileName().IsOk()) {
				// Not critical, let's put a random filename.
					"Recovering no-named file - will try to recover it as RecoveredFile.dat"));
				SetFileName(CPath(wxT("RecoveredFile.dat")));

			AddLogLineC(_("Recovered all available file info :D - Trying to use it..."));
			AddLogLineC(_("Unable to recover file info :("));

		m_gaplist.Init(GetFileSize(), false);	// Init full, then add gaps
		// Now to flush the map into the list (Slugfiller)
		std::map<uint16, Gap_Struct*>::iterator it = gap_map.begin();
		for ( ; it != gap_map.end(); ++it ) {
			Gap_Struct* gap = it->second;
			// SLUGFILLER: SafeHash - revised code, and extra safety
			if ( (gap->start != (uint64)-1) &&
			     (gap->end != (uint64)-1) &&
			     gap->start <= gap->end &&
			     gap->start < GetFileSize()) {
				if (gap->end >= GetFileSize()) {
					gap->end = GetFileSize()-1;	// Clipping
				m_gaplist.AddGap(gap->start, gap->end);	// All tags accounted for, use safe adding
		// SLUGFILLER: SafeHash

		// check if this is a backup
		if (m_fullname.GetExt().MakeLower() == wxT("backup")) {
			m_fullname = m_fullname.RemoveExt();

		// open permanent handle
		if ( !m_hpartfile.Open(m_PartPath, CFile::read_write)) {
			AddLogLineN(CFormat( _("Failed to open %s (%s)") )

			// SLUGFILLER: SafeHash - final safety, make sure any missing part of the file is gap
			if (m_hpartfile.GetLength() < GetFileSize())
				AddGap(m_hpartfile.GetLength(), GetFileSize()-1);
			// Goes both ways - Partfile should never be too large
			if (m_hpartfile.GetLength() > GetFileSize()) {
				AddDebugLogLineC(logPartFile, CFormat( wxT("Partfile \"%s\" is too large! Truncating %llu bytes.") ) % GetFileName() % (m_hpartfile.GetLength() - GetFileSize()));
				m_hpartfile.SetLength(GetFileSize());
			// SLUGFILLER: SafeHash
		} catch (const CIOFailureException& e) {
			AddDebugLogLineC(logPartFile, CFormat( wxT("Error while accessing partfile \"%s\": %s") ) % GetFileName() % e.what());

		// now close the file again until needed
		m_hpartfile.Release(true);

	// check hashcount, file status etc
	if (GetHashCount() != GetED2KPartHashCount()){
		m_hashsetneeded = true;
		m_hashsetneeded = false;
		for (size_t i = 0; i < m_hashlist.size(); ++i) {

	if (m_gaplist.IsComplete()) {	// is this file complete already?

	if (!isnewstyle) {	// not for importing
		const time_t file_date = CPath::GetModificationTime(m_PartPath);
		if (m_lastDateChanged != file_date) {
			// It's pointless to rehash an empty file, since the case
			// where a user has zero'd a file is handled above ...
			if (m_hpartfile.GetLength()) {
				AddLogLineN(CFormat( _("WARNING: %s might be corrupted (%i)") )
					% (m_lastDateChanged - file_date) );

				SetStatus(PS_WAITINGFORHASH);

				CPath partFileName = m_partmetfilename.RemoveExt();
				CThreadScheduler::AddTask(new CHashingTask(m_filePath, partFileName, this));

	UpdateCompletedInfos();
	if (completedsize > transferred) {
		m_iGainDueToCompression = completedsize - transferred;
	} else if (completedsize != transferred) {
		m_iLostDueToCorruption = transferred - completedsize;
bool CPartFile::SavePartFile(bool Initial)
		case PS_WAITINGFORHASH:

	/* Don't write anything to disk if less than 100 KB of free space is left. */
	sint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if ((free != wxInvalidOffset) && (free < (100 * 1024))) {

		if (!m_PartPath.FileExists()) {
			throw wxString(wxT(".part file not found"));

		uint32 lsc = lastseencomplete;

			CPath::BackupFile(m_fullname, wxT(".backup"));
			CPath::RemoveFile(m_fullname);

		file.Open(m_fullname, CFile::write);
		if (!file.IsOpened()) {
			throw wxString(wxT("Failed to open part.met file"));

		file.WriteUInt8(IsLargeFile() ? PARTFILE_VERSION_LARGEFILE : PARTFILE_VERSION);

		file.WriteUInt32(CPath::GetModificationTime(m_PartPath));

		file.WriteHash(m_abyFileHash);
		uint16 parts = m_hashlist.size();
		file.WriteUInt16(parts);
		for (int x = 0; x < parts; ++x) {
			file.WriteHash(m_hashlist[x]);

		#define FIXED_TAGS 15
		uint32 tagcount = m_taglist.size() + FIXED_TAGS + (m_gaplist.size()*2);
		if (!m_corrupted_list.empty()) {

		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){

		if (GetLastPublishTimeKadSrc()){

		if (GetLastPublishTimeKadNotes()){

		if (GetDlActiveTime()){

		file.WriteUInt32(tagcount);

		//#warning Kry - Where are lost by corruption and gained by compression?

		// 0 (unicoded part file name)
		// We write it with BOM to keep eMule compatibility. Note that the 'printable' filename is saved,
		// as presently the filename does not represent an actual file.
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file, utf8strOptBOM );
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file );	// 1

		CTagIntSized( FT_FILESIZE, GetFileSize(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 2
		CTagIntSized( FT_TRANSFERRED, transferred, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 3
		CTagInt32( FT_STATUS, (m_paused ? 1 : 0)).WriteTagToFile( &file );	// 4

		if ( IsAutoDownPriority() ) {
			CTagInt32( FT_DLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 6
			CTagInt32( FT_DLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 6

		CTagInt32( FT_LASTSEENCOMPLETE, lsc ).WriteTagToFile( &file );	// 7

		if ( IsAutoUpPriority() ) {
			CTagInt32( FT_ULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 9
			CTagInt32( FT_ULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 9

		CTagInt32(FT_CATEGORY, m_category).WriteTagToFile( &file );	// 10
		CTagInt32(FT_ATTRANSFERRED, statistic.GetAllTimeTransferred() & 0xFFFFFFFF).WriteTagToFile( &file );	// 11
		CTagInt32(FT_ATTRANSFERREDHI, statistic.GetAllTimeTransferred() >> 32).WriteTagToFile( &file );	// 12
		CTagInt32(FT_ATREQUESTED, statistic.GetAllTimeRequests()).WriteTagToFile( &file );	// 13
		CTagInt32(FT_ATACCEPTED, statistic.GetAllTimeAccepts()).WriteTagToFile( &file );	// 14

		// corrupt part infos
		if (!m_corrupted_list.empty()) {
			wxString strCorruptedParts;
			std::list<uint16>::iterator it = m_corrupted_list.begin();
			for (; it != m_corrupted_list.end(); ++it) {
				uint16 uCorruptedPart = *it;
				if (!strCorruptedParts.IsEmpty()) {
					strCorruptedParts += wxT(",");
				strCorruptedParts += CFormat(wxT("%u")) % uCorruptedPart;
			wxASSERT( !strCorruptedParts.IsEmpty() );

			CTagString( FT_CORRUPTEDPARTS, strCorruptedParts ).WriteTagToFile( &file );	// 11?

		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
			CTagString aichtag(FT_AICH_HASH, m_pAICHHashSet->GetMasterHash().GetString() );
			aichtag.WriteTagToFile(&file);	// 12?

		if (GetLastPublishTimeKadSrc()){
			CTagInt32(FT_KADLASTPUBLISHSRC, GetLastPublishTimeKadSrc()).WriteTagToFile(&file);	// 15?

		if (GetLastPublishTimeKadNotes()){
			CTagInt32(FT_KADLASTPUBLISHNOTES, GetLastPublishTimeKadNotes()).WriteTagToFile(&file);	// 16?

		if (GetDlActiveTime()){
			CTagInt32(FT_DL_ACTIVE_TIME, GetDlActiveTime()).WriteTagToFile(&file);	// 17

		for (uint32 j = 0; j < (uint32)m_taglist.size(); ++j) {
			m_taglist[j].WriteTagToFile(&file);

		for (CGapList::const_iterator it = m_gaplist.begin(); it != m_gaplist.end(); ++it) {
			wxString tagName = CFormat(wxT(" %u")) % i_pos;

			// gap start = first missing byte but gap ends = first non-missing byte
			// in edonkey, but I think it's easier to use the real limits
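			// Editorial note (derived from the code here and in LoadPartFile, not from any
			// separate format spec): a gap covering bytes [a, b] is written as an FT_GAPSTART
			// tag holding 'a' and an FT_GAPEND tag holding 'b + 1', which is why 'it.end() + 1'
			// is stored below and LoadPartFile() subtracts 1 when reading FT_GAPEND back.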
			tagName[0] = FT_GAPSTART;
			CTagIntSized(tagName, it.start(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

			tagName[0] = FT_GAPEND;
			CTagIntSized(tagName, it.end() + 1, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

	} catch (const wxString& error) {
		AddLogLineNS(CFormat( _("ERROR while saving partfile: %s (%s ==> %s)") )
	} catch (const CIOFailureException& e) {
		AddLogLineCS(_("IO failure while saving partfile: ") + e.what());

		CPath::RemoveFile(m_fullname.AppendExt(wxT(".backup")));

	sint64 metLength = m_fullname.GetFileSize();
	if (metLength == wxInvalidOffset) {
		theApp->ShowAlert( CFormat( _("Could not retrieve length of '%s' - using %s file.") )

		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
	} else if (metLength == 0) {
		// Don't backup if it's 0 size, but raise a warning!!!
		theApp->ShowAlert( CFormat( _("'%s' is 0 size somehow - using %s file.") )

		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
		// no error, just backup
		CPath::BackupFile(m_fullname, PARTMET_BAK_EXT);
void CPartFile::SaveSourceSeeds()
	#define MAX_SAVED_SOURCES 10

	// Kry - Sources seeds
	// Based on a Feature request, this saves the last MAX_SAVED_SOURCES
	// sources of the file, giving a 'seed' for the next run.
	// We save the last sources because:
	// 1 - They could be the hardest to get
	// 2 - They will more probably be available
	// However, if we have downloading sources, they have preference because
	// we probably have more credits on them.
	// Anyway, source exchange will get us the rest of the sources
	// This feature is currently used only on rare files (< 20 sources)

	if (GetSourceCount() > 20) {

	CClientRefList source_seeds;

	CClientRefList::iterator it = m_downloadingSourcesList.begin();
	for ( ; it != m_downloadingSourcesList.end() && n_sources < MAX_SAVED_SOURCES; ++it) {
		if (!it->HasLowID()) {
			source_seeds.push_back(*it);

	if (n_sources < MAX_SAVED_SOURCES) {
		// Not enough downloading sources to fill the list, going to sources list
		if (GetSourceCount() > 0) {
			SourceSet::reverse_iterator rit = m_SrcList.rbegin();
			for ( ; ((rit != m_SrcList.rend()) && (n_sources < MAX_SAVED_SOURCES)); ++rit) {
				if (!rit->HasLowID()) {
					source_seeds.push_back(*rit);

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));

	file.Create(seedsPath, true);
	if (!file.IsOpened()) {
		AddLogLineN(CFormat( _("Failed to save part.met.seeds file for %s") )

		file.WriteUInt8(0);	// v3, to avoid v2 clients choking on it.
		file.WriteUInt8(source_seeds.size());

		CClientRefList::iterator it2 = source_seeds.begin();
		for (; it2 != source_seeds.end(); ++it2) {
			CUpDownClient* cur_src = it2->GetClient();
			file.WriteUInt32(cur_src->GetUserIDHybrid());
			file.WriteUInt16(cur_src->GetUserPort());
			file.WriteHash(cur_src->GetUserHash());
			// CryptSettings - See SourceExchange V4
			const uint8 uSupportsCryptLayer	= cur_src->SupportsCryptLayer() ? 1 : 0;
			const uint8 uRequestsCryptLayer	= cur_src->RequestsCryptLayer() ? 1 : 0;
			const uint8 uRequiresCryptLayer	= cur_src->RequiresCryptLayer() ? 1 : 0;
			const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
			file.WriteUInt8(byCryptOptions);
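			// Editorial note: the resulting bit layout is bit 0 = supports, bit 1 = requests,
			// bit 2 = requires crypt layer. For example, a source that supports and requests
			// obfuscation but does not require it is stored as 0x03.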
		/* v2: Added to keep track of too old seeds */
		file.WriteUInt32(wxDateTime::Now().GetTicks());

		AddLogLineN(CFormat( wxPLURAL("Saved %i source seed for partfile: %s (%s)", "Saved %i source seeds for partfile: %s (%s)", n_sources) )
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC( logPartFile, CFormat( wxT("Error saving partfile's seeds file (%s - %s): %s") )

		CPath::RemoveFile(seedsPath);
void CPartFile::LoadSourceSeeds()
	CMemFile sources_data;

	bool valid_sources = false;

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));
	if (!seedsPath.FileExists()) {

	CFile file(seedsPath, CFile::read);
	if (!file.IsOpened()) {
		// Exists but can't be opened. Should not happen. Probably a permission problem, try to remove it.
		AddLogLineN(CFormat( _("Can't read seeds file for Partfile %s (%s)") )

		CPath::RemoveFile(seedsPath);

	bool badSeedsFile = false;

		uint8 src_count = file.ReadUInt8();

		bool bUseSX2Format = (src_count == 0);

		if (bUseSX2Format) {
			src_count = file.ReadUInt8();

		sources_data.WriteUInt16(src_count);

		for (int i = 0; i < src_count; ++i) {
			uint32 dwID = file.ReadUInt32();
			uint16 nPort = file.ReadUInt16();

			sources_data.WriteUInt32(bUseSX2Format ? dwID : wxUINT32_SWAP_ALWAYS(dwID));
			sources_data.WriteUInt16(nPort);
			sources_data.WriteUInt32(0);
			sources_data.WriteUInt16(0);

			if (bUseSX2Format) {
				sources_data.WriteHash(file.ReadHash());
				sources_data.WriteUInt8(file.ReadUInt8());

			// v2: Added to keep track of too old seeds
			time_t time = (time_t)file.ReadUInt32();

			// Time frame is 2 hours. More than enough to compile
			// your new aMule version!
			if ((time + MIN2S(120)) >= wxDateTime::Now().GetTicks()) {
				valid_sources = true;

			// v1 has no time data. We can safely use
			// the sources, next time will be saved.
			valid_sources = true;

		if (valid_sources) {
			sources_data.Seek(0);
			AddClientSources(&sources_data, SF_SOURCE_SEEDS, bUseSX2Format ? 4 : 1, bUseSX2Format);

	} catch (const CSafeIOException& e) {
		AddLogLineN(CFormat( _("Error reading partfile's seeds file (%s - %s): %s") )

		badSeedsFile = true;

		// If we got an exception reading it, remove it.
		CPath::RemoveFile(seedsPath);
void CPartFile::PartFileHashFinished(CKnownFile* result)
	m_lastDateChanged = result->m_lastDateChanged;
	bool errorfound = false;
	if (GetED2KPartHashCount() == 0){
		if (IsComplete(0, GetFileSize()-1)){
			if (result->GetFileHash() != GetFileHash()){
				// cppcheck-suppress zerodiv
				AddLogLineN(CFormat(wxPLURAL(
					"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
					"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
					% result->GetFileHash().Encode()
					% GetFileHash().Encode() );
				AddGap(0, GetFileSize()-1);

		for (size_t i = 0; i < m_hashlist.size(); ++i){
			// Kry - trel_ar's completed parts check on rehashing.
			// Very nice feature, if a file is completed but .part.met doesn't believe it,
			if (!( i < result->GetHashCount() && (result->GetPartHash(i) == GetPartHash(i)))){
				if (IsComplete(i)) {
					if ( i < result->GetHashCount() )
						wronghash = result->GetPartHash(i);

					AddLogLineN(CFormat(wxPLURAL(
						"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
						"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
						GetED2KPartHashCount())
						% GetED2KPartHashCount()
						% wronghash.Encode()
						% GetPartHash(i).Encode() );

				if (!IsComplete(i)){
					AddLogLineN(CFormat( _("Found completed part (%i) in %s") )

					uint64 partStart = i * PARTSIZE;
					uint64 partEnd = partStart + GetPartSize(i) - 1;
					RemoveBlockFromList(partStart, partEnd);

		result->GetAICHHashset()->GetStatus() == AICH_HASHSETCOMPLETE &&
		status == PS_COMPLETING) {
		delete m_pAICHHashSet;
		m_pAICHHashSet = result->GetAICHHashset();
		result->SetAICHHashset(NULL);
		m_pAICHHashSet->SetOwner(this);
	else if (status == PS_COMPLETING) {
		AddDebugLogLineN(logPartFile,
			CFormat(wxT("Failed to store new AICH Hashset for completed file: %s"))

	if (status == PS_COMPLETING){

		AddLogLineN(CFormat( _("Finished rehashing %s") ) % GetFileName());

		SetStatus(PS_READY);

	SetStatus(PS_READY);

	theApp->sharedfiles->SafeAddKFile(this);
void CPartFile::AddGap(uint64 start, uint64 end)
	m_gaplist.AddGap(start, end);
	UpdateDisplayedInfo();

void CPartFile::AddGap(uint16 part)
	m_gaplist.AddGap(part);
	UpdateDisplayedInfo();
bool CPartFile::IsAlreadyRequested(uint64 start, uint64 end)
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	for (; it != m_requestedblocks_list.end(); ++it) {
		Requested_Block_Struct* cur_block = *it;

		if ((start <= cur_block->EndOffset) && (end >= cur_block->StartOffset)) {
bool CPartFile::GetNextEmptyBlockInPart(uint16 partNumber, Requested_Block_Struct* result)
	// Find start of this part
	uint64 partStart = (PARTSIZE * partNumber);
	uint64 start = partStart;

	// What is the end limit of this block, i.e. can't go outside part (or filesize)
	uint64 partEnd = partStart + GetPartSize(partNumber) - 1;
	// Loop until we find a suitable gap and return true, or there are no more gaps and we return false
	CGapList::const_iterator it = m_gaplist.begin();

		uint64 gapStart, end;

		// Find the first gap from the start position
		for (; it != m_gaplist.end(); ++it) {
			gapStart = it.start();

			// Want gaps that overlap start<->partEnd
			if (gapStart <= partEnd && end >= start) {
			} else if (gapStart > partEnd) {

		// If no gaps after start, exit

		// Update start position if gap starts after current pos
		if (start < gapStart) {

		// Find end, keeping within the max block size and the part limit
		uint64 blockLimit = partStart + (BLOCKSIZE * (((start - partStart) / BLOCKSIZE) + 1)) - 1;
		if (end > blockLimit) {
		if (end > partEnd) {

		// If this gap has not already been requested, we have found a valid entry
		if (!IsAlreadyRequested(start, end)) {
			// Was this block to be returned?
			if (result != NULL) {
				result->StartOffset = start;
				result->EndOffset = end;
				md4cpy(result->FileID, GetFileHash().GetHash());
				result->transferred = 0;

			// Reposition to end of that gap

		// If we have tried all gaps then break out of the loop
		if (end == partEnd) {

	// No suitable gap found
void CPartFile::FillGap(uint64 start, uint64 end)
	m_gaplist.FillGap(start, end);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();

void CPartFile::FillGap(uint16 part)
	m_gaplist.FillGap(part);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();
void CPartFile::UpdateCompletedInfos()
	uint64 allgaps = m_gaplist.GetGapSize();

	percentcompleted = (1.0 - (double)allgaps/GetFileSize()) * 100.0;
	completedsize = GetFileSize() - allgaps;
void CPartFile::WritePartStatus(CMemFile* file)
	uint16 parts = GetED2KPartCount();
	file->WriteUInt16(parts);

	while (done != parts){
		for (uint32 i = 0; i != 8; ++i) {
			if (IsComplete(done)) {
			if (done == parts) {
		file->WriteUInt8(towrite);

void CPartFile::WriteCompleteSourcesCount(CMemFile* file)
	file->WriteUInt16(m_nCompleteSourcesCount);
uint32 CPartFile::Process(uint32 reducedownload /*in percent*/, uint8 m_icounter)
	uint32 dwCurTick = ::GetTickCount();

	// If buffer size exceeds limit, or if not written within time limit, flush data
	if ( (m_nTotalBufferData > thePrefs::GetFileBufferSize()) ||
	     (dwCurTick > (m_nLastBufferFlushTime + BUFFER_TIME_LIMIT))) {

	// check if we want new sources from server --> MOVED for 16.40 version
	old_trans = transferingsrc;

	if (m_icounter < 10) {
		// Update only downloading sources.
		CClientRefList::iterator it = m_downloadingSourcesList.begin();
		for ( ; it != m_downloadingSourcesList.end(); ) {
			CUpDownClient* cur_src = it++->GetClient();
			if (cur_src->GetDownloadState() == DS_DOWNLOADING) {
				kBpsDown += cur_src->SetDownloadLimit(reducedownload);

		// Update all sources (including downloading sources)
		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
			CUpDownClient* cur_src = it++->GetClient();
			switch (cur_src->GetDownloadState()) {
				case DS_DOWNLOADING: {
					kBpsDown += cur_src->SetDownloadLimit(reducedownload);

				case DS_LOWTOLOWIP: {
					if (cur_src->HasLowID() && !theApp->CanDoCallback(cur_src->GetServerIP(), cur_src->GetServerPort())) {
						// If we are almost maxed on sources,
						// slowly remove these clients to see
						// if we can find a better source.
						if (((dwCurTick - lastpurgetime) > 30000) &&
						    (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8))) {
							RemoveSource(cur_src);
							lastpurgetime = dwCurTick;

						cur_src->SetDownloadState(DS_ONQUEUE);

				case DS_NONEEDEDPARTS: {
					// we try to purge no-needed sources, even without reaching the limit
					if ((dwCurTick - lastpurgetime) > 40000) {
						if (!cur_src->SwapToAnotherFile(false, false, false, NULL)) {
							// however we only delete them if reaching the limit
							if (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) {
								RemoveSource(cur_src);
								lastpurgetime = dwCurTick;
								break; // Johnny-B - nothing more to do here (good eye!)
						lastpurgetime = dwCurTick;

					// doubled reasktime for no needed parts - save connections and traffic
					if ( !((!cur_src->GetLastAskedTime()) ||
					       (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME*2)) {

					// Recheck this client to see if still NNP..
					// Set to DS_NONE so that we force a TCP reask next time..
					cur_src->SetDownloadState(DS_NONE);

					if (cur_src->IsRemoteQueueFull()) {
						if ( ((dwCurTick - lastpurgetime) > 60000) &&
						     (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) ) {
							RemoveSource( cur_src );
							lastpurgetime = dwCurTick;
							break; // Johnny-B - nothing more to do here (good eye!)

						// Give up to 1 min for UDP to respond..
						// If we are within one min on TCP, do not try..
						if ( theApp->IsConnected() &&
						     ( (!cur_src->GetLastAskedTime()) ||
						       (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME-20000)) {
							cur_src->UDPReaskForDownload();

					// No break here, since the next case takes care of asking for downloads.
				case DS_TOOMANYCONNS:
				case DS_WAITCALLBACK:
				case DS_WAITCALLBACKKAD: {
						if ( theApp->IsConnected() &&
						     ( (!cur_src->GetLastAskedTime()) ||
						       (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME)) {
							if (!cur_src->AskForDownload()) {
								// I left this break here just as a reminder
								// just in case we rearrange things..

	/* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
	if (IsA4AFAuto() && ((!m_LastNoNeededCheck) || (dwCurTick - m_LastNoNeededCheck > 900000))) {
		m_LastNoNeededCheck = dwCurTick;
		for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
			CUpDownClient* cur_source = it++->GetClient();
			uint8 download_state = cur_source->GetDownloadState();
			if ( download_state != DS_DOWNLOADING
			     && cur_source->GetRequestFile()
			     && ((!cur_source->GetRequestFile()->IsA4AFAuto()) || download_state == DS_NONEEDEDPARTS))
				cur_source->SwapToAnotherFile(false, false, false, this);
	/* eMule 0.30c implementation, i give it a try (Creteil) END ... */

	// swap No needed partfiles if possible
	if (((old_trans == 0) && (transferingsrc > 0)) || ((old_trans > 0) && (transferingsrc == 0))) {

	// Kad source search
	if ( GetMaxSourcePerFileUDP() > GetSourceCount()){
		// Once we can handle lowID users in Kad, we remove the second IsConnected
		if (theApp->downloadqueue->DoKademliaFileRequest() && (Kademlia::CKademlia::GetTotalFile() < KADEMLIATOTALFILE) && (dwCurTick > m_LastSearchTimeKad) && Kademlia::CKademlia::IsConnected() && theApp->IsConnected() && !IsStopped()){

			theApp->downloadqueue->SetLastKademliaFileRequest();

			if (GetKadFileSearchID()) {
				/* This will never happen anyway. We're talking a
				   1h timespan and searches are at max 45secs */
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);

			Kademlia::CUInt128 kadFileID(GetFileHash().GetHash());
			Kademlia::CSearch* pSearch = Kademlia::CSearchManager::PrepareLookup(Kademlia::CSearch::FILE, true, kadFileID);
			AddDebugLogLineN(logKadSearch, CFormat(wxT("Preparing a Kad Search for '%s'")) % GetFileName());

				AddDebugLogLineN(logKadSearch, CFormat(wxT("Kad lookup started for '%s'")) % GetFileName());
				if (m_TotalSearchesKad < 7) {
					m_TotalSearchesKad++;
				m_LastSearchTimeKad = dwCurTick + (KADEMLIAREASKTIME*m_TotalSearchesKad);
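				// Editorial note: since m_TotalSearchesKad only grows (capped at 7 above), each
				// successive Kad lookup for this file is scheduled further into the future -
				// a simple linear back-off on KADEMLIAREASKTIME.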
				SetKadFileSearchID(pSearch->GetSearchID());

		if (GetKadFileSearchID()) {
			Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);

	// check if we want new sources from server
	if ( !m_localSrcReqQueued &&
	     ( (!m_lastsearchtime) ||
	       (dwCurTick - m_lastsearchtime) > SERVERREASKTIME) &&
	     theApp->IsConnectedED2K() &&
	     thePrefs::GetMaxSourcePerFileSoft() > GetSourceCount() &&
		m_localSrcReqQueued = true;
		theApp->downloadqueue->SendLocalSrcRequest(this);

	// calculate datarate, set limit etc.

	// Kry - does the 3 / 30 difference produce too much flickering or CPU?
	if (m_count >= 30) {
		UpdateAutoDownPriority();
		UpdateDisplayedInfo();
		if (m_bPercentUpdated == false) {
			UpdateCompletedInfos();
		m_bPercentUpdated = false;

	// release file handle if unused for some time
	m_hpartfile.Release();

	return (uint32)(kBpsDown*1024.0);
bool CPartFile::CanAddSource(uint32 userid, uint16 port, uint32 serverip, uint16 serverport, uint8* pdebug_lowiddropped, bool ed2kID)
	// The incoming ID could have the userid in the Hybrid format..
	uint32 hybridID = 0;
		if (IsLowID(userid)) {
			hybridID = wxUINT32_SWAP_ALWAYS(userid);
		if (!IsLowID(userid)) {
			userid = wxUINT32_SWAP_ALWAYS(userid);

	// MOD Note: Do not change this part - Merkur
	if (theApp->IsConnectedED2K()) {
		if (::IsLowID(theApp->GetED2KID())) {
			if (theApp->GetED2KID() == userid && theApp->serverconnect->GetCurrentServer()->GetIP() == serverip && theApp->serverconnect->GetCurrentServer()->GetPort() == serverport) {
			if (theApp->GetPublicIP() == userid) {
			if (theApp->GetED2KID() == userid && thePrefs::GetPort() == port) {

	if (Kademlia::CKademlia::IsConnected()) {
		if (!Kademlia::CKademlia::IsFirewalled()) {
			if (Kademlia::CKademlia::GetIPAddress() == hybridID && thePrefs::GetPort() == port) {

	// This allows *.*.*.0 clients to not be removed if Ed2kID == false
	if ( IsLowID(hybridID) && theApp->IsFirewalled()) {
		if (pdebug_lowiddropped) {
			(*pdebug_lowiddropped)++;
void CPartFile::AddSources(CMemFile& sources, uint32 serverip, uint16 serverport, unsigned origin, bool bWithObfuscationAndHash)
	uint8 count = sources.ReadUInt8();
	uint8 debug_lowiddropped = 0;
	uint8 debug_possiblesources = 0;
	CMD4Hash achUserHash;

		// since we may receive multiple search source UDP results we have to "consume" all data of that packet
		AddDebugLogLineN(logPartFile, wxT("Trying to add sources for a stopped file"));
		sources.Seek(count*(4+2), wxFromCurrent);

	for (int i = 0; i != count; ++i) {
		uint32 userid = sources.ReadUInt32();
		uint16 port = sources.ReadUInt16();

		uint8 byCryptOptions = 0;
		if (bWithObfuscationAndHash){
			byCryptOptions = sources.ReadUInt8();
			if ((byCryptOptions & 0x80) > 0) {
				achUserHash = sources.ReadHash();

			if ((thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x01/*supported*/) > 0 && (byCryptOptions & 0x80) == 0)
			    || (thePrefs::IsClientCryptLayerSupported() && (byCryptOptions & 0x02/*requested*/) > 0 && (byCryptOptions & 0x80) == 0)) {
				AddDebugLogLineN(logPartFile, CFormat(wxT("Server didn't provide UserHash for source %u, even if it was expected to (or local obfuscationsettings changed during serverconnect")) % userid);
			} else if (!thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x02/*requested*/) == 0 && (byCryptOptions & 0x80) != 0) {
				AddDebugLogLineN(logPartFile, CFormat(wxT("Server provided UserHash for source %u, even if it wasn't expected to (or local obfuscationsettings changed during serverconnect")) % userid);

		// "Filter LAN IPs" and "IPfilter" the received sources IP addresses
		if (!IsLowID(userid)) {
			// check for 0-IP, localhost and optionally for LAN addresses
			if ( !IsGoodIP(userid, thePrefs::FilterLanIPs()) ) {
			if (theApp->ipfilter->IsFiltered(userid)) {

		if (!CanAddSource(userid, port, serverip, serverport, &debug_lowiddropped)) {

		if (thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
			++debug_possiblesources;
			CUpDownClient* newsource = new CUpDownClient(port, userid, serverip, serverport, this, true, true);

			newsource->SetSourceFrom((ESourceFrom)origin);
			newsource->SetConnectOptions(byCryptOptions, true, false);

			if ((byCryptOptions & 0x80) != 0) {
				newsource->SetUserHash(achUserHash);

			theApp->downloadqueue->CheckAndAddSource(this, newsource);
			AddDebugLogLineN(logPartFile, wxT("Consuming a packet because of max sources reached"));
			// Since we may receive multiple search source UDP results we have to "consume" all data of that packet
			// This '+1' is added because 'i' counts from 0.
			sources.Seek((count-(i+1))*(4+2), wxFromCurrent);
			if (GetKadFileSearchID()) {
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
void CPartFile::UpdatePartsInfo()
	if ( !IsPartFile() ) {
		CKnownFile::UpdatePartsInfo();

	uint16 partcount = GetPartCount();
	bool flag = (time(NULL) - m_nCompleteSourcesTime > 0);

	// Ensure the frequency-list is ready
	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

	// Find number of available parts
	uint16 availablecounter = 0;
	for ( uint16 i = 0; i < partcount; ++i ) {
		if ( m_SrcpartFrequency[i] )

	if ( ( availablecounter == partcount ) && ( m_availablePartsCount < partcount ) ) {
		lastseencomplete = time(NULL);

	m_availablePartsCount = availablecounter;

		ArrayOfUInts16 count;

		count.reserve(GetSourceCount());

		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it ) {
			CUpDownClient* client = it->GetClient();
			if ( !client->GetUpPartStatus().empty() && client->GetUpPartCount() == partcount ) {
				count.push_back(client->GetUpCompleteSourcesCount());

		m_nCompleteSourcesCount = m_nCompleteSourcesCountLo = m_nCompleteSourcesCountHi = 0;

		for (uint16 i = 0; i < partcount; ++i) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
			else if ( m_nCompleteSourcesCount > m_SrcpartFrequency[i]) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
		count.push_back(m_nCompleteSourcesCount);

		int32 n = count.size();

			std::sort(count.begin(), count.end(), std::less<uint16>());

			int32 i = n >> 1;		// (n / 2)
			int32 j = (n * 3) >> 2;	// (n * 3) / 4
			int32 k = (n * 7) >> 3;	// (n * 7) / 8
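			// Editorial note: with the counts sorted ascending, i, j and k index the median,
			// 75th-percentile and 87.5th-percentile entries; e.g. for n = 16 reported values
			// they pick count[8], count[12] and count[14]. These feed the low/normal/high
			// guesses blended below.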
			// When still a part file, adjust your guesses by 20% to what you see..

				// Not many sources, so just use what you see..
				// welcome to 'plain stupid code'
				// m_nCompleteSourcesCount;
				m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
				m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
			} else if (n < 20) {
				// For low guess and normal guess count
				// If we see more sources than the guessed low and normal, use what we see.
				// If we see fewer sources than the guessed low, the network accounts for 80%,
				// we account for 20% with what we see and make sure we are still above the normal.

				// Adjust 80% network and 20% what we see.
				if ( count[i] < m_nCompleteSourcesCount ) {
					m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
					m_nCompleteSourcesCountLo =
						(uint16)((float)(count[i]*.8) +
							 (float)(m_nCompleteSourcesCount*.2));
				m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
				m_nCompleteSourcesCountHi =
					(uint16)((float)(count[j]*.8) +
						 (float)(m_nCompleteSourcesCount*.2));
				if ( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
					m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

				// Adjust network accounts for 80%, we account for 20% with what
				// we see and make sure we are still above the low.

				// Adjust network accounts for 80%, we account for 20% with what
				// we see and make sure we are still above the normal.

				m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
				m_nCompleteSourcesCount = (uint16)((float)(count[j]*.8)+(float)(m_nCompleteSourcesCount*.2));
				if ( m_nCompleteSourcesCount < m_nCompleteSourcesCountLo ) {
					m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
				m_nCompleteSourcesCountHi = (uint16)((float)(count[k]*.8)+(float)(m_nCompleteSourcesCount*.2));
				if ( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
					m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

		m_nCompleteSourcesTime = time(NULL) + (60);

	UpdateDisplayedInfo();
// [Maella -Enhanced Chunk Selection- (based on jicxicmic)]
bool CPartFile::GetNextRequestedBlock(CUpDownClient* sender,
				      std::vector<Requested_Block_Struct*>& toadd, uint16& count)

	// The purpose of this function is to return a list of blocks (~180KB) to
	// download. To avoid a premature stop of the downloading, all blocks that
	// are requested from the same source must be located within the same
	// chunk (=> part ~9MB).

	// The selection of the chunk to download is one of the CRITICAL parts of the
	// edonkey network. The selection algorithm must ensure the best spreading

	// The selection is based on 4 criteria:
	//  1. Frequency of the chunk (availability): very rare chunks must be downloaded
	//     as quickly as possible to become a new available source.
	//  2. Parts used for preview (first + last chunk), to preview or check a
	//     file (e.g. movie, mp3).
	//  3. Request state (downloading in progress): try to ask each source for another
	//     chunk. Spread the requests between all sources.
	//  4. Completion (shortest-to-complete): partially retrieved chunks should be
	//     completed before starting to download another one.

	// The frequency criterion defines three zones: very rare (<10%), rare (<50%)
	// and common (>30%). Inside each zone, the criteria have a specific weight, used
	// to calculate the priority of chunks. The chunk(s) with the highest
	// priority (highest=0, lowest=0xffff) is/are selected first.

	//           very rare                (preview)           rare                       common
	//    0% <---- +0 pt ----> 10% <----- +10000 pt -----> 50% <---- +20000 pt ----> 100%
	// 1.  <------- frequency: +25*frequency pt ----------->
	// 2.  <- preview: +1 pt --><-------------- preview: set to 10000 pt ------------->
	// 3.                       <------ request: download in progress +20000 pt ------>
	// 4a. <- completion: 0% +100, 25% +75 .. 100% +0 pt --><-- !req => completion --->
	// 4b.                                                  <--- req => !completion -->

	// Unrolled, the priority scale is:
	//     0..xxxx      unrequested and requested very rare chunks
	// 10000..1xxxx     unrequested rare chunks + unrequested preview chunks
	// 20000..2xxxx     unrequested common chunks (priority to the most complete)
	// 30000..3xxxx     requested rare chunks + requested preview chunks
	// 40000..4xxxx     requested common chunks (priority to the least complete)

	// This algorithm usually selects the rarest chunk(s) first. However, partially
	// complete chunk(s) that is/are close to completion may overtake the priority
	// (priority inversion).
	// For the common chunks, the algorithm tries to spread the download between
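	// Illustrative example (derived from the rank formulas below, not part of the original
	// comment): an unrequested, non-preview chunk in the "very rare" zone seen at 2 sources
	// and already 40% complete gets rank = 25*2 + 1 + (100 - 40) = 111, so it is picked
	// before any rare or common chunk, whose ranks start at 10000.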
1874 // Check input parameters
1875 if ( sender
->GetPartStatus().empty() ) {
1878 // Define and create the list of the chunks to download
1879 const uint16 partCount
= GetPartCount();
1880 ChunkList chunksList
;
1883 uint16 newBlockCount
= 0;
1884 while(newBlockCount
!= count
) {
1885 // Create a request block stucture if a chunk has been previously selected
1886 if(sender
->GetLastPartAsked() != 0xffff) {
1887 Requested_Block_Struct
* pBlock
= new Requested_Block_Struct
;
1888 if(GetNextEmptyBlockInPart(sender
->GetLastPartAsked(), pBlock
) == true) {
1889 // Keep a track of all pending requested blocks
1890 m_requestedblocks_list
.push_back(pBlock
);
1891 // Update list of blocks to return
1892 toadd
.push_back(pBlock
);
1894 // Skip end of loop (=> CPU load)
1897 // All blocks for this chunk have been already requested
1899 // => Try to select another chunk
1900 sender
->SetLastPartAsked(0xffff);
1904 // Check if a new chunk must be selected (e.g. download starting, previous chunk complete)
1905 if(sender
->GetLastPartAsked() == 0xffff) {
1906 // Quantify all chunks (create list of chunks to download)
1907 // This is done only one time and only if it is necessary (=> CPU load)
1908 if(chunksList
.empty()) {
1909 // Indentify the locally missing part(s) that this source has
1910 for(uint16 i
=0; i
< partCount
; ++i
) {
1911 if(sender
->IsPartAvailable(i
) == true && GetNextEmptyBlockInPart(i
, NULL
) == true) {
1912 // Create a new entry for this chunk and add it to the list
1915 newEntry
.frequency
= m_SrcpartFrequency
[i
];
1916 chunksList
.push_back(newEntry
);
1920 // Check if any bloks(s) could be downloaded
1921 if(chunksList
.empty()) {
1922 break; // Exit main loop while()
1925 // Define the bounds of the three zones (very rare, rare)
1926 // more depending on available sources
1928 if (GetSourceCount()>800) {
1930 } else if (GetSourceCount()>200) {
1933 uint16 limit
= modif
*GetSourceCount()/ 100;
1937 const uint16 veryRareBound
= limit
;
1938 const uint16 rareBound
= 2*limit
;
1940 // Cache Preview state (Criterion 2)
1941 FileType type
= GetFiletype(GetFileName());
1942 const bool isPreviewEnable
=
1943 thePrefs::GetPreviewPrio() &&
1944 (type
== ftArchive
|| type
== ftVideo
);
1946 // Collect and calculate criteria for all chunks
1947 for (ChunkList::iterator it
= chunksList
.begin(); it
!= chunksList
.end(); ++it
) {
1948 Chunk
& cur_chunk
= *it
;
1951 const uint64 uStart
= cur_chunk
.part
* PARTSIZE
;
1952 const uint64 uEnd
= uStart
+ GetPartSize(cur_chunk
.part
) - 1;
1953 // Criterion 2. Parts used for preview
1954 // Remark: - We need to download the first part and the last part(s).
1955 // - When the last part is very small, it's necessary to
1956 // download the two last parts.
1957 bool critPreview
= false;
1958 if(isPreviewEnable
== true) {
1959 if(cur_chunk
.part
== 0) {
1960 critPreview
= true; // First chunk
1961 } else if(cur_chunk
.part
== partCount
-1) {
1962 critPreview
= true; // Last chunk
1963 } else if(cur_chunk
.part
== partCount
-2) {
1964 // Last chunk - 1 (only if last chunk is too small)
1965 const uint32 sizeOfLastChunk
= GetFileSize() - uEnd
;
1966 if(sizeOfLastChunk
< PARTSIZE
/3) {
1967 critPreview
= true; // Last chunk - 1
1972 // Criterion 3. Request state (downloading in process from other source(s))
1974 const bool critRequested
=
1975 cur_chunk
.frequency
> veryRareBound
&&
1976 IsAlreadyRequested(uStart
, uEnd
);
1978 // Criterion 4. Completion
1979 // PARTSIZE instead of GetPartSize() favours the last chunk - but that may be intentional
1980 uint32 partSize
= PARTSIZE
- m_gaplist
.GetGapSize(cur_chunk
.part
);
1981 const uint16 critCompletion
= (uint16
)(partSize
/(PARTSIZE
/100)); // in [%]
            // Calculate priority with all criteria
            if (cur_chunk.frequency <= veryRareBound) {
                // 0..xxxx unrequested + requested very rare chunks
                cur_chunk.rank = (25 * cur_chunk.frequency) +                   // Criterion 1
                    ((critPreview == true) ? 0 : 1) +                           // Criterion 2
                    (100 - critCompletion);                                     // Criterion 4
            } else if (critPreview == true) {
                // 10000..10100  unrequested preview chunks
                // 30000..30100  requested preview chunks
                cur_chunk.rank = ((critRequested == false) ? 10000 : 30000) +   // Criterion 3
                    (100 - critCompletion);                                     // Criterion 4
            } else if (cur_chunk.frequency <= rareBound) {
                // 10101..1xxxx  unrequested rare chunks
                // 30101..3xxxx  requested rare chunks
                cur_chunk.rank = (25 * cur_chunk.frequency) +                   // Criterion 1
                    ((critRequested == false) ? 10101 : 30101) +                // Criterion 3
                    (100 - critCompletion);                                     // Criterion 4
            } else {
                if (critRequested == false) {                                   // Criterion 3
                    // 20000..2xxxx  unrequested common chunks
                    cur_chunk.rank = 20000 +                                    // Criterion 3
                        (100 - critCompletion);                                 // Criterion 4
                } else {
                    // 40000..4xxxx  requested common chunks
                    // Remark: The weight of the completion criterion is inverted
                    //         to spread the requests over the completing chunks.
                    //         Without this, the chunk closest to completion would
                    //         receive every new source.
                    cur_chunk.rank = 40000 +                                    // Criterion 3
                        (critCompletion);                                       // Criterion 4
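        // Lower rank wins in the selection below, so the resulting order is:
        //       0..xxxx   very rare chunks (requested or not)
        //   10000..1xxxx  unrequested preview and rare chunks
        //   20000..2xxxx  unrequested common chunks
        //   30000..3xxxx  requested preview and rare chunks
        //   40000..4xxxx  requested common chunks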
        // Select the next chunk to download
        if (!chunksList.empty()) {
            // Find and count the chunk(s) with the highest priority
            uint16 chunkCount = 0;  // Number of found chunks with same priority
            uint16 rank = 0xffff;   // Highest priority found

            // Scan all chunks to find the best (lowest) rank and count how many share it
            for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
                const Chunk& cur_chunk = *it;
                if (cur_chunk.rank < rank) {
                    rank = cur_chunk.rank;
                } else if (cur_chunk.rank == rank) {

            // Use a random access to avoid that everybody tries to download the
            // same chunks at the same time (=> spread the selected chunk among clients)
            uint16 randomness = 1 + (int)(((float)(chunkCount - 1)) * rand() / (RAND_MAX + 1.0));
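            // 'randomness' picks a 1-based position among the equally ranked chunks;
            // the loop below is expected to count it down and select the chunk it
            // lands on, so different clients spread their picks across the ties.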
            for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
                const Chunk& cur_chunk = *it;
                if (cur_chunk.rank == rank) {
                    if (randomness == 0) {
                        // Selection process is over
                        sender->SetLastPartAsked(cur_chunk.part);
                        // Remark: this list might be reused up to *count times
                        chunksList.erase(it);
                        break; // exit loop for()

            // There is no remaining chunk to download
            break; // Exit main loop while()

    // Return the number of blocks
    count = newBlockCount;

    return (newBlockCount > 0);
void CPartFile::RemoveBlockFromList(uint64 start, uint64 end)
    std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
    while (it != m_requestedblocks_list.end()) {
        std::list<Requested_Block_Struct*>::iterator it2 = it++;
        if ((*it2)->StartOffset <= start && (*it2)->EndOffset >= end) {
            m_requestedblocks_list.erase(it2);


void CPartFile::RemoveAllRequestedBlocks(void)
    m_requestedblocks_list.clear();
void CPartFile::CompleteFile(bool bIsHashingDone)
    if (GetKadFileSearchID()) {
        Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);

    theApp->downloadqueue->RemoveLocalServerRequest(this);

    AddDebugLogLineN( logPartFile, wxString( wxT("CPartFile::CompleteFile: Hash ") ) + ( bIsHashingDone ? wxT("done") : wxT("not done") ) );

    if (!bIsHashingDone) {
        SetStatus(PS_COMPLETING);
        CPath partFile = m_partmetfilename.RemoveExt();
        CThreadScheduler::AddTask(new CHashingTask(GetFilePath(), partFile, this));

        m_is_A4AF_auto = false;
        SetStatus(PS_COMPLETING);

    // guess I was wrong about not needing to spawn a thread ...
    // It is needed if the temp and incoming dirs are on different
    // partitions/drives and the file is large... [oz]
    PerformFileComplete();

    if (thePrefs::ShowCatTabInfos()) {
        Notify_ShowUpdateCatTabTitles();
    UpdateDisplayedInfo(true);
void CPartFile::CompleteFileEnded(bool errorOccured, const CPath& newname)
        SetStatus(PS_ERROR);
        AddLogLineC(CFormat( _("Unexpected error while completing %s. File paused") ) % GetFileName() );

        m_fullname = newname;
        SetFilePath(m_fullname.GetPath());
        SetFileName(m_fullname.GetFullName());
        m_lastDateChanged = CPath::GetModificationTime(m_fullname);

        SetStatus(PS_COMPLETE);

        // Remove from list of canceled files in case it was canceled once upon a time
        if (theApp->canceledfiles->Remove(GetFileHash())) {
            theApp->canceledfiles->Save();

        // Mark as known (checks if it's already known),
        // also updates search files
        theApp->knownfiles->SafeAddKFile(this);

        // remove the file from the suspended uploads list
        theApp->uploadqueue->ResumeUpload(GetFileHash());
        theApp->downloadqueue->RemoveFile(this, true);
        theApp->sharedfiles->SafeAddKFile(this);
        UpdateDisplayedInfo(true);

        // republish that file to the ed2k-server to update the 'FT_COMPLETE_SOURCES' counter on the server.
        theApp->sharedfiles->RepublishFile(this);

        // Ensure that completed shows the correct value
        completedsize = GetFileSize();

        // clear the blackbox to free up memory
        m_CorruptionBlackBox->Free();

        AddLogLineC(CFormat( _("Finished downloading: %s") ) % GetFileName() );

    theApp->downloadqueue->StartNextFile(this);
void CPartFile::PerformFileComplete()
    // add this file to the suspended uploads list
    theApp->uploadqueue->SuspendUpload(GetFileHash(), false);

    // close permanent handle
    if (m_hpartfile.IsOpened()) {
        m_hpartfile.Close();

    // Schedule task for completion of the file
    CThreadScheduler::AddTask(new CCompletionTask(this));
void CPartFile::RemoveAllSources(bool bTryToSwap)
    for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end();) {
        CUpDownClient* cur_src = it++->GetClient();
            if (!cur_src->SwapToAnotherFile(true, true, true, NULL)) {
                RemoveSource(cur_src, true, false);
                // If it was not swapped, it's not on any file anymore, and should die
            RemoveSource(cur_src, true, false);

    /* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
    // remove all links A4AF in sources to this file
    if (!m_A4AFsrclist.empty()) {
        for (SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
            CUpDownClient* cur_src = it++->GetClient();
            if ( cur_src->DeleteFileRequest( this ) ) {
                Notify_SourceCtrlRemoveSource(cur_src->ECID(), this);
        m_A4AFsrclist.clear();
    /* eMule 0.30c implementation, i give it a try (Creteil) END ... */
    UpdateFileRatingCommentAvail();
void CPartFile::Delete()
    AddLogLineN(CFormat(_("Deleting file: %s")) % GetFileName());
    // Barry - Need to tell any connected clients to stop sending the file
    AddDebugLogLineN(logPartFile, wxT("\tStopped"));

    theApp->uploadqueue->SuspendUpload(GetFileHash(), true);
    AddDebugLogLineN(logPartFile, CFormat(wxT("\tSuspended upload to %d clients")) % removed);
    theApp->sharedfiles->RemoveFile(this);
    AddDebugLogLineN(logPartFile, wxT("\tRemoved from shared"));
    theApp->downloadqueue->RemoveFile(this);
    AddDebugLogLineN(logPartFile, wxT("\tRemoved from download queue"));
    Notify_DownloadCtrlRemoveFile(this);
    AddDebugLogLineN(logPartFile, wxT("\tRemoved from transferwnd"));
    if (theApp->canceledfiles->Add(GetFileHash())) {
        theApp->canceledfiles->Save();
    AddDebugLogLineN(logPartFile, wxT("\tAdded to canceled file list"));
    theApp->searchlist->UpdateSearchFileByHash(GetFileHash());	// Update file in the search dialog if it's still open

    if (m_hpartfile.IsOpened()) {
        m_hpartfile.Close();
    AddDebugLogLineN(logPartFile, wxT("\tClosed"));

    // cppcheck-suppress duplicateBranch
    if (!CPath::RemoveFile(m_fullname)) {
        AddDebugLogLineC(logPartFile, CFormat(wxT("\tFailed to delete '%s'")) % m_fullname);

        AddDebugLogLineN(logPartFile, wxT("\tRemoved .part.met"));

    // cppcheck-suppress duplicateBranch
    if (!CPath::RemoveFile(m_PartPath)) {
        AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % m_PartPath);

        AddDebugLogLineN(logPartFile, wxT("\tRemoved .part"));

    CPath BAKName = m_fullname.AppendExt(PARTMET_BAK_EXT);
    // cppcheck-suppress duplicateBranch
    if (!CPath::RemoveFile(BAKName)) {
        AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % BAKName);

        AddDebugLogLineN(logPartFile, wxT("\tRemoved .bak"));

    CPath SEEDSName = m_fullname.AppendExt(wxT(".seeds"));
    if (SEEDSName.FileExists()) {
        // cppcheck-suppress duplicateBranch
        if (CPath::RemoveFile(SEEDSName)) {
            AddDebugLogLineN(logPartFile, wxT("\tRemoved .seeds"));

            AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % SEEDSName);

    AddDebugLogLineN(logPartFile, wxT("Done"));
bool CPartFile::HashSinglePart(uint16 partnumber)
    if ((GetHashCount() <= partnumber) && (GetPartCount() > 1)) {
        AddLogLineC(CFormat( _("WARNING: Unable to hash downloaded part - hashset incomplete for '%s'") )
        m_hashsetneeded = true;
    } else if ((GetHashCount() <= partnumber) && GetPartCount() != 1) {
        AddLogLineC(CFormat( _("ERROR: Unable to hash downloaded part - hashset incomplete (%s). This should never happen")) % GetFileName() );
        m_hashsetneeded = true;

        CMD4Hash hashresult;
        uint64 offset = PARTSIZE * partnumber;
        uint32 length = GetPartSize(partnumber);
            CreateHashFromFile(m_hpartfile, offset, length, &hashresult, NULL);
        } catch (const CIOFailureException& e) {
            AddLogLineC(CFormat( _("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
                % partnumber % length % (offset + length) % GetFileName() % GetFileSize() % e.what());
            SetStatus(PS_ERROR);
        } catch (const CEOFException& e) {
            AddLogLineC(CFormat( _("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
                % partnumber % length % (offset + length) % GetFileName() % GetFileSize() % e.what());
            SetStatus(PS_ERROR);

        if (GetPartCount() > 1) {
            if (hashresult != GetPartHash(partnumber)) {
                AddDebugLogLineN(logPartFile, CFormat( wxT("%s: Expected hash of part %d: %s")) % GetFileName() % partnumber % GetPartHash(partnumber).Encode() );
                AddDebugLogLineN(logPartFile, CFormat( wxT("%s: Actual hash of part %d: %s")) % GetFileName() % partnumber % hashresult.Encode() );

            if (hashresult != m_abyFileHash) {
bool CPartFile::IsCorruptedPart(uint16 partnumber)
    return std::find(m_corrupted_list.begin(), m_corrupted_list.end(), partnumber)
        != m_corrupted_list.end();


void CPartFile::SetDownPriority(uint8 np, bool bSave, bool bRefresh)
    if ( m_iDownPriority != np ) {
        m_iDownPriority = np;
        UpdateDisplayedInfo(true);
void CPartFile::StopFile(bool bCancel)
    // Kry - Need to set it here to get into SetStatus(status) correctly
    // Barry - Need to tell any connected clients to stop sending the file

    m_LastSearchTimeKad = 0;
    m_TotalSearchesKad = 0;

    RemoveAllSources(true);
    UpdateDisplayedInfo(true);


void CPartFile::StopPausedFile()
    // Once an hour, remove any sources for files which are no longer active downloads
    switch (GetStatus()) {
        case PS_INSUFFICIENT:
            if (time(NULL) - m_iLastPausePurge > (60*60)) {
                m_iLastPausePurge = time(NULL);

    // release file handle if unused for some time
    m_hpartfile.Release();
void CPartFile::PauseFile(bool bInsufficient)
    if ( status == PS_COMPLETE || status == PS_COMPLETING ) {

    if (GetKadFileSearchID()) {
        Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);
        // If we were in the middle of searching, reset timer so they can resume searching.
        m_LastSearchTimeKad = 0;

    m_iLastPausePurge = time(NULL);

    theApp->downloadqueue->RemoveLocalServerRequest(this);

    CPacket packet( OP_CANCELTRANSFER, 0, OP_EDONKEYPROT );
    for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
        CUpDownClient* cur_src = it++->GetClient();
        if (cur_src->GetDownloadState() == DS_DOWNLOADING) {
            if (!cur_src->GetSentCancelTransfer()) {
                theStats::AddUpOverheadOther( packet.GetPacketSize() );
                AddDebugLogLineN( logLocalClient, wxT("Local Client: OP_CANCELTRANSFER to ") + cur_src->GetFullIP() );
                cur_src->SendPacket( &packet, false, true );
                cur_src->SetSentCancelTransfer( true );
            cur_src->SetDownloadState(DS_ONQUEUE);
            // Allow immediate reconnect on resume
            cur_src->ResetLastAskedTime();

    m_insufficient = bInsufficient;
void CPartFile::ResumeFile()
    if ( status == PS_COMPLETE || status == PS_COMPLETING ) {

    if ( m_insufficient && !CheckFreeDiskSpace() ) {
        // Still not enough free disk space

    m_insufficient = false;

    m_lastsearchtime = 0;
    SetActive(theApp->IsConnected());

    if (m_gaplist.IsComplete() && (GetStatus() == PS_ERROR)) {
        // The file has already been hashed at this point

    UpdateDisplayedInfo(true);
bool CPartFile::CheckFreeDiskSpace( uint64 neededSpace )
    uint64 free = CPath::GetFreeSpaceAt(GetFilePath());
    if (free == static_cast<uint64>(wxInvalidOffset)) {
        // If GetFreeSpaceAt() fails, then the path probably does not exist.

    // The very least acceptable diskspace is a single PART
    if ( free < PARTSIZE ) {
        // Always fail in this case, since we risk losing data if we try to
        // write on a full partition.

    // All other checks are only made if the user has enabled them
    if ( thePrefs::IsCheckDiskspaceEnabled() ) {
        neededSpace += thePrefs::GetMinFreeDiskSpace();

        // Due to the existence of sparse files, we cannot assume that
        // writes within the file don't cause new blocks to be allocated.
        // Therefore, we have to simply stop writing the moment the limit has
        return free >= neededSpace;
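    // (FlushBuffer() below passes the total amount of still-buffered data here,
    // so the check covers the whole pending flush rather than a single block.)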
void CPartFile::SetLastAnsweredTime()
    m_ClientSrcAnswered = ::GetTickCount();


void CPartFile::SetLastAnsweredTimeTimeout()
    m_ClientSrcAnswered = 2 * CONNECTION_LATENCY + ::GetTickCount() - SOURCECLIENTREASKS;
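    // Note: this backdates the answer time by (SOURCECLIENTREASKS - 2 * CONNECTION_LATENCY),
    // which presumably makes the next source reask become due after roughly twice the
    // connection latency instead of the full reask interval.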
CPacket* CPartFile::CreateSrcInfoPacket(const CUpDownClient* forClient, uint8 byRequestedVersion, uint16 nRequestedOptions)
    if ( m_SrcList.empty() ) {

        return CKnownFile::CreateSrcInfoPacket(forClient, byRequestedVersion, nRequestedOptions);

    if (((forClient->GetRequestFile() != this)
        && (forClient->GetUploadFile() != this)) || forClient->GetUploadFileID() != GetFileHash()) {
        wxString file1 = _("Unknown");
        if (forClient->GetRequestFile() && forClient->GetRequestFile()->GetFileName().IsOk()) {
            file1 = forClient->GetRequestFile()->GetFileName().GetPrintable();
        } else if (forClient->GetUploadFile() && forClient->GetUploadFile()->GetFileName().IsOk()) {
            file1 = forClient->GetUploadFile()->GetFileName().GetPrintable();
        wxString file2 = _("Unknown");
        if (GetFileName().IsOk()) {
            file2 = GetFileName().GetPrintable();
        AddDebugLogLineN(logPartFile, wxT("File mismatch on source packet (P) Sending: ") + file1 + wxT(" From: ") + file2);

    if ( !(GetStatus() == PS_READY || GetStatus() == PS_EMPTY)) {

    const BitVector& reqstatus = forClient->GetPartStatus();
    bool KnowNeededParts = !reqstatus.empty();
    //wxASSERT(rcvstatus.size() == GetPartCount()); // Obviously!
    if (KnowNeededParts && (reqstatus.size() != GetPartCount())) {
        // Yuck. Same file but different part count? Seriously fucked up.
        // This happens rather often with reqstatus.size() == 0. Don't log then.
        if (reqstatus.size()) {
            AddDebugLogLineN(logKnownFiles, CFormat(wxT("Impossible situation: different partcounts: %i (client) and %i (file) for %s")) % reqstatus.size() % GetPartCount() % GetFileName());

    CMemFile data(1024);

    uint8 byUsedVersion;
    if (forClient->SupportsSourceExchange2() && byRequestedVersion > 0) {
        // the client uses SourceExchange2 and requested the highest version he knows
        // and we send the highest version we know, but of course not higher than his request
        byUsedVersion = std::min(byRequestedVersion, (uint8)SOURCEEXCHANGE2_VERSION);
        bIsSX2Packet = true;
        data.WriteUInt8(byUsedVersion);

        // we don't support any special SX2 options yet, reserved for later use
        if (nRequestedOptions != 0) {
            AddDebugLogLineN(logKnownFiles, CFormat(wxT("Client requested unknown options for SourceExchange2: %u")) % nRequestedOptions);

        byUsedVersion = forClient->GetSourceExchange1Version();
        bIsSX2Packet = false;
        if (forClient->SupportsSourceExchange2()) {
            AddDebugLogLineN(logKnownFiles, wxT("Client which announced to support SX2 sent SX1 packet instead"));

    data.WriteHash(m_abyFileHash);
    data.WriteUInt16(nCount);

    for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
        CUpDownClient* cur_src = it->GetClient();

        int state = cur_src->GetDownloadState();
        int valid = ( state == DS_DOWNLOADING ) || ( state == DS_ONQUEUE && !cur_src->IsRemoteQueueFull() );

        if ( cur_src->HasLowID() || !valid ) {

        // only send sources which have needed parts for this client if possible
        const BitVector& srcstatus = cur_src->GetPartStatus();
        if ( !srcstatus.empty() ) {
            //wxASSERT(srcstatus.size() == GetPartCount()); // Obviously!
            if (srcstatus.size() != GetPartCount()) {

            if ( KnowNeededParts ) {
                // only send sources which have needed parts for this client
                for (int x = 0; x < GetPartCount(); ++x) {
                    if (srcstatus.get(x) && !reqstatus.get(x)) {

                // if we don't know the needed parts for this client, return any source;
                // currently a client sends its file status only after it has at least
                // one complete part
                if (srcstatus.size() != GetPartCount()) {

                for (int x = 0; x < GetPartCount(); ++x) {
                    if (srcstatus.get(x)) {

        if (forClient->GetSourceExchange1Version() > 2) {
            dwID = cur_src->GetUserIDHybrid();

            dwID = wxUINT32_SWAP_ALWAYS(cur_src->GetUserIDHybrid());
        data.WriteUInt32(dwID);
        data.WriteUInt16(cur_src->GetUserPort());
        data.WriteUInt32(cur_src->GetServerIP());
        data.WriteUInt16(cur_src->GetServerPort());

        if (byUsedVersion >= 2) {
            data.WriteHash(cur_src->GetUserHash());

        if (byUsedVersion >= 4) {
            // CryptSettings - SourceExchange V4
            // 1 CryptLayer Required
            // 1 CryptLayer Requested
            // 1 CryptLayer Supported
            const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
            const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
            const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
            const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
            data.WriteUInt8(byCryptOptions);
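            // e.g. a source that supports and requires the crypt layer but has not
            // requested it yields byCryptOptions == 0b101 (decimal 5).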
    data.Seek(bIsSX2Packet ? 17 : 16, wxFromStart);
    data.WriteUInt16(nCount);

    CPacket* result = new CPacket(data, OP_EMULEPROT, bIsSX2Packet ? OP_ANSWERSOURCES2 : OP_ANSWERSOURCES);

    // 16+2+501*(4+2+4+2+16) = 14046 bytes max.
    if (result->GetPacketSize() > 354) {
        result->PackPacket();
void CPartFile::AddClientSources(CMemFile* sources, unsigned nSourceFrom, uint8 uClientSXVersion, bool bSourceExchange2, const CUpDownClient* /*pClient*/)
    uint8 uPacketSXVersion = 0;
    if (!bSourceExchange2) {
        nCount = sources->ReadUInt16();

        // Check if the data size matches the 'nCount' for v1 or v2 and eventually correct the source
        // exchange version while reading the packet data. Otherwise we could experience a higher
        // chance in dealing with wrong source data, userhashs and finally duplicate sources.
        uint32 uDataSize = sources->GetLength() - sources->GetPosition();

        if ((uint32)(nCount*(4+2+4+2)) == uDataSize) { // Checks if version 1 packet is correct size
            if (uClientSXVersion != 1) {
            uPacketSXVersion = 1;
        } else if ((uint32)(nCount*(4+2+4+2+16)) == uDataSize) { // Checks if version 2&3 packet is correct size
            if (uClientSXVersion == 2) {
                uPacketSXVersion = 2;
            } else if (uClientSXVersion > 2) {
                uPacketSXVersion = 3;
        } else if (nCount*(4+2+4+2+16+1) == uDataSize) {
            if (uClientSXVersion != 4 ) {
            uPacketSXVersion = 4;

            // If v5 inserts additional data (like v2), the above code will correctly filter those packets.
            // If v5 appends additional data after <count>(<Sources>)[count], we are in trouble with the
            // above code. Though a client which does not understand v5+ should never receive such a packet.
            AddDebugLogLineN(logClient, CFormat(wxT("Received invalid source exchange packet (v%u) of data size %u for %s")) % uClientSXVersion % uDataSize % GetFileName());

        // We only check if the version is known by us and do a quick sanitize check on known versions
        // other than SX1; the packet will be ignored if any error appears, since it can't be a "misunderstanding" anymore
        if (uClientSXVersion > SOURCEEXCHANGE2_VERSION || uClientSXVersion == 0) {
            AddDebugLogLineN(logPartFile, CFormat(wxT("Invalid source exchange type version: %i")) % uClientSXVersion);

        // all known versions use the first 2 bytes as count and unknown versions are already filtered above
        nCount = sources->ReadUInt16();
        uint32 uDataSize = (uint32)(sources->GetLength() - sources->GetPosition());
        bool bError = false;
        switch (uClientSXVersion) {
                bError = nCount*(4+2+4+2) != uDataSize;
                bError = nCount*(4+2+4+2+16) != uDataSize;
                bError = nCount*(4+2+4+2+16+1) != uDataSize;

            AddDebugLogLineN(logPartFile, wxT("Invalid source exchange data size."));

        uPacketSXVersion = uClientSXVersion;
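    // For reference, each source record read in the loop below is:
    //   v1:     4+2+4+2      = 12 bytes (ID, port, server IP, server port)
    //   v2/v3:  4+2+4+2+16   = 28 bytes (adds the 16-byte user hash)
    //   v4:     4+2+4+2+16+1 = 29 bytes (adds the crypt options byte)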
    for (uint16 i = 0; i != nCount; ++i) {
        uint32 dwID = sources->ReadUInt32();
        uint16 nPort = sources->ReadUInt16();
        uint32 dwServerIP = sources->ReadUInt32();
        uint16 nServerPort = sources->ReadUInt16();

        if (uPacketSXVersion > 1) {
            userHash = sources->ReadHash();

        uint8 byCryptOptions = 0;
        if (uPacketSXVersion >= 4) {
            byCryptOptions = sources->ReadUInt8();

        // Clients send IDs in the Hybrid format, so high-ID clients with *.*.*.0 won't be falsely switched to a low ID.
        if (uPacketSXVersion >= 3) {
            dwIDED2K = wxUINT32_SWAP_ALWAYS(dwID);

        // check the HighID(IP) - "Filter LAN IPs" and "IPfilter" the received sources IP addresses
        if (!IsLowID(dwID)) {
            if (!IsGoodIP(dwIDED2K, thePrefs::FilterLanIPs())) {
                // check for 0-IP, localhost and optionally for LAN addresses
                AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - bad IP")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));

            if (theApp->ipfilter->IsFiltered(dwIDED2K)) {
                AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - IPFilter")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));

            if (theApp->clientlist->IsBannedClient(dwIDED2K)) {

        // additionally check for LowID and own IP
        if (!CanAddSource(dwID, nPort, dwServerIP, nServerPort, NULL, false)) {
            AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via source exchange")) % Uint32toStringIP(dwIDED2K));

        if (thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
            CUpDownClient* newsource = new CUpDownClient(nPort, dwID, dwServerIP, nServerPort, this, (uPacketSXVersion < 3), true);
            if (uPacketSXVersion > 1) {
                newsource->SetUserHash(userHash);
            if (uPacketSXVersion >= 4) {
                newsource->SetConnectOptions(byCryptOptions, true, false);
            newsource->SetSourceFrom((ESourceFrom)nSourceFrom);
            theApp->downloadqueue->CheckAndAddSource(this, newsource);
void CPartFile::UpdateAutoDownPriority()
    if (!IsAutoDownPriority()) {

    if (GetSourceCount() <= theApp->downloadqueue->GetRareFileThreshold()) {
        if ( GetDownPriority() != PR_HIGH )
            SetDownPriority(PR_HIGH, false, false);
    } else if (GetSourceCount() < theApp->downloadqueue->GetCommonFileThreshold()) {
        if ( GetDownPriority() != PR_NORMAL )
            SetDownPriority(PR_NORMAL, false, false);

        if ( GetDownPriority() != PR_LOW )
            SetDownPriority(PR_LOW, false, false);


// making this function return a higher value when more sources have the extended
// protocol will force you to ask a larger variety of people for sources
int CPartFile::GetCommonFilePenalty()
    //TODO: implement, but never return less than MINCOMMONPENALTY!
    return MINCOMMONPENALTY;
/* Barry - Replaces BlockReceived()

	Originally this only wrote to disk when a full 180k block
	had been received from a client, and only asked for data in

	This meant that on average 90k was lost for every connection
	to a client data source. That is a lot of wasted data.

	To reduce the lost data, packets are now written to a buffer
	and flushed to disk regularly regardless of size downloaded.
	This includes compressed packets.

	Data is also requested only where gaps are, not in 180k blocks.
	The requests will still not exceed 180k, but may be smaller to
*/

// Kry - transize is 32bits, no packet can be more than that (this is
// compressed size). Even 32bits is too much imho. As for the return size,
// look at the lenData below.
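// In short: WriteToBuffer() below queues each received range and marks its gap as
// filled, while FlushBuffer() later writes the queued ranges to disk and hashes any
// parts that were completed by the flush.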
uint32 CPartFile::WriteToBuffer(uint32 transize, byte* data, uint64 start, uint64 end, Requested_Block_Struct* block, const CUpDownClient* client)
    // Increment transferred bytes counter for this file
    transferred += transize;

    // This is needed a few times
    // Kry - should not need a uint64 here - no block is larger than
    // 2GB even after uncompressed.
    uint32 lenData = (uint32)(end - start + 1);

    if (lenData > transize) {
        m_iGainDueToCompression += lenData - transize;

    // Occasionally packets are duplicated, no point writing it twice
    if (IsComplete(start, end)) {
        AddDebugLogLineN(logPartFile,
            CFormat(wxT("File '%s' has already been written from %u to %u"))
                % GetFileName() % start % end);

    // security sanitize check to make sure we do not write anything into an already hashed complete chunk
    const uint64 nStartChunk = start / PARTSIZE;
    const uint64 nEndChunk = end / PARTSIZE;
    if (IsComplete(nStartChunk)) {
        AddDebugLogLineN(logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (start): %u-%u; File=%s")) % start % end % GetFileName());
    } else if (nStartChunk != nEndChunk) {
        if (IsComplete(nEndChunk)) {
            AddDebugLogLineN(logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (end): %u-%u; File=%s")) % start % end % GetFileName());

            AddDebugLogLineN(logPartFile, CFormat(wxT("Received data crosses chunk boundaries: %u-%u; File=%s")) % start % end % GetFileName());

    // log transfer information in our "blackbox"
    m_CorruptionBlackBox->TransferredData(start, end, client->GetIP());

    // Create a new buffered queue entry
    PartFileBufferedData* item = new PartFileBufferedData(m_hpartfile, data, start, end, block);

    // Add to the queue in the correct position (most likely the end)
    std::list<PartFileBufferedData*>::iterator it = m_BufferedData_list.begin();
    for (; it != m_BufferedData_list.end(); ++it) {
        PartFileBufferedData* queueItem = *it;

        if (item->end <= queueItem->end) {
            if (it != m_BufferedData_list.begin()) {
                m_BufferedData_list.insert(--it, item);

        m_BufferedData_list.push_front(item);

    // Increment buffer size marker
    m_nTotalBufferData += lenData;

    // Mark this small section of the file as filled
    FillGap(item->start, item->end);

    // Update the flushed mark on the requested block
    // The loop here is unfortunate but necessary to detect deleted blocks.
    std::list<Requested_Block_Struct*>::iterator it2 = m_requestedblocks_list.begin();
    for (; it2 != m_requestedblocks_list.end(); ++it2) {
        if (*it2 == item->block) {
            item->block->transferred += lenData;

    if (m_gaplist.IsComplete()) {

    // Return the length of data written to the buffer
void CPartFile::FlushBuffer(bool fromAICHRecoveryDataAvailable)
    m_nLastBufferFlushTime = GetTickCount();

    if (m_BufferedData_list.empty()) {

    uint32 partCount = GetPartCount();
    // Remember which parts need to be checked at the end of the flush
    std::vector<bool> changedPart(partCount, false);

    // Ensure file is big enough to write data to (the last item will be the furthest from the start)
    if (!CheckFreeDiskSpace(m_nTotalBufferData)) {
        // Not enough free space to write the last item, bail
        AddLogLineC(CFormat( _("WARNING: Not enough free disk-space! Pausing file: %s") ) % GetFileName());

    // Loop through queue
    while ( !m_BufferedData_list.empty() ) {
        // Get top item and remove it from the queue
        CScopedPtr<PartFileBufferedData> item(m_BufferedData_list.front());
        m_BufferedData_list.pop_front();

        // This is needed a few times
        wxASSERT((item->end - item->start) < 0xFFFFFFFF);
        uint32 lenData = (uint32)(item->end - item->start + 1);

        // SLUGFILLER: SafeHash - could be more than one part
        for (uint32 curpart = (item->start / PARTSIZE); curpart <= (item->end / PARTSIZE); ++curpart) {
            wxASSERT(curpart < partCount);
            changedPart[curpart] = true;
        // SLUGFILLER: SafeHash

        // Go to the correct position in file and write block of data
            item->area.FlushAt(m_hpartfile, item->start, lenData);
            // Decrease buffer size
            m_nTotalBufferData -= lenData;
        } catch (const CIOFailureException& e) {
            AddDebugLogLineC(logPartFile, wxT("Error while saving part-file: ") + e.what());
            SetStatus(PS_ERROR);
            // No need to bang your head against it again and again if it has already failed.
            DeleteContents(m_BufferedData_list);
            m_nTotalBufferData = 0;

    // Update last-changed date
    m_lastDateChanged = wxDateTime::GetTimeNow();

    // Partfile should never be too large
    if (m_hpartfile.GetLength() > GetFileSize()) {
        // it's "last chance" correction. the real bugfix has to be applied 'somewhere' else
        m_hpartfile.SetLength(GetFileSize());
    } catch (const CIOFailureException& e) {
        AddDebugLogLineC(logPartFile,
            CFormat(wxT("Error while truncating part-file (%s): %s"))
                % m_PartPath % e.what());
        SetStatus(PS_ERROR);

    // Check each part of the file
    for (uint16 partNumber = 0; partNumber < partCount; ++partNumber) {
        if (changedPart[partNumber] == false) {

        uint32 partRange = GetPartSize(partNumber) - 1;

        // Is this 9MB part complete
        if (IsComplete(partNumber)) {
            if (!HashSinglePart(partNumber)) {
                AddLogLineC(CFormat(
                    _("Downloaded part %i is corrupt in file: %s") ) % partNumber % GetFileName() );
                // add part to corrupted list, if not already there
                if (!IsCorruptedPart(partNumber)) {
                    m_corrupted_list.push_back(partNumber);
                // request AICH recovery data
                // Don't if called from the AICHRecovery. It's already there and would lead to an infinite recursion.
                if (!fromAICHRecoveryDataAvailable) {
                    RequestAICHRecovery(partNumber);
                // Reduce transferred amount by corrupt amount
                m_iLostDueToCorruption += (partRange + 1);

                if (!m_hashsetneeded) {
                    AddDebugLogLineN(logPartFile, CFormat(
                        wxT("Finished part %u of '%s'")) % partNumber % GetFileName());

                // tell the blackbox about the verified data
                m_CorruptionBlackBox->VerifiedData(true, partNumber, 0, partRange);

                // if this part was successfully completed (although ICH is active), remove from corrupted list
                EraseFirstValue(m_corrupted_list, partNumber);

                if (status == PS_EMPTY) {
                    if (theApp->IsRunning()) { // may be called during shutdown!
                        if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
                            // Successfully completed part, make it available for sharing
                            SetStatus(PS_READY);
                            theApp->sharedfiles->SafeAddKFile(this);

        } else if ( IsCorruptedPart(partNumber) &&      // corrupted part:
                (thePrefs::IsICHEnabled()               // old ICH: rehash whenever we have new data hoping it will be good now
                 || fromAICHRecoveryDataAvailable)) {   // new AICH: one rehash right before performing it (maybe it's already good)
            // Try to recover with minimal loss
            if (HashSinglePart(partNumber)) {
                ++m_iTotalPacketsSavedDueToICH;

                uint64 uMissingInPart = m_gaplist.GetGapSize(partNumber);
                FillGap(partNumber);
                RemoveBlockFromList(PARTSIZE*partNumber, (PARTSIZE*partNumber + partRange));

                // tell the blackbox about the verified data
                m_CorruptionBlackBox->VerifiedData(true, partNumber, 0, partRange);

                // remove from corrupted list
                EraseFirstValue(m_corrupted_list, partNumber);

                AddLogLineC(CFormat( _("ICH: Recovered corrupted part %i for %s -> Saved bytes: %s") )

                    % CastItoXBytes(uMissingInPart));

                if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
                    if (status == PS_EMPTY) {
                        // Successfully recovered part, make it available for sharing
                        SetStatus(PS_READY);
                        if (theApp->IsRunning()) // may be called during shutdown!
                            theApp->sharedfiles->SafeAddKFile(this);

    if (theApp->IsRunning()) { // may be called during shutdown!
        // Is this file finished ?
        if (m_gaplist.IsComplete()) {
            CompleteFile(false);
// read data for upload, return false on error
bool CPartFile::ReadData(CFileArea& area, uint64 offset, uint32 toread)
    if (offset + toread > GetFileSize()) {
        AddDebugLogLineN(logPartFile, CFormat(wxT("tried to read %d bytes past eof of %s"))
            % (offset + toread - GetFileSize()) % GetFileName());

    area.ReadAt(m_hpartfile, offset, toread);
    // if it fails it throws (which the caller should catch)
void CPartFile::UpdateFileRatingCommentAvail()
    bool prevComment = m_hasComment;
    int prevRating = m_iUserRating;

    m_hasComment = false;
    int ratingCount = 0;

    SourceSet::iterator it = m_SrcList.begin();
    for (; it != m_SrcList.end(); ++it) {
        CUpDownClient* cur_src = it->GetClient();

        if (!cur_src->GetFileComment().IsEmpty()) {
            if (thePrefs::IsCommentFiltered(cur_src->GetFileComment())) {

            m_hasComment = true;

        uint8 rating = cur_src->GetFileRating();
        wxASSERT(rating <= 5);

            m_iUserRating += rating;

        m_iUserRating /= ratingCount;
        wxASSERT(m_iUserRating > 0 && m_iUserRating <= 5);

    if ((prevComment != m_hasComment) || (prevRating != m_iUserRating)) {
        UpdateDisplayedInfo();
void CPartFile::SetCategory(uint8 cat)
    wxASSERT( cat < theApp->glob_prefs->GetCatCount() );


bool CPartFile::RemoveSource(CUpDownClient* toremove, bool updatewindow, bool bDoStatsUpdate)
    wxASSERT( toremove );

    bool result = theApp->downloadqueue->RemoveSource( toremove, updatewindow, bDoStatsUpdate );

    // Check if the client should be deleted, but not if the client is already dying
    if ( !toremove->GetSocket() && !toremove->HasBeenDeleted() ) {
        if ( toremove->Disconnected(wxT("RemoveSource - purged")) ) {
            toremove->Safe_Delete();


void CPartFile::AddDownloadingSource(CUpDownClient* client)
    CClientRefList::iterator it =
        std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), CCLIENTREF(client, wxEmptyString));
    if (it == m_downloadingSourcesList.end()) {
        m_downloadingSourcesList.push_back(CCLIENTREF(client, wxT("CPartFile::AddDownloadingSource")));


void CPartFile::RemoveDownloadingSource(CUpDownClient* client)
    CClientRefList::iterator it =
        std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), CCLIENTREF(client, wxEmptyString));
    if (it != m_downloadingSourcesList.end()) {
        m_downloadingSourcesList.erase(it);
uint64 CPartFile::GetNeededSpace()
        uint64 length = m_hpartfile.GetLength();

        if (length > GetFileSize()) {
            return 0;	// Shouldn't happen, but just in case

        return GetFileSize() - length;
    } catch (const CIOFailureException& e) {
        AddDebugLogLineC(logPartFile,
            CFormat(wxT("Error while retrieving file-length (%s): %s"))
                % m_PartPath % e.what());
        SetStatus(PS_ERROR);


void CPartFile::SetStatus(uint8 in)
    // PAUSED and INSUFFICIENT have extra flag variables m_paused and m_insufficient
    // - they are never to be stored in status
    wxASSERT( in != PS_PAUSED && in != PS_INSUFFICIENT );

    if (theApp->IsRunning()) {
        UpdateDisplayedInfo( true );

        if ( thePrefs::ShowCatTabInfos() ) {
            Notify_ShowUpdateCatTabTitles();

        Notify_DownloadCtrlSort();
void CPartFile::RequestAICHRecovery(uint16 nPart)
    if ( !m_pAICHHashSet->HasValidMasterHash() ||
         (m_pAICHHashSet->GetStatus() != AICH_TRUSTED && m_pAICHHashSet->GetStatus() != AICH_VERIFIED)) {
        AddDebugLogLineN( logAICHRecovery, wxT("Unable to request AICH Recoverydata because we have no trusted Masterhash") );

    if (GetPartSize(nPart) <= EMBLOCKSIZE)

    if (CAICHHashSet::IsClientRequestPending(this, nPart)) {
        AddDebugLogLineN( logAICHRecovery, wxT("RequestAICHRecovery: Already a request for this part pending"));

    // first check if we already have the recovery data, no need to re-request it then
    if (m_pAICHHashSet->IsPartDataAvailable(nPart*PARTSIZE)) {
        AddDebugLogLineN( logAICHRecovery, wxT("Found PartRecoveryData in memory"));
        AICHRecoveryDataAvailable(nPart);

    wxASSERT( nPart < GetPartCount() );
    // find some random client which supports AICH to ask for the blocks
    // first let's see how many we have at all, we prefer high id very much
    uint32 cAICHClients = 0;
    uint32 cAICHLowIDClients = 0;
    for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
        CUpDownClient* pCurClient = it->GetClient();
        if ( pCurClient->IsSupportingAICH() &&
             pCurClient->GetReqFileAICHHash() != NULL &&
             !pCurClient->IsAICHReqPending()
             && (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
            if (pCurClient->HasLowID()) {
                ++cAICHLowIDClients;

    if ((cAICHClients | cAICHLowIDClients) == 0) {
        AddDebugLogLineN( logAICHRecovery, wxT("Unable to request AICH Recoverydata because found no client who supports it and has the same hash as the trusted one"));

    uint32 nSeclectedClient;
    if (cAICHClients > 0) {
        nSeclectedClient = (rand() % cAICHClients) + 1;

        nSeclectedClient = (rand() % cAICHLowIDClients) + 1;
    CUpDownClient* pClient = NULL;
    for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
        CUpDownClient* pCurClient = it->GetClient();
        if (pCurClient->IsSupportingAICH() && pCurClient->GetReqFileAICHHash() != NULL && !pCurClient->IsAICHReqPending()
            && (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
            if (cAICHClients > 0) {
                if (!pCurClient->HasLowID())

                wxASSERT( pCurClient->HasLowID());
            if (nSeclectedClient == 0) {
                pClient = pCurClient;

    if (pClient == NULL) {

    AddDebugLogLineN( logAICHRecovery, CFormat( wxT("Requesting AICH Hash (%s) from client %s") ) % ( cAICHClients ? wxT("HighId") : wxT("LowID") ) % pClient->GetClientFullInfo() );
    pClient->SendAICHRequest(this, nPart);
void CPartFile::AICHRecoveryDataAvailable(uint16 nPart)
    if (GetPartCount() < nPart) {

    uint32 length = GetPartSize(nPart);
    // if the part was already ok, it would now be complete
    if (IsComplete(nPart)) {
        AddDebugLogLineN(logAICHRecovery, CFormat(wxT("Processing AICH Recovery data: The part (%u) is already complete, canceling")) % nPart);

    CAICHHashTree* pVerifiedHash = m_pAICHHashSet->m_pHashTree.FindHash(nPart*PARTSIZE, length);
    if (pVerifiedHash == NULL || !pVerifiedHash->GetHashValid()) {
        AddDebugLogLineC( logAICHRecovery, wxT("Processing AICH Recovery data: Unable to get verified hash from hashset (should never happen)") );

    CAICHHashTree htOurHash(pVerifiedHash->GetNDataSize(), pVerifiedHash->GetIsLeftBranch(), pVerifiedHash->GetNBaseSize());
        CreateHashFromFile(m_hpartfile, PARTSIZE * nPart, length, NULL, &htOurHash);
    } catch (const CIOFailureException& e) {
        AddDebugLogLineC(logAICHRecovery,
            CFormat(wxT("IO failure while hashing part-file '%s': %s"))
                % m_hpartfile.GetFilePath() % e.what());
        SetStatus(PS_ERROR);

    if (!htOurHash.GetHashValid()) {
        AddDebugLogLineN( logAICHRecovery, wxT("Processing AICH Recovery data: Failed to retrieve AICH Hashset of corrupt part") );

    // now compare the hash we just did to the verified hash and re-add all blocks which are ok
    uint32 nRecovered = 0;
    for (uint32 pos = 0; pos < length; pos += EMBLOCKSIZE) {
        const uint32 nBlockSize = min<uint32>(EMBLOCKSIZE, length - pos);
        CAICHHashTree* pVerifiedBlock = pVerifiedHash->FindHash(pos, nBlockSize);
        CAICHHashTree* pOurBlock = htOurHash.FindHash(pos, nBlockSize);
        if ( pVerifiedBlock == NULL || pOurBlock == NULL || !pVerifiedBlock->GetHashValid() || !pOurBlock->GetHashValid()) {

        if (pOurBlock->GetHash() == pVerifiedBlock->GetHash()) {
            FillGap(PARTSIZE*nPart + pos, PARTSIZE*nPart + pos + (nBlockSize - 1));
            RemoveBlockFromList(PARTSIZE*nPart, PARTSIZE*nPart + (nBlockSize - 1));
            nRecovered += nBlockSize;
            // tell the blackbox about the verified data
            m_CorruptionBlackBox->VerifiedData(true, nPart, pos, pos + nBlockSize - 1);

            // inform our "blackbox" about the corrupted block which may ban clients who sent it
            m_CorruptionBlackBox->VerifiedData(false, nPart, pos, pos + nBlockSize - 1);

    m_CorruptionBlackBox->EvaluateData();

    // ok now some sanity checks
    if (IsComplete(nPart)) {
        // this is bad, but it could probably happen under some rare circumstances
        // make sure that MD4 agrees to this fact too
        if (!HashSinglePart(nPart)) {
            AddDebugLogLineN(logAICHRecovery,
                CFormat(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering - but MD4 says it's corrupt! Setting hashset to error state, deleting part")) % nPart);
            // now we are fu... unhappy
            m_pAICHHashSet->SetStatus(AICH_ERROR);

            AddDebugLogLineN(logAICHRecovery,
                CFormat(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering and MD4 agrees")) % nPart);
            if (status == PS_EMPTY && theApp->IsRunning()) {
                if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
                    // Successfully recovered part, make it available for sharing
                    SetStatus(PS_READY);
                    theApp->sharedfiles->SafeAddKFile(this);

            if (theApp->IsRunning()) {
                // Is this file finished?
                if (m_gaplist.IsComplete()) {
                    CompleteFile(false);
    } // end sanity check

    // We did the best we could. If it's still incomplete, then no need to keep
    // bashing it with ICH. So remove it from the list of corrupted parts.
    EraseFirstValue(m_corrupted_list, nPart);

    // make sure the user appreciates our great recovering work :P
    AddDebugLogLineC( logAICHRecovery, CFormat(
        wxT("AICH successfully recovered %s of %s from part %u for %s") )
        % CastItoXBytes(nRecovered)
        % CastItoXBytes(length)
void CPartFile::ClientStateChanged( int oldState, int newState )
    if ( oldState == newState )

    // If the state is -1, then it's an entirely new item
    if ( oldState != -1 ) {
        // Was the old state a valid state?
        if ( oldState == DS_ONQUEUE || oldState == DS_DOWNLOADING ) {

            if ( oldState == DS_CONNECTED /* || oldState == DS_REMOTEQUEUEFULL */ ) {

                m_notCurrentSources--;

    // If the state is -1, then the source is being removed
    if ( newState != -1 ) {
        // Is the new state a valid state?
        if ( newState == DS_ONQUEUE || newState == DS_DOWNLOADING ) {

            if ( newState == DS_CONNECTED /* || newState == DS_REMOTEQUEUEFULL */ ) {

                ++m_notCurrentSources;
bool CPartFile::AddSource( CUpDownClient* client )
    if (m_SrcList.insert(CCLIENTREF(client, wxT("CPartFile::AddSource"))).second) {
        theStats::AddFoundSource();
        theStats::AddSourceOrigin(client->GetSourceFrom());


bool CPartFile::DelSource( CUpDownClient* client )
    if (m_SrcList.erase(CCLIENTREF(client, wxEmptyString))) {
        theStats::RemoveSourceOrigin(client->GetSourceFrom());
        theStats::RemoveFoundSource();


void CPartFile::UpdatePartsFrequency( CUpDownClient* client, bool increment )
    const BitVector& freq = client->GetPartStatus();

    if ( m_SrcpartFrequency.size() != GetPartCount() ) {
        m_SrcpartFrequency.clear();
        m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

    unsigned int size = freq.size();
    if ( size != m_SrcpartFrequency.size() ) {

        for ( unsigned int i = 0; i < size; i++ ) {
            if ( freq.get(i) ) {
                m_SrcpartFrequency[i]++;

        for ( unsigned int i = 0; i < size; i++ ) {
            if ( freq.get(i) ) {
                m_SrcpartFrequency[i]--;


void CPartFile::GetRatingAndComments(FileRatingList& list) const
    // This can be pre-processed, but is it worth the CPU?
    CPartFile::SourceSet::const_iterator it = m_SrcList.begin();
    for ( ; it != m_SrcList.end(); ++it ) {
        CUpDownClient* cur_src = it->GetClient();
        if (cur_src->GetFileComment().Length() > 0 || cur_src->GetFileRating() > 0) {
            // AddDebugLogLineN(logPartFile, wxString(wxT("found a comment for ")) << GetFileName());
            list.push_back(SFileRating(*cur_src));
CPartFile::CPartFile(const CEC_PartFile_Tag* tag) : CKnownFile(tag)
    SetFileName(CPath(tag->FileName()));
    m_abyFileHash = tag->FileHash();
    SetFileSize(tag->SizeFull());
    m_gaplist.Init(GetFileSize(), true);	// Init empty
    m_partmetfilename = CPath(tag->PartMetName());
    m_fullname = m_partmetfilename;		// We have only the met number, so show it without path in the detail dialog.

    m_SrcpartFrequency.insert(m_SrcpartFrequency.end(), GetPartCount(), 0);

    // these are only in CLIENT_GUI and not covered by Init()
    m_iDownPriorityEC = 0;
    m_a4af_source_count = 0;


/* Remote gui specific code */
CPartFile::~CPartFile()


void CPartFile::GetRatingAndComments(FileRatingList& list) const
    list = m_FileRatingList;


void CPartFile::SetCategory(uint8 cat)


bool CPartFile::AddSource(CUpDownClient* client)
    return m_SrcList.insert(CCLIENTREF(client, wxT("CPartFile::AddSource"))).second != 0;


bool CPartFile::DelSource(CUpDownClient* client)
    return m_SrcList.erase(CCLIENTREF(client, wxEmptyString)) != 0;


#endif // !CLIENT_GUI
void CPartFile::UpdateDisplayedInfo(bool force)
    uint32 curTick = ::GetTickCount();

    // Wait 1.5s between each redraw
    if (force || curTick - m_lastRefreshedDLDisplay > MINWAIT_BEFORE_DLDISPLAY_WINDOWUPDATE) {
        Notify_DownloadCtrlUpdateItem(this);
        m_lastRefreshedDLDisplay = curTick;


void CPartFile::Init()
    m_lastsearchtime = 0;
    lastpurgetime = ::GetTickCount();
    m_insufficient = false;

    m_iLastPausePurge = time(NULL);

    if (thePrefs::GetNewAutoDown()) {
        m_iDownPriority = PR_HIGH;
        m_bAutoDownPriority = true;

        m_iDownPriority = PR_NORMAL;
        m_bAutoDownPriority = false;

    transferingsrc = 0; // new

    m_hashsetneeded = true;
    percentcompleted = 0;
    lastseencomplete = 0;
    m_availablePartsCount = 0;
    m_ClientSrcAnswered = 0;
    m_LastNoNeededCheck = 0;
    m_nTotalBufferData = 0;
    m_nLastBufferFlushTime = 0;
    m_bPercentUpdated = false;
    m_iGainDueToCompression = 0;
    m_iLostDueToCorruption = 0;
    m_iTotalPacketsSavedDueToICH = 0;
    m_lastRefreshedDLDisplay = 0;
    m_nDlActiveTime = 0;
    m_is_A4AF_auto = false;
    m_localSrcReqQueued = false;
    m_nCompleteSourcesTime = time(NULL);
    m_nCompleteSourcesCount = 0;
    m_nCompleteSourcesCountLo = 0;
    m_nCompleteSourcesCountHi = 0;
    m_notCurrentSources = 0;
    m_LastSearchTimeKad = 0;
    m_TotalSearchesKad = 0;
    m_CorruptionBlackBox = new CCorruptionBlackBox();
wxString CPartFile::getPartfileStatus() const
    if ((status == PS_HASHING) || (status == PS_WAITINGFORHASH)) {
        mybuffer = _("Hashing");
    } else if (status == PS_ALLOCATING) {
        mybuffer = _("Allocating");

        switch (GetStatus()) {
                mybuffer = _("Completing");
                mybuffer = _("Complete");
                mybuffer = _("Paused");
                mybuffer = _("Erroneous");
            case PS_INSUFFICIENT:
                mybuffer = _("Insufficient disk space");
                if (GetTransferingSrcCount() > 0) {
                    mybuffer = _("Downloading");

                    mybuffer = _("Waiting");

    if (m_stopped && (GetStatus() != PS_COMPLETE)) {
        mybuffer = _("Stopped");


int CPartFile::getPartfileStatusRang() const
    if (GetTransferingSrcCount() == 0) tempstatus = 1;
    switch (GetStatus()) {
        case PS_WAITINGFORHASH:
wxString CPartFile::GetFeedback() const
    wxString retval = CKnownFile::GetFeedback();
    if (GetStatus() != PS_COMPLETE) {
        retval += CFormat(wxT("%s: %s (%.2f%%)\n%s: %u\n"))
            % _("Downloaded") % CastItoXBytes(GetCompletedSize()) % GetPercentCompleted() % _("Sources") % GetSourceCount();
    return retval + _("Status") + wxT(": ") + getPartfileStatus() + wxT("\n");


sint32 CPartFile::getTimeRemaining() const
    if (GetKBpsDown() < 0.001)

    return ((GetFileSize() - GetCompletedSize()) / ((int)(GetKBpsDown() * 1024.0)));


bool CPartFile::PreviewAvailable()
    const uint64 minSizeForPreview = 256 * 1024;
    FileType type = GetFiletype(GetFileName());

    return (type == ftVideo || type == ftAudio) &&
        GetFileSize() >= minSizeForPreview &&
        IsComplete(0, minSizeForPreview);
bool CPartFile::CheckShowItemInGivenCat(int inCategory)
    // first check if item belongs in this cat in principle
    if (inCategory > 0 && inCategory != GetCategory()) {

    // if yes apply filter
    switch (thePrefs::GetAllcatFilter()) {
            show = GetCategory() == 0 || inCategory > 0;
            show = IsPartFile();
            show = !IsPartFile();
                (GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
                GetTransferingSrcCount() == 0;
        case acfDownloading:
                (GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
                GetTransferingSrcCount() > 0;
            show = GetStatus() == PS_ERROR;
            show = GetStatus() == PS_PAUSED && !IsStopped();
            show = GetFiletype(GetFileName()) == ftVideo;
            show = GetFiletype(GetFileName()) == ftAudio;
            show = GetFiletype(GetFileName()) == ftArchive;
            show = GetFiletype(GetFileName()) == ftCDImage;
            show = GetFiletype(GetFileName()) == ftPicture;
            show = GetFiletype(GetFileName()) == ftText;
            show = !IsStopped() && GetStatus() != PS_PAUSED;
void CPartFile::RemoveCategory(uint8 cat)
    if (m_category == cat) {
        // Reset the category
    } else if (m_category > cat) {
        // Set to the new position of the original category


void CPartFile::SetActive(bool bActive)
    time_t tNow = time(NULL);

    if (theApp->IsConnected()) {
        if (m_tActivated == 0) {
            m_tActivated = tNow;

        if (m_tActivated != 0) {
            m_nDlActiveTime += tNow - m_tActivated;


uint32 CPartFile::GetDlActiveTime() const
    uint32 nDlActiveTime = m_nDlActiveTime;
    if (m_tActivated != 0) {
        nDlActiveTime += time(NULL) - m_tActivated;
    return nDlActiveTime;


uint16 CPartFile::GetPartMetNumber() const
    return m_partmetfilename.RemoveAllExt().GetRaw().ToLong(&nr) ? nr : 0;
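    // For example, a download tracked by "003.part.met" has part-met number 3.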
void CPartFile::SetHashingProgress(uint16 part) const
    m_hashingProgress = part;
    Notify_DownloadCtrlUpdateItem(this);


uint8 CPartFile::GetStatus(bool ignorepause) const
    if ( (!m_paused && !m_insufficient) ||
         status == PS_ERROR ||
         status == PS_COMPLETING ||
         status == PS_COMPLETE ||
    } else if ( m_insufficient ) {
        return PS_INSUFFICIENT;
void CPartFile::AddDeadSource(const CUpDownClient* client)
    m_deadSources.AddDeadSource( client );


bool CPartFile::IsDeadSource(const CUpDownClient* client)
    return m_deadSources.IsDeadSource( client );


void CPartFile::SetFileName(const CPath& fileName)
    CKnownFile* pFile = theApp->sharedfiles->GetFileByID(GetFileHash());

    bool is_shared = (pFile && pFile == this);

        // The file is shared, we must clear the search keywords so we don't
        // publish the old name anymore.
        theApp->sharedfiles->RemoveKeywords(this);

    CKnownFile::SetFileName(fileName);

        // And of course, we must advertise the new name if the file is shared.
        theApp->sharedfiles->AddKeywords(this);

    UpdateDisplayedInfo(true);


uint16 CPartFile::GetMaxSources() const
    // This is just like this, while we don't import the private max sources per file
    return thePrefs::GetMaxSourcePerFile();


uint16 CPartFile::GetMaxSourcePerFileSoft() const
    unsigned int temp = ((unsigned int)GetMaxSources() * 9L) / 10;
    if (temp > MAX_SOURCES_FILE_SOFT) {
        return MAX_SOURCES_FILE_SOFT;


uint16 CPartFile::GetMaxSourcePerFileUDP() const
    unsigned int temp = ((unsigned int)GetMaxSources() * 3L) / 4;
    if (temp > MAX_SOURCES_FILE_UDP) {
        return MAX_SOURCES_FILE_UDP;


#define DROP_FACTOR 2

CUpDownClient* CPartFile::GetSlowerDownloadingClient(uint32 speed, CUpDownClient* caller) {
    // printf("Start slower source calculation\n");
    for( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
        CUpDownClient* cur_src = it++->GetClient();
        if ((cur_src->GetDownloadState() == DS_DOWNLOADING) && (cur_src != caller)) {
            uint32 factored_bytes_per_second = static_cast<uint32>(
                (cur_src->GetKBpsDown() * 1024) * DROP_FACTOR);
            if ( factored_bytes_per_second < speed ) {
                // printf("Selecting source %p to drop: %d < %d\n", cur_src, factored_bytes_per_second, speed);
                // printf("End slower source calculation\n");

            // printf("Not selecting source %p to drop: %d > %d\n", cur_src, factored_bytes_per_second, speed);

    // printf("End slower source calculation\n");


void CPartFile::AllocationFinished()
    // see if it can be opened
    if (!m_hpartfile.Open(m_PartPath, CFile::read_write)) {
        AddLogLineN(CFormat(_("ERROR: Failed to open partfile '%s'")) % GetFullName());
        SetStatus(PS_ERROR);

    // then close the handle again
    m_hpartfile.Release(true);

// File_checked_for_headers