// This file is part of the aMule Project.
//
// Copyright (c) 2003-2011 aMule Team ( admin@amule.org / http://www.amule.org )
// Copyright (c) 2002-2011 Merkur ( devs@emule-project.net / http://www.emule-project.net )
//
// Any parts of this program derived from the xMule, lMule or eMule project,
// or contributed by third-party developers are copyrighted by their
// respective authors.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
//
#include "PartFile.h"		// Interface declarations.

#include "config.h"		// Needed for VERSION

#include <protocol/kad/Constants.h>
#include <protocol/ed2k/Client2Client/TCP.h>
#include <protocol/Protocols.h>
#include <common/DataFileVersion.h>
#include <common/Constants.h>
#include <tags/FileTags.h>

#include <wx/tokenzr.h>		// Needed for wxStringTokenizer

#include "KnownFileList.h"	// Needed for CKnownFileList
#include "CanceledFileList.h"
#include "UploadQueue.h"	// Needed for CFileHash
#include "IPFilter.h"		// Needed for CIPFilter
#include "Server.h"		// Needed for CServer
#include "ServerConnect.h"	// Needed for CServerConnect
#include "UpDownClientEC.h"	// Needed for CUpDownClient
#include "updownclient.h"	// Needed for CUpDownClient
#include "MemFile.h"		// Needed for CMemFile
#include "Preferences.h"	// Needed for CPreferences
#include "DownloadQueue.h"	// Needed for CDownloadQueue
#include "amule.h"		// Needed for theApp
#include "ED2KLink.h"		// Needed for CED2KLink
#include "Packet.h"		// Needed for CTag
#include "SearchList.h"		// Needed for CSearchFile
#include "ClientList.h"		// Needed for clientlist
#include "Statistics.h"		// Needed for theStats
#include <common/Format.h>	// Needed for CFormat
#include <common/FileFunctions.h>	// Needed for GetLastModificationTime
#include "ThreadTasks.h"	// Needed for CHashingTask/CCompletionTask/CAllocateFileTask
#include "GuiEvents.h"		// Needed for Notify_*
#include "DataToText.h"		// Needed for OriginToText()
#include "PlatformSpecific.h"	// Needed for CreateSparseFile()
#include "FileArea.h"		// Needed for CFileArea
#include "ScopedPtr.h"		// Needed for CScopedArray
#include "CorruptionBlackBox.h"

#include "kademlia/kademlia/Kademlia.h"
#include "kademlia/kademlia/Search.h"
SFileRating::SFileRating(const wxString &u, const wxString &f, sint16 r, const wxString &c)
:
	UserName(u),
	FileName(f),
	Rating(r),
	Comment(c)
{
}


SFileRating::SFileRating(const SFileRating &fr)
:
	UserName(fr.UserName),
	FileName(fr.FileName),
	Rating(fr.Rating),
	Comment(fr.Comment)
{
}


SFileRating::SFileRating(const CUpDownClient &client)
:
	UserName(client.GetUserName()),
	FileName(client.GetClientFilename()),
	Rating(client.GetFileRating()),
	Comment(client.GetFileComment())
{
}


SFileRating::~SFileRating()
{
}
class PartFileBufferedData
{
public:
	CFileArea area;					// File area to be written
	uint64 start;					// This is the start offset of the data
	uint64 end;						// This is the end offset of the data
	Requested_Block_Struct *block;	// This is the requested block that this data relates to

	PartFileBufferedData(CFileAutoClose& file, byte * data, uint64 _start, uint64 _end, Requested_Block_Struct *_block)
		: start(_start), end(_end), block(_block)
	{
		area.StartWriteAt(file, start, end-start+1);
		memcpy(area.GetBuffer(), data, end-start+1);
	}
};
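// Note on the buffering above (not part of the original comments): the
// constructor copies the received block into a CFileArea immediately
// (StartWriteAt + memcpy), so the network buffer can be released right away;
// the actual disk write happens later, when CPartFile flushes its buffered
// data (see the buffer-flush check at the top of CPartFile::Process()).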
typedef std::list<Chunk> ChunkList;
CPartFile::CPartFile()


CPartFile::CPartFile(CSearchFile* searchresult)
	m_abyFileHash = searchresult->GetFileHash();
	SetFileName(searchresult->GetFileName());
	SetFileSize(searchresult->GetFileSize());

	for (unsigned int i = 0; i < searchresult->m_taglist.size(); ++i){
		const CTag& pTag = searchresult->m_taglist[i];

		bool bTagAdded = false;
		if (pTag.GetNameID() == 0 && !pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {
				{ wxT(FT_ED2K_MEDIA_ARTIST),  2 },
				{ wxT(FT_ED2K_MEDIA_ALBUM),   2 },
				{ wxT(FT_ED2K_MEDIA_TITLE),   2 },
				{ wxT(FT_ED2K_MEDIA_LENGTH),  2 },
				{ wxT(FT_ED2K_MEDIA_BITRATE), 3 },
				{ wxT(FT_ED2K_MEDIA_CODEC),   2 }

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if (pTag.GetType() == _aMetaTags[t].nType &&
				    (pTag.GetName() == _aMetaTags[t].pszName)) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

					// skip "length" tags with "0: 0" values
					if (pTag.GetName() == wxT(FT_ED2K_MEDIA_LENGTH)) {
						if (pTag.GetStr().IsSameAs(wxT("0: 0")) ||
						    pTag.GetStr().IsSameAs(wxT("0:0"))) {

					// skip "bitrate" tags with '0' values
					if ((pTag.GetName() == wxT(FT_ED2K_MEDIA_BITRATE)) && !pTag.GetInt()) {

					AddDebugLogLineN( logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);
		} else if (pTag.GetNameID() != 0 && pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if (pTag.GetType() == _aMetaTags[t].nType && pTag.GetNameID() == _aMetaTags[t].nID) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

					AddDebugLogLineN( logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);

		AddDebugLogLineN( logPartFile,
			wxT("CPartFile::CPartFile(CSearchFile*): ignored tag ") +
			pTag.GetFullInfo() );
CPartFile::CPartFile(const CED2KFileLink* fileLink)
	SetFileName(CPath(fileLink->GetName()));
	SetFileSize(fileLink->GetSize());
	m_abyFileHash = fileLink->GetHashKey();

	if (fileLink->m_hashset) {
		if (!LoadHashsetFromFile(fileLink->m_hashset, true)) {
			AddDebugLogLineC(logPartFile, wxT("eD2K link contained invalid hashset: ") + fileLink->GetLink());
CPartFile::~CPartFile()
	// if it's not opened, it was completed or deleted
	if (m_hpartfile.IsOpened()) {

	// Update met file (with current directory entry)

	DeleteContents(m_BufferedData_list);
	delete m_CorruptionBlackBox;

	wxASSERT(m_SrcList.empty());
	wxASSERT(m_A4AFsrclist.empty());
void CPartFile::CreatePartFile(bool isImporting)
	// use lowest free partfilenumber for free file (InterCeptor)
		m_partmetfilename = CPath(CFormat(wxT("%03i.part.met")) % i);
		m_fullname = thePrefs::GetTempDir().JoinPaths(m_partmetfilename);
	} while (m_fullname.FileExists());

	m_CorruptionBlackBox->SetPartFileInfo(GetFileName().GetPrintable(), m_partmetfilename.RemoveAllExt().GetPrintable());

	m_gaplist.Init(GetFileSize(), true);	// Init empty

	m_PartPath = m_fullname.RemoveExt();
	if (thePrefs::GetAllocFullFile() || !thePrefs::CreateFilesSparse()) {
		fileCreated = m_hpartfile.Create(m_PartPath, true);
		fileCreated = PlatformSpecific::CreateSparseFile(m_PartPath, GetFileSize());
		AddLogLineN(_("ERROR: Failed to create partfile"));

	SetFilePath(thePrefs::GetTempDir());

	if (!isImporting && thePrefs::GetAllocFullFile()) {
		SetStatus(PS_ALLOCATING);
		CThreadScheduler::AddTask(new CAllocateFileTask(this, thePrefs::AddNewFilesPaused()));
		AllocationFinished();

	m_hashsetneeded = (GetED2KPartHashCount() > 0);

	SetActive(theApp->IsConnected());
uint8 CPartFile::LoadPartFile(const CPath& in_directory, const CPath& filename, bool from_backup, bool getsizeonly)
	bool isnewstyle = false;
	uint8 version, partmettype = PMT_UNKNOWN;

	std::map<uint16, Gap_Struct*> gap_map; // Slugfiller

	m_partmetfilename = filename;
	m_CorruptionBlackBox->SetPartFileInfo(GetFileName().GetPrintable(), m_partmetfilename.RemoveAllExt().GetPrintable());
	m_filePath = in_directory;
	m_fullname = m_filePath.JoinPaths(m_partmetfilename);
	m_PartPath = m_fullname.RemoveExt();

	// read the file data from the part.met file
	CPath curMetFilename = m_fullname;
		curMetFilename = curMetFilename.AppendExt(PARTMET_BAK_EXT);
		AddLogLineN(CFormat( _("Trying to load backup of met-file from %s") )

	CFile metFile(curMetFilename, CFile::read);
	if (!metFile.IsOpened()) {
		AddLogLineN(CFormat( _("ERROR: Failed to open part.met file: %s ==> %s") )
	} else if (metFile.GetLength() == 0) {
		AddLogLineN(CFormat( _("ERROR: part.met file is 0 size: %s ==> %s") )

		version = metFile.ReadUInt8();
		if (version != PARTFILE_VERSION && version != PARTFILE_SPLITTEDVERSION && version != PARTFILE_VERSION_LARGEFILE){
			//if (version == 83) return ImportShareazaTempFile(...)
			AddLogLineN(CFormat( _("ERROR: Invalid part.met file version: %s ==> %s") )

		isnewstyle = (version == PARTFILE_SPLITTEDVERSION);
		partmettype = isnewstyle ? PMT_SPLITTED : PMT_DEFAULTOLD;

		if (version == PARTFILE_VERSION) {	// Do we still need this check?
			uint8 test[4];	// It will fail for certain files.
			metFile.Seek(24, wxFromStart);
			metFile.Read(test, 4);

			metFile.Seek(1, wxFromStart);
			if (test[0]==0 && test[1]==0 && test[2]==2 && test[3]==1) {
				isnewstyle = true;	// eDonkey's so-called "old part style"
				partmettype = PMT_NEWOLD;

			uint32 temp = metFile.ReadUInt32();

			if (temp==0) {	// 0.48 partmets - different again
				LoadHashsetFromFile(&metFile, false);
				metFile.Seek(2, wxFromStart);
				LoadDateFromFile(&metFile);
				m_abyFileHash = metFile.ReadHash();

			LoadDateFromFile(&metFile);
			LoadHashsetFromFile(&metFile, false);

		uint32 tagcount = metFile.ReadUInt32();

		for (uint32 j = 0; j < tagcount; ++j) {
			CTag newtag(metFile, true);

				(newtag.GetNameID() == FT_FILESIZE ||
				 newtag.GetNameID() == FT_FILENAME))) {
				switch (newtag.GetNameID()) {
						if (!GetFileName().IsOk()) {
							// If it's not empty, we already loaded the unicoded one
							SetFileName(CPath(newtag.GetStr()));

					case FT_LASTSEENCOMPLETE: {
						lastseencomplete = newtag.GetInt();

						SetFileSize(newtag.GetInt());

					case FT_TRANSFERRED: {
						transferred = newtag.GetInt();

						//#warning needs setfiletype string
						//SetFileType(newtag.GetStr());

						m_category = newtag.GetInt();
						if (m_category > theApp->glob_prefs->GetCatCount() - 1 ) {

					case FT_OLDDLPRIORITY:
					case FT_DLPRIORITY: {
							m_iDownPriority = newtag.GetInt();
							if( m_iDownPriority == PR_AUTO ){
								m_iDownPriority = PR_HIGH;
								SetAutoDownPriority(true);
								if ( m_iDownPriority != PR_LOW &&
								     m_iDownPriority != PR_NORMAL &&
								     m_iDownPriority != PR_HIGH )
									m_iDownPriority = PR_NORMAL;
								SetAutoDownPriority(false);

						m_paused = (newtag.GetInt() == 1);
						m_stopped = m_paused;

					case FT_OLDULPRIORITY:
					case FT_ULPRIORITY: {
							SetUpPriority(newtag.GetInt(), false);
							if( GetUpPriority() == PR_AUTO ){
								SetUpPriority(PR_HIGH, false);
								SetAutoUpPriority(true);
								SetAutoUpPriority(false);

					case FT_KADLASTPUBLISHSRC: {
						SetLastPublishTimeKadSrc(newtag.GetInt(), 0);
						if(GetLastPublishTimeKadSrc() > (uint32)time(NULL)+KADEMLIAREPUBLISHTIMES) {
							// There is a possibility that an older client saved a random number here. This will check for that.
							SetLastPublishTimeKadSrc(0, 0);

					case FT_KADLASTPUBLISHNOTES: {
						SetLastPublishTimeKadNotes(newtag.GetInt());

					// old tags: as long as they are not needed, take the chance to purge them
					case FT_KADLASTPUBLISHKEY:
					case FT_PARTFILENAME:

					case FT_DL_ACTIVE_TIME:
						if (newtag.IsInt()) {
							m_nDlActiveTime = newtag.GetInt();

					case FT_CORRUPTEDPARTS: {
						wxASSERT(m_corrupted_list.empty());
						wxString strCorruptedParts(newtag.GetStr());
						wxStringTokenizer tokenizer(strCorruptedParts, wxT(","));
						while ( tokenizer.HasMoreTokens() ) {
							wxString token = tokenizer.GetNextToken();
							if (token.ToULong(&uPart)) {
								if (uPart < GetPartCount() && !IsCorruptedPart(uPart)) {
									m_corrupted_list.push_back(uPart);

							hash.DecodeBase32(newtag.GetStr()) == CAICHHash::GetHashSize();
						wxASSERT(hashSizeOk);
							m_pAICHHashSet->SetMasterHash(hash, AICH_VERIFIED);

					case FT_ATTRANSFERRED: {
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (uint64)newtag.GetInt());

					case FT_ATTRANSFERREDHI: {
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (((uint64)newtag.GetInt()) << 32));

					case FT_ATREQUESTED: {
						statistic.SetAllTimeRequests(newtag.GetInt());

						statistic.SetAllTimeAccepts(newtag.GetInt());
					// Start Changes by Slugfiller for better exception handling

					wxCharBuffer tag_ansi_name = newtag.GetName().ToAscii();
					char gap_mark = tag_ansi_name ? tag_ansi_name[0u] : 0;
					if ( newtag.IsInt() && (newtag.GetName().Length() > 1) &&
						((gap_mark == FT_GAPSTART) ||
						 (gap_mark == FT_GAPEND))) {
						Gap_Struct *gap = NULL;
						unsigned long int gapkey;
						if (newtag.GetName().Mid(1).ToULong(&gapkey)) {
							if ( gap_map.find( gapkey ) == gap_map.end() ) {
								gap = new Gap_Struct;
								gap_map[gapkey] = gap;
								gap->start = (uint64)-1;
								gap->end = (uint64)-1;
								gap = gap_map[ gapkey ];
							if (gap_mark == FT_GAPSTART) {
								gap->start = newtag.GetInt();
							if (gap_mark == FT_GAPEND) {
								gap->end = newtag.GetInt()-1;
							AddDebugLogLineN(logPartFile, wxT("Wrong gap map key while reading met file!"));
					// End Changes by Slugfiller for better exception handling
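					// Illustration (not part of the original source): a gap is stored in the met
					// file as a pair of integer tags whose names are the FT_GAPSTART or FT_GAPEND
					// character followed by the gap index. For example, the tag pair with names
					// FT_GAPSTART+"3" = 1024 and FT_GAPEND+"3" = 2048 yields gap_map[3]->start == 1024
					// and gap_map[3]->end == 2047, because the met file stores the first non-missing
					// byte as the gap end (hence the "-1" above).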
					m_taglist.push_back(newtag);

				// Nothing. Else, nothing.

		// load the hashsets from the hybridstylepartmet
		if (isnewstyle && !getsizeonly && (metFile.GetPosition() < metFile.GetLength()) ) {
			metFile.Seek(1, wxFromCurrent);

			uint16 parts = GetPartCount();	// assuming we will get all hashsets

			for (uint16 i = 0; i < parts && (metFile.GetPosition()+16 < metFile.GetLength()); ++i){
				CMD4Hash cur_hash = metFile.ReadHash();
				m_hashlist.push_back(cur_hash);

			if (!m_hashlist.empty()) {
				CreateHashFromHashlist(m_hashlist, &checkhash);
				if (m_abyFileHash != checkhash) {

	} catch (const CInvalidPacket& e) {
		AddLogLineC(CFormat(_("Error: %s (%s) is corrupt (bad tags: %s), unable to load file."))
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile, CFormat( wxT("IO failure while loading '%s': %s") )
	} catch (const CEOFException& WXUNUSED(e)) {
		AddLogLineC(CFormat( _("ERROR: %s (%s) is corrupt (wrong tagcount), unable to load file.") )
		AddLogLineC(_("Trying to recover file info..."));

		// A file is safe to recover if it has
		// We have the filesize, try to recover the other needed info
		// Do we need to check gaps? I think not,
		// because they are checked below. Worst
		// scenario will only mark file as 0 bytes downloaded.

		if (!GetFileName().IsOk()) {
			// Not critical, let's put a random filename.
				"Recovering no-named file - will try to recover it as RecoveredFile.dat"));
			SetFileName(CPath(wxT("RecoveredFile.dat")));

		AddLogLineC(_("Recovered all available file info :D - Trying to use it..."));
		AddLogLineC(_("Unable to recover file info :("));
		m_gaplist.Init(GetFileSize(), false);	// Init full, then add gaps
		// Now to flush the map into the list (Slugfiller)
		std::map<uint16, Gap_Struct*>::iterator it = gap_map.begin();
		for ( ; it != gap_map.end(); ++it ) {
			Gap_Struct* gap = it->second;
			// SLUGFILLER: SafeHash - revised code, and extra safety
			if ( (gap->start != (uint64)-1) &&
			     (gap->end != (uint64)-1) &&
			     gap->start <= gap->end &&
			     gap->start < GetFileSize()) {
				if (gap->end >= GetFileSize()) {
					gap->end = GetFileSize()-1; // Clipping
				m_gaplist.AddGap(gap->start, gap->end);	// All tags accounted for, use safe adding
		// SLUGFILLER: SafeHash
	// check if this is a backup
	if ( m_fullname.GetExt().MakeLower() == wxT("backup" )) {
		m_fullname = m_fullname.RemoveExt();

	// open permanent handle
	if ( !m_hpartfile.Open(m_PartPath, CFile::read_write)) {
		AddLogLineN(CFormat( _("Failed to open %s (%s)") )

		// SLUGFILLER: SafeHash - final safety, make sure any missing part of the file is gap
		if (m_hpartfile.GetLength() < GetFileSize())
			AddGap(m_hpartfile.GetLength(), GetFileSize()-1);
		// Goes both ways - Partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			AddDebugLogLineC(logPartFile, CFormat( wxT("Partfile \"%s\" is too large! Truncating %llu bytes.") ) % GetFileName() % (m_hpartfile.GetLength() - GetFileSize()));
			m_hpartfile.SetLength(GetFileSize());
		// SLUGFILLER: SafeHash
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile, CFormat( wxT("Error while accessing partfile \"%s\": %s") ) % GetFileName() % e.what());

	// now close the file again until needed
	m_hpartfile.Release(true);

	// check hashcount, file status etc
	if (GetHashCount() != GetED2KPartHashCount()){
		m_hashsetneeded = true;
		m_hashsetneeded = false;
		for (size_t i = 0; i < m_hashlist.size(); ++i) {

	if (m_gaplist.IsComplete()) { // is this file complete already?

	if (!isnewstyle) { // not for importing
		const time_t file_date = CPath::GetModificationTime(m_PartPath);
		if (m_lastDateChanged != file_date) {
			// It's pointless to rehash an empty file, since the case
			// where a user has zero'd a file is handled above ...
			if (m_hpartfile.GetLength()) {
				AddLogLineN(CFormat( _("WARNING: %s might be corrupted (%i)") )
					% (m_lastDateChanged - file_date) );

				SetStatus(PS_WAITINGFORHASH);

				CPath partFileName = m_partmetfilename.RemoveExt();
				CThreadScheduler::AddTask(new CHashingTask(m_filePath, partFileName, this));

	UpdateCompletedInfos();
	if (completedsize > transferred) {
		m_iGainDueToCompression = completedsize - transferred;
	} else if (completedsize != transferred) {
		m_iLostDueToCorruption = transferred - completedsize;
bool CPartFile::SavePartFile(bool Initial)
		case PS_WAITINGFORHASH:

	/* Don't write anything to disk if less than 100 KB of free space is left. */
	sint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if ((free != wxInvalidOffset) && (free < (100 * 1024))) {

		if (!m_PartPath.FileExists()) {
			throw wxString(wxT(".part file not found"));

		uint32 lsc = lastseencomplete;

		CPath::BackupFile(m_fullname, wxT(".backup"));
		CPath::RemoveFile(m_fullname);

		file.Open(m_fullname, CFile::write);
		if (!file.IsOpened()) {
			throw wxString(wxT("Failed to open part.met file"));

		file.WriteUInt8(IsLargeFile() ? PARTFILE_VERSION_LARGEFILE : PARTFILE_VERSION);

		file.WriteUInt32(CPath::GetModificationTime(m_PartPath));

		file.WriteHash(m_abyFileHash);
		uint16 parts = m_hashlist.size();
		file.WriteUInt16(parts);
		for (int x = 0; x < parts; ++x) {
			file.WriteHash(m_hashlist[x]);
		}

#define FIXED_TAGS 15
		uint32 tagcount = m_taglist.size() + FIXED_TAGS + (m_gaplist.size()*2);
		if (!m_corrupted_list.empty()) {
			++tagcount;
		}
		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
			++tagcount;
		}
		if (GetLastPublishTimeKadSrc()){
			++tagcount;
		}
		if (GetLastPublishTimeKadNotes()){
			++tagcount;
		}
		if (GetDlActiveTime()){
			++tagcount;
		}

		file.WriteUInt32(tagcount);
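		// Worked example (illustration, not in the original source): with 3 extra
		// tags in m_taglist, 2 gaps, a non-empty corrupted-parts list, a verified
		// AICH master hash and a non-zero download-active time, the header written
		// here announces 15 + 3 + 2*2 + 1 + 1 + 1 = 25 tags, matching the
		// conditional writes that follow below.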
		//#warning Kry - Where are lost by corruption and gained by compression?

		// 0 (unicoded part file name)
		// We write it with BOM to keep eMule compatibility. Note that the 'printable' filename is saved,
		// as presently the filename does not represent an actual file.
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file, utf8strOptBOM );
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file );	// 1

		CTagIntSized( FT_FILESIZE, GetFileSize(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 2
		CTagIntSized( FT_TRANSFERRED, transferred, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 3
		CTagInt32( FT_STATUS, (m_paused ? 1 : 0)).WriteTagToFile( &file );	// 4

		if ( IsAutoDownPriority() ) {
			CTagInt32( FT_DLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 6
		} else {
			CTagInt32( FT_DLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 6
		}

		CTagInt32( FT_LASTSEENCOMPLETE, lsc ).WriteTagToFile( &file );	// 7

		if ( IsAutoUpPriority() ) {
			CTagInt32( FT_ULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 9
		} else {
			CTagInt32( FT_ULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 9
		}

		CTagInt32(FT_CATEGORY, m_category).WriteTagToFile( &file );	// 10
		CTagInt32(FT_ATTRANSFERRED, statistic.GetAllTimeTransferred() & 0xFFFFFFFF).WriteTagToFile( &file );	// 11
		CTagInt32(FT_ATTRANSFERREDHI, statistic.GetAllTimeTransferred() >> 32).WriteTagToFile( &file );	// 12
		CTagInt32(FT_ATREQUESTED, statistic.GetAllTimeRequests()).WriteTagToFile( &file );	// 13
		CTagInt32(FT_ATACCEPTED, statistic.GetAllTimeAccepts()).WriteTagToFile( &file );	// 14
		// corrupt part infos
		if (!m_corrupted_list.empty()) {
			wxString strCorruptedParts;
			std::list<uint16>::iterator it = m_corrupted_list.begin();
			for (; it != m_corrupted_list.end(); ++it) {
				uint16 uCorruptedPart = *it;
				if (!strCorruptedParts.IsEmpty()) {
					strCorruptedParts += wxT(",");
				}
				strCorruptedParts += CFormat(wxT("%u")) % uCorruptedPart;
			}
			wxASSERT( !strCorruptedParts.IsEmpty() );

			CTagString( FT_CORRUPTEDPARTS, strCorruptedParts ).WriteTagToFile( &file ); // 11?
		}
		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
			CTagString aichtag(FT_AICH_HASH, m_pAICHHashSet->GetMasterHash().GetString() );
			aichtag.WriteTagToFile(&file); // 12?
		}

		if (GetLastPublishTimeKadSrc()){
			CTagInt32(FT_KADLASTPUBLISHSRC, GetLastPublishTimeKadSrc()).WriteTagToFile(&file); // 15?
		}

		if (GetLastPublishTimeKadNotes()){
			CTagInt32(FT_KADLASTPUBLISHNOTES, GetLastPublishTimeKadNotes()).WriteTagToFile(&file); // 16?
		}

		if (GetDlActiveTime()){
			CTagInt32(FT_DL_ACTIVE_TIME, GetDlActiveTime()).WriteTagToFile(&file); // 17
		}

		for (uint32 j = 0; j < (uint32)m_taglist.size(); ++j) {
			m_taglist[j].WriteTagToFile(&file);
		}
		for (CGapList::const_iterator it = m_gaplist.begin(); it != m_gaplist.end(); ++it) {
			wxString tagName = CFormat(wxT(" %u")) % i_pos;

			// gap start = first missing byte, but gap end = first non-missing byte
			// in eDonkey; but I think it's easier to use the real limits
			tagName[0] = FT_GAPSTART;
			CTagIntSized(tagName, it.start(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

			tagName[0] = FT_GAPEND;
			CTagIntSized(tagName, it.end() + 1, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );
		}
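		// Illustration (not part of the original source): for a gap with index
		// i_pos == 2 covering bytes 1024..2047, tagName starts out as " 2"; overwriting
		// its first character produces the tag names FT_GAPSTART+"2" and FT_GAPEND+"2",
		// written with the values 1024 and 2048 - the same exclusive-end convention
		// that LoadPartFile() undoes with "GetInt()-1".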
	} catch (const wxString& error) {
		AddLogLineNS(CFormat( _("ERROR while saving partfile: %s (%s ==> %s)") )
	} catch (const CIOFailureException& e) {
		AddLogLineCS(_("IO failure while saving partfile: ") + e.what());

	CPath::RemoveFile(m_fullname.AppendExt(wxT(".backup")));

	sint64 metLength = m_fullname.GetFileSize();
	if (metLength == wxInvalidOffset) {
		theApp->ShowAlert( CFormat( _("Could not retrieve length of '%s' - using %s file.") )
		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
	} else if (metLength == 0) {
		// Don't backup if it's 0 size but raise a warning!!!
		theApp->ShowAlert( CFormat( _("'%s' is 0 size somehow - using %s file.") )
		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
	} else {
		// no error, just backup
		CPath::BackupFile(m_fullname, PARTMET_BAK_EXT);
	}
void CPartFile::SaveSourceSeeds()
{
#define MAX_SAVED_SOURCES 10

	// Kry - Sources seeds
	// Based on a Feature request, this saves the last MAX_SAVED_SOURCES
	// sources of the file, giving a 'seed' for the next run.
	// We save the last sources because:
	// 1 - They could be the hardest to get
	// 2 - They will more probably be available
	// However, if we have downloading sources, they have preference because
	// we probably have more credits on them.
	// Anyway, source exchange will get us the rest of the sources
	// This feature is currently used only on rare files (< 20 sources)

	if (GetSourceCount() > 20) {

	CClientRefList source_seeds;

	CClientRefList::iterator it = m_downloadingSourcesList.begin();
	for( ; it != m_downloadingSourcesList.end() && n_sources < MAX_SAVED_SOURCES; ++it) {
		if (!it->HasLowID()) {
			source_seeds.push_back(*it);

	if (n_sources < MAX_SAVED_SOURCES) {
		// Not enough downloading sources to fill the list, going to sources list
		if (GetSourceCount() > 0) {
			SourceSet::reverse_iterator rit = m_SrcList.rbegin();
			for ( ; ((rit != m_SrcList.rend()) && (n_sources < MAX_SAVED_SOURCES)); ++rit) {
				if (!rit->HasLowID()) {
					source_seeds.push_back(*rit);

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));

	file.Create(seedsPath, true);
	if (!file.IsOpened()) {
		AddLogLineN(CFormat( _("Failed to save part.met.seeds file for %s") )

		file.WriteUInt8(0); // v3, to avoid v2 clients choking on it.
		file.WriteUInt8(source_seeds.size());

		CClientRefList::iterator it2 = source_seeds.begin();
		for (; it2 != source_seeds.end(); ++it2) {
			CUpDownClient* cur_src = it2->GetClient();
			file.WriteUInt32(cur_src->GetUserIDHybrid());
			file.WriteUInt16(cur_src->GetUserPort());
			file.WriteHash(cur_src->GetUserHash());
			// CryptSettings - See SourceExchange V4
			const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
			const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
			const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
			const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
			file.WriteUInt8(byCryptOptions);
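			// Bit layout of byCryptOptions written above (from the code, as an
			// illustration): bit 0 = "supports", bit 1 = "requests", bit 2 =
			// "requires" protocol obfuscation. A source that supports and requests,
			// but does not require, the crypt layer is therefore stored as 0b011 == 3.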
		/* v2: Added to keep track of too old seeds */
		file.WriteUInt32(wxDateTime::Now().GetTicks());

		AddLogLineN(CFormat( wxPLURAL("Saved %i source seed for partfile: %s (%s)", "Saved %i source seeds for partfile: %s (%s)", n_sources) )
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC( logPartFile, CFormat( wxT("Error saving partfile's seeds file (%s - %s): %s") )

		CPath::RemoveFile(seedsPath);
void CPartFile::LoadSourceSeeds()
{
	CMemFile sources_data;

	bool valid_sources = false;

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));
	if (!seedsPath.FileExists()) {

	CFile file(seedsPath, CFile::read);
	if (!file.IsOpened()) {
		// Exists but can't be opened. Should not happen. Probably permission problem, try to remove it.
		AddLogLineN(CFormat( _("Can't read seeds file for Partfile %s (%s)") )
		CPath::RemoveFile(seedsPath);

	bool badSeedsFile = false;

		uint8 src_count = file.ReadUInt8();

		bool bUseSX2Format = (src_count == 0);

		if (bUseSX2Format) {
			src_count = file.ReadUInt8();
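		// Format note (from the code above, as an illustration): SaveSourceSeeds()
		// writes a leading 0 byte for the newer v3/SX2 layout, so a first byte of
		// zero here means "SX2 format, the real source count follows"; any non-zero
		// first byte is already the source count of the older v1/v2 layout.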
			sources_data.WriteUInt16(src_count);

		for (int i = 0; i < src_count; ++i) {
			uint32 dwID = file.ReadUInt32();
			uint16 nPort = file.ReadUInt16();

			sources_data.WriteUInt32(bUseSX2Format ? dwID : wxUINT32_SWAP_ALWAYS(dwID));
			sources_data.WriteUInt16(nPort);
			sources_data.WriteUInt32(0);
			sources_data.WriteUInt16(0);

			if (bUseSX2Format) {
				sources_data.WriteHash(file.ReadHash());
				sources_data.WriteUInt8(file.ReadUInt8());

			// v2: Added to keep track of too old seeds
			time_t time = (time_t)file.ReadUInt32();

			// Time frame is 2 hours. More than enough to compile
			// your new aMule version!
			if ((time + MIN2S(120)) >= wxDateTime::Now().GetTicks()) {
				valid_sources = true;

			// v1 has no time data. We can safely use
			// the sources; next time they will be saved.
			valid_sources = true;

		if (valid_sources) {
			sources_data.Seek(0);
			AddClientSources(&sources_data, SF_SOURCE_SEEDS, bUseSX2Format ? 4 : 1, bUseSX2Format);

	} catch (const CSafeIOException& e) {
		AddLogLineN(CFormat( _("Error reading partfile's seeds file (%s - %s): %s") )
		badSeedsFile = true;

		// If we got an exception reading it, remove it.
		CPath::RemoveFile(seedsPath);
void CPartFile::PartFileHashFinished(CKnownFile* result)
	m_lastDateChanged = result->m_lastDateChanged;
	bool errorfound = false;
	if (GetED2KPartHashCount() == 0){
		if (IsComplete(0, GetFileSize()-1)){
			if (result->GetFileHash() != GetFileHash()){
				// cppcheck-suppress zerodiv
				AddLogLineN(CFormat(wxPLURAL(
					"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
					"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
					% result->GetFileHash().Encode()
					% GetFileHash().Encode() );
				AddGap(0, GetFileSize()-1);

		for (size_t i = 0; i < m_hashlist.size(); ++i){
			// Kry - trel_ar's completed parts check on rehashing.
			// Very nice feature: if a file is completed but .part.met doesn't believe it,
			if (!( i < result->GetHashCount() && (result->GetPartHash(i) == GetPartHash(i)))){
				if (IsComplete(i)) {
					if ( i < result->GetHashCount() )
						wronghash = result->GetPartHash(i);

					AddLogLineN(CFormat(wxPLURAL(
						"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
						"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
						GetED2KPartHashCount())
						% GetED2KPartHashCount()
						% wronghash.Encode()
						% GetPartHash(i).Encode() );

				if (!IsComplete(i)){
					AddLogLineN(CFormat( _("Found completed part (%i) in %s") )

					uint64 partStart = i * PARTSIZE;
					uint64 partEnd = partStart + GetPartSize(i) - 1;
					RemoveBlockFromList(partStart, partEnd);

		result->GetAICHHashset()->GetStatus() == AICH_HASHSETCOMPLETE &&
		status == PS_COMPLETING) {
		delete m_pAICHHashSet;
		m_pAICHHashSet = result->GetAICHHashset();
		result->SetAICHHashset(NULL);
		m_pAICHHashSet->SetOwner(this);
	}
	else if (status == PS_COMPLETING) {
		AddDebugLogLineN(logPartFile,
			CFormat(wxT("Failed to store new AICH Hashset for completed file: %s"))

	if (status == PS_COMPLETING){
		AddLogLineN(CFormat( _("Finished rehashing %s") ) % GetFileName());
		SetStatus(PS_READY);

	SetStatus(PS_READY);
	theApp->sharedfiles->SafeAddKFile(this);
void CPartFile::AddGap(uint64 start, uint64 end)
{
	m_gaplist.AddGap(start, end);
	UpdateDisplayedInfo();
}


void CPartFile::AddGap(uint16 part)
{
	m_gaplist.AddGap(part);
	UpdateDisplayedInfo();
}
bool CPartFile::IsAlreadyRequested(uint64 start, uint64 end)
{
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	for (; it != m_requestedblocks_list.end(); ++it) {
		Requested_Block_Struct* cur_block = *it;

		if ((start <= cur_block->EndOffset) && (end >= cur_block->StartOffset)) {
			return true;
		}
	}
	return false;
}
bool CPartFile::GetNextEmptyBlockInPart(uint16 partNumber, Requested_Block_Struct *result)
{
	// Find start of this part
	uint64 partStart = (PARTSIZE * partNumber);
	uint64 start = partStart;

	// What is the end limit of this block, i.e. can't go outside part (or filesize)
	uint64 partEnd = partStart + GetPartSize(partNumber) - 1;
	// Loop until find a suitable gap and return true, or no more gaps and return false
	CGapList::const_iterator it = m_gaplist.begin();

		uint64 gapStart, end;

		// Find the first gap from the start position
		for (; it != m_gaplist.end(); ++it) {
			gapStart = it.start();

			// Want gaps that overlap start<->partEnd
			if (gapStart <= partEnd && end >= start) {
			} else if (gapStart > partEnd) {

		// If no gaps after start, exit

		// Update start position if gap starts after current pos
		if (start < gapStart) {

		// Find end, keeping within the max block size and the part limit
		uint64 blockLimit = partStart + (BLOCKSIZE * (((start - partStart) / BLOCKSIZE) + 1)) - 1;
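		// Worked example (illustration, not in the original source): with blocks of
		// ~180 kB (see the comment at the top of GetNextRequestedBlock()) and start
		// sitting 400 kB into the part, ((start - partStart) / BLOCKSIZE) + 1 == 3,
		// so blockLimit is the last byte of the part's third block - a request is
		// never allowed to cross a BLOCKSIZE boundary.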
		if (end > blockLimit) {
		if (end > partEnd) {

		// If this gap has not already been requested, we have found a valid entry
		if (!IsAlreadyRequested(start, end)) {
			// Was this block to be returned
			if (result != NULL) {
				result->StartOffset = start;
				result->EndOffset = end;
				md4cpy(result->FileID, GetFileHash().GetHash());
				result->transferred = 0;

		// Reposition to end of that gap

		// If tried all gaps then break out of the loop
		if (end == partEnd) {

	// No suitable gap found
void CPartFile::FillGap(uint64 start, uint64 end)
{
	m_gaplist.FillGap(start, end);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();
}


void CPartFile::FillGap(uint16 part)
{
	m_gaplist.FillGap(part);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();
}
void CPartFile::UpdateCompletedInfos()
{
	uint64 allgaps = m_gaplist.GetGapSize();

	percentcompleted = (1.0 - (double)allgaps/GetFileSize()) * 100.0;
	completedsize = GetFileSize() - allgaps;
void CPartFile::WritePartStatus(CMemFile* file)
{
	uint16 parts = GetED2KPartCount();
	file->WriteUInt16(parts);

	while (done != parts){
		for (uint32 i = 0; i != 8; ++i) {
			if (IsComplete(done)) {

			if (done == parts) {

		file->WriteUInt8(towrite);
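	// Wire-format note (from the code above, as an illustration): the part status
	// is sent as a 16-bit part count followed by a bitfield with eight parts per
	// byte; the inner loop sets a bit for every locally complete part and each
	// full byte is flushed with WriteUInt8(), so e.g. a 20-part file needs three
	// status bytes, with the last four bits unused.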
void CPartFile::WriteCompleteSourcesCount(CMemFile* file)
{
	file->WriteUInt16(m_nCompleteSourcesCount);
}
uint32 CPartFile::Process(uint32 reducedownload /*in percent*/, uint8 m_icounter)
	uint32 dwCurTick = ::GetTickCount();

	// If buffer size exceeds limit, or if not written within time limit, flush data
	if ( (m_nTotalBufferData > thePrefs::GetFileBufferSize()) ||
	     (dwCurTick > (m_nLastBufferFlushTime + BUFFER_TIME_LIMIT))) {

	// check if we want new sources from server --> MOVED for 16.40 version
	old_trans = transferingsrc;

	if (m_icounter < 10) {
		// Update only downloading sources.
		CClientRefList::iterator it = m_downloadingSourcesList.begin();
		for( ; it != m_downloadingSourcesList.end(); ) {
			CUpDownClient *cur_src = it++->GetClient();
			if(cur_src->GetDownloadState() == DS_DOWNLOADING) {
				kBpsDown += cur_src->SetDownloadLimit(reducedownload);

		// Update all sources (including downloading sources)
		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
			CUpDownClient* cur_src = it++->GetClient();
			switch (cur_src->GetDownloadState()) {
				case DS_DOWNLOADING: {
					kBpsDown += cur_src->SetDownloadLimit(reducedownload);

				case DS_LOWTOLOWIP: {
					if (cur_src->HasLowID() && !theApp->CanDoCallback(cur_src->GetServerIP(), cur_src->GetServerPort())) {
						// If we are almost maxed on sources,
						// slowly remove these clients to see
						// if we can find a better source.
						if (((dwCurTick - lastpurgetime) > 30000) &&
						    (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8))) {
							RemoveSource(cur_src);
							lastpurgetime = dwCurTick;
						cur_src->SetDownloadState(DS_ONQUEUE);

				case DS_NONEEDEDPARTS: {
					// we try to purge no-needed sources, even without reaching the limit
					if((dwCurTick - lastpurgetime) > 40000) {
						if(!cur_src->SwapToAnotherFile(false, false, false, NULL)) {
							// however we only delete them if reaching the limit
							if (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) {
								RemoveSource(cur_src);
								lastpurgetime = dwCurTick;
								break; //Johnny-B - nothing more to do here (good eye!)
							lastpurgetime = dwCurTick;

					// doubled reasktime for no needed parts - save connections and traffic
					if ( !((!cur_src->GetLastAskedTime()) ||
					       (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME*2)) {

					// Recheck this client to see if still NNP..
					// Set to DS_NONE so that we force a TCP reask next time..
					cur_src->SetDownloadState(DS_NONE);

					if( cur_src->IsRemoteQueueFull()) {
						if( ((dwCurTick - lastpurgetime) > 60000) &&
						    (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) ) {
							RemoveSource( cur_src );
							lastpurgetime = dwCurTick;
							break; //Johnny-B - nothing more to do here (good eye!)

					// Give up to 1 min for UDP to respond..
					// If we are within one min on TCP, do not try..
					if ( theApp->IsConnected() &&
					     ( (!cur_src->GetLastAskedTime()) ||
					       (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME-20000)) {
						cur_src->UDPReaskForDownload();
					// No break here, since the next case takes care of asking for downloads.

				case DS_TOOMANYCONNS:
				case DS_WAITCALLBACK:
				case DS_WAITCALLBACKKAD: {
					if ( theApp->IsConnected() &&
					     ( (!cur_src->GetLastAskedTime()) ||
					       (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME)) {
						if (!cur_src->AskForDownload()) {
							// I left this break here just as a reminder
							// just in case we rearrange things..

	/* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
	if (IsA4AFAuto() && ((!m_LastNoNeededCheck) || (dwCurTick - m_LastNoNeededCheck > 900000))) {
		m_LastNoNeededCheck = dwCurTick;
		for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
			CUpDownClient *cur_source = it++->GetClient();
			uint8 download_state = cur_source->GetDownloadState();
			if( download_state != DS_DOWNLOADING
			    && cur_source->GetRequestFile()
			    && ((!cur_source->GetRequestFile()->IsA4AFAuto()) || download_state == DS_NONEEDEDPARTS))
				cur_source->SwapToAnotherFile(false, false, false, this);
	/* eMule 0.30c implementation, i give it a try (Creteil) END ... */

	// swap No needed partfiles if possible
	if (((old_trans==0) && (transferingsrc>0)) || ((old_trans>0) && (transferingsrc==0))) {

	// Kad source search
	if( GetMaxSourcePerFileUDP() > GetSourceCount()){
		// Once we can handle lowID users in Kad, we remove the second IsConnected
		if (theApp->downloadqueue->DoKademliaFileRequest() && (Kademlia::CKademlia::GetTotalFile() < KADEMLIATOTALFILE) && (dwCurTick > m_LastSearchTimeKad) && Kademlia::CKademlia::IsConnected() && theApp->IsConnected() && !IsStopped()){
			theApp->downloadqueue->SetLastKademliaFileRequest();

			if (GetKadFileSearchID()) {
				/* This will never happen anyway. We're talking a
				   1h timespan and searches are at max 45secs */
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);

			Kademlia::CUInt128 kadFileID(GetFileHash().GetHash());
			Kademlia::CSearch* pSearch = Kademlia::CSearchManager::PrepareLookup(Kademlia::CSearch::FILE, true, kadFileID);
			AddDebugLogLineN(logKadSearch, CFormat(wxT("Preparing a Kad Search for '%s'")) % GetFileName());
				AddDebugLogLineN(logKadSearch, CFormat(wxT("Kad lookup started for '%s'")) % GetFileName());
				if(m_TotalSearchesKad < 7) {
					m_TotalSearchesKad++;
				m_LastSearchTimeKad = dwCurTick + (KADEMLIAREASKTIME*m_TotalSearchesKad);
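				// Backoff note (from the code above, as an illustration): the next Kad
				// lookup is scheduled KADEMLIAREASKTIME * m_TotalSearchesKad into the
				// future, so each of the first seven searches for this file waits one
				// reask interval longer than the previous one.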
				SetKadFileSearchID(pSearch->GetSearchID());

		if(GetKadFileSearchID()) {
			Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);

	// check if we want new sources from server
	if ( !m_localSrcReqQueued &&
	     ( (!m_lastsearchtime) ||
	       (dwCurTick - m_lastsearchtime) > SERVERREASKTIME) &&
	     theApp->IsConnectedED2K() &&
	     thePrefs::GetMaxSourcePerFileSoft() > GetSourceCount() &&
		m_localSrcReqQueued = true;
		theApp->downloadqueue->SendLocalSrcRequest(this);

	// calculate datarate, set limit etc.

	// Kry - does the 3 / 30 difference produce too much flickering or CPU?
	if (m_count >= 30) {
		UpdateAutoDownPriority();
		UpdateDisplayedInfo();
		if(m_bPercentUpdated == false) {
			UpdateCompletedInfos();
		m_bPercentUpdated = false;

	// release file handle if unused for some time
	m_hpartfile.Release();

	return (uint32)(kBpsDown*1024.0);
bool CPartFile::CanAddSource(uint32 userid, uint16 port, uint32 serverip, uint16 serverport, uint8* pdebug_lowiddropped, bool ed2kID)
{
	// The incoming ID could have the userid in the Hybrid format..
	uint32 hybridID = 0;
		if (IsLowID(userid)) {
			hybridID = wxUINT32_SWAP_ALWAYS(userid);
		if (!IsLowID(userid)) {
			userid = wxUINT32_SWAP_ALWAYS(userid);
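	// Note (illustration, based on the swaps above): for high IDs the eD2K and
	// Kad ("hybrid") representations of the same IPv4 address use opposite byte
	// orders, which is what wxUINT32_SWAP_ALWAYS() converts between; low IDs are
	// plain server-assigned numbers and are treated separately.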
	// MOD Note: Do not change this part - Merkur
	if (theApp->IsConnectedED2K()) {
		if(::IsLowID(theApp->GetED2KID())) {
			if(theApp->GetED2KID() == userid && theApp->serverconnect->GetCurrentServer()->GetIP() == serverip && theApp->serverconnect->GetCurrentServer()->GetPort() == serverport) {
			if(theApp->GetPublicIP() == userid) {
			if(theApp->GetED2KID() == userid && thePrefs::GetPort() == port) {

	if (Kademlia::CKademlia::IsConnected()) {
		if(!Kademlia::CKademlia::IsFirewalled()) {
			if(Kademlia::CKademlia::GetIPAddress() == hybridID && thePrefs::GetPort() == port) {

	// This allows *.*.*.0 clients to not be removed if Ed2kID == false
	if ( IsLowID(hybridID) && theApp->IsFirewalled()) {
		if (pdebug_lowiddropped) {
			(*pdebug_lowiddropped)++;
void CPartFile::AddSources(CMemFile& sources, uint32 serverip, uint16 serverport, unsigned origin, bool bWithObfuscationAndHash)
{
	uint8 count = sources.ReadUInt8();
	uint8 debug_lowiddropped = 0;
	uint8 debug_possiblesources = 0;
	CMD4Hash achUserHash;

		// since we may receive multiple search source UDP results we have to "consume" all data of that packet
		AddDebugLogLineN(logPartFile, wxT("Trying to add sources for a stopped file"));
		sources.Seek(count*(4+2), wxFromCurrent);

	for (int i = 0; i != count; ++i) {
		uint32 userid = sources.ReadUInt32();
		uint16 port = sources.ReadUInt16();

		uint8 byCryptOptions = 0;
		if (bWithObfuscationAndHash){
			byCryptOptions = sources.ReadUInt8();
			if ((byCryptOptions & 0x80) > 0) {
				achUserHash = sources.ReadHash();

			if ((thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x01/*supported*/) > 0 && (byCryptOptions & 0x80) == 0)
			    || (thePrefs::IsClientCryptLayerSupported() && (byCryptOptions & 0x02/*requested*/) > 0 && (byCryptOptions & 0x80) == 0)) {
				AddDebugLogLineN(logPartFile, CFormat(wxT("Server didn't provide UserHash for source %u, even if it was expected to (or local obfuscationsettings changed during serverconnect")) % userid);
			} else if (!thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x02/*requested*/) == 0 && (byCryptOptions & 0x80) != 0) {
				AddDebugLogLineN(logPartFile, CFormat(wxT("Server provided UserHash for source %u, even if it wasn't expected to (or local obfuscationsettings changed during serverconnect")) % userid);
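			// Bit meanings of byCryptOptions checked above (from the code, as an
			// illustration): bit 0x01 = the source supports obfuscation, 0x02 = it
			// requests it, 0x80 = a 16-byte user hash follows in the packet. The two
			// log lines flag a mismatch between these flags and our own crypt-layer
			// preferences.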
		// "Filter LAN IPs" and "IPfilter" the received sources IP addresses
		if (!IsLowID(userid)) {
			// check for 0-IP, localhost and optionally for LAN addresses
			if ( !IsGoodIP(userid, thePrefs::FilterLanIPs()) ) {
			if (theApp->ipfilter->IsFiltered(userid)) {

		if (!CanAddSource(userid, port, serverip, serverport, &debug_lowiddropped)) {

		if(thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
			++debug_possiblesources;
			CUpDownClient* newsource = new CUpDownClient(port, userid, serverip, serverport, this, true, true);

			newsource->SetSourceFrom((ESourceFrom)origin);
			newsource->SetConnectOptions(byCryptOptions, true, false);

			if ((byCryptOptions & 0x80) != 0) {
				newsource->SetUserHash(achUserHash);

			theApp->downloadqueue->CheckAndAddSource(this, newsource);
		} else {
			AddDebugLogLineN(logPartFile, wxT("Consuming a packet because of max sources reached"));
			// Since we may receive multiple search source UDP results we have to "consume" all data of that packet
			// This '+1' is added because 'i' counts from 0.
			sources.Seek((count-(i+1))*(4+2), wxFromCurrent);
			if (GetKadFileSearchID()) {
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
void CPartFile::UpdatePartsInfo()
	if( !IsPartFile() ) {
		CKnownFile::UpdatePartsInfo();

	uint16 partcount = GetPartCount();
	bool flag = (time(NULL) - m_nCompleteSourcesTime > 0);

	// Ensure the frequency-list is ready
	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

	// Find number of available parts
	uint16 availablecounter = 0;
	for ( uint16 i = 0; i < partcount; ++i ) {
		if ( m_SrcpartFrequency[i] )

	if ( ( availablecounter == partcount ) && ( m_availablePartsCount < partcount ) ) {
		lastseencomplete = time(NULL);

	m_availablePartsCount = availablecounter;

		ArrayOfUInts16 count;
		count.reserve(GetSourceCount());

		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it ) {
			CUpDownClient* client = it->GetClient();
			if ( !client->GetUpPartStatus().empty() && client->GetUpPartCount() == partcount ) {
				count.push_back(client->GetUpCompleteSourcesCount());

		m_nCompleteSourcesCount = m_nCompleteSourcesCountLo = m_nCompleteSourcesCountHi = 0;

		for (uint16 i = 0; i < partcount; ++i) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
			else if( m_nCompleteSourcesCount > m_SrcpartFrequency[i]) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
		count.push_back(m_nCompleteSourcesCount);

		int32 n = count.size();

		std::sort(count.begin(), count.end(), std::less<uint16>());

		int32 i = n >> 1;		// (n / 2)
		int32 j = (n * 3) >> 2;	// (n * 3) / 4
		int32 k = (n * 7) >> 3;	// (n * 7) / 8
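		// Note (from the code above, as an illustration): count now holds the
		// complete-source guesses reported by our sources plus our own estimate,
		// sorted ascending, and i, j, k index roughly its 50th, 75th and 87.5th
		// percentiles (for n == 8: i = 4, j = 6, k = 7). These entries feed the
		// low / normal / high estimates blended below.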
		// When still a part file, adjust your guesses by 20% to what you see..

			// Not many sources, so just use what you see..
			// welcome to 'plain stupid code'
			// m_nCompleteSourcesCount;
			m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
			m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
		} else if (n < 20) {
			// For the low guess and the normal guess count:
			// If we see more sources than the guessed low and normal, use what we see.
			// If we see fewer sources than the guessed low, the network accounts for 80%,
			// we account for 20% with what we see and make sure we are still above the normal.

			// Adjust 80% network and 20% what we see.
			if ( count[i] < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
				m_nCompleteSourcesCountLo =
					(uint16)((float)(count[i]*.8) +
						 (float)(m_nCompleteSourcesCount*.2));
			m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
			m_nCompleteSourcesCountHi =
				(uint16)((float)(count[j]*.8) +
					 (float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

			// The network accounts for 80%, we account for 20% with what
			// we see and make sure we are still above the low.

			// The network accounts for 80%, we account for 20% with what
			// we see and make sure we are still above the normal.

			m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
			m_nCompleteSourcesCount = (uint16)((float)(count[j]*.8)+(float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCount < m_nCompleteSourcesCountLo ) {
				m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
			m_nCompleteSourcesCountHi = (uint16)((float)(count[k]*.8)+(float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

	m_nCompleteSourcesTime = time(NULL) + (60);

	UpdateDisplayedInfo();
// [Maella -Enhanced Chunk Selection- (based on jicxicmic)]
bool CPartFile::GetNextRequestedBlock(CUpDownClient* sender,
	std::vector<Requested_Block_Struct*>& toadd, uint16& count)

	// The purpose of this function is to return a list of blocks (~180KB) to
	// download. To avoid a premature stop of the downloading, all blocks that
	// are requested from the same source must be located within the same
	// chunk (=> part ~9MB).
	//
	// The selection of the chunk to download is one of the CRITICAL parts of the
	// edonkey network. The selection algorithm must ensure the best spreading
	//
	// The selection is based on 4 criteria:
	//  1. Frequency of the chunk (availability): very rare chunks must be downloaded
	//     as quickly as possible to become a new available source.
	//  2. Parts used for preview (first + last chunk), to preview or check a
	//     file (e.g. movie, mp3).
	//  3. Request state (downloading in process): try to ask each source for another
	//     chunk. Spread the requests between all sources.
	//  4. Completion (shortest-to-complete): partially retrieved chunks should be
	//     completed before starting to download other ones.
	//
	// The frequency criterion defines three zones: very rare (<10%), rare (<50%)
	// and common (>50%). Inside each zone, the criteria have a specific weight, used
	// to calculate the priority of chunks. The chunk(s) with the highest
	// priority (highest=0, lowest=0xffff) is/are selected first.
	//
	//          very rare (preview)       rare                      common
	//    0% <---- +0 pt ----> 10% <----- +10000 pt -----> 50% <---- +20000 pt ----> 100%
	// 1.  <------- frequency: +25*frequency pt ----------->
	// 2.  <- preview: +1 pt --><-------------- preview: set to 10000 pt ------------->
	// 3.       <------ request: download in progress +20000 pt ------>
	// 4a. <- completion: 0% +100, 25% +75 .. 100% +0 pt --><-- !req => completion --->
	// 4b.                                                  <--- req => !completion -->
	//
	// Unrolled, the priority scale is:
	//
	//   0..xxxx       unrequested and requested very rare chunks
	//   10000..1xxxx  unrequested rare chunks + unrequested preview chunks
	//   20000..2xxxx  unrequested common chunks (priority to the most complete)
	//   30000..3xxxx  requested rare chunks + requested preview chunks
	//   40000..4xxxx  requested common chunks (priority to the least complete)
	//
	// This algorithm usually selects first the rarest chunk(s). However, partially
	// complete chunk(s) that is/are close to completion may overtake the priority
	// (priority inversion).
	// For the common chunks, the algorithm tries to spread the download between
	// the sources.
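	// Worked example (illustration, not part of the original comment): an
	// unrequested chunk in the common zone that is 75% complete gets
	// rank = 20000 + (100 - 75) = 20025, while a requested common chunk at the
	// same completion gets 40000 + 75 = 40075 - so untouched, nearly finished
	// common chunks are picked before chunks other sources are already working on.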
	// Check input parameters
	if ( sender->GetPartStatus().empty() ) {

	// Define and create the list of the chunks to download
	const uint16 partCount = GetPartCount();
	ChunkList chunksList;

	uint16 newBlockCount = 0;
	while(newBlockCount != count) {
		// Create a request block structure if a chunk has been previously selected
		if(sender->GetLastPartAsked() != 0xffff) {
			Requested_Block_Struct* pBlock = new Requested_Block_Struct;
			if(GetNextEmptyBlockInPart(sender->GetLastPartAsked(), pBlock) == true) {
				// Keep a track of all pending requested blocks
				m_requestedblocks_list.push_back(pBlock);
				// Update list of blocks to return
				toadd.push_back(pBlock);
				// Skip end of loop (=> CPU load)
				// All blocks for this chunk have been already requested
				// => Try to select another chunk
				sender->SetLastPartAsked(0xffff);

		// Check if a new chunk must be selected (e.g. download starting, previous chunk complete)
		if(sender->GetLastPartAsked() == 0xffff) {
			// Quantify all chunks (create list of chunks to download)
			// This is done only one time and only if it is necessary (=> CPU load)
			if(chunksList.empty()) {
				// Identify the locally missing part(s) that this source has
				for(uint16 i = 0; i < partCount; ++i) {
					if(sender->IsPartAvailable(i) == true && GetNextEmptyBlockInPart(i, NULL) == true) {
						// Create a new entry for this chunk and add it to the list
						newEntry.frequency = m_SrcpartFrequency[i];
						chunksList.push_back(newEntry);

				// Check if any block(s) could be downloaded
				if(chunksList.empty()) {
					break; // Exit main loop while()

				// Define the bounds of the three zones (very rare, rare)
				// more depending on available sources
				if (GetSourceCount() > 800) {
				} else if (GetSourceCount() > 200) {
				uint16 limit = modif*GetSourceCount()/100;

				const uint16 veryRareBound = limit;
				const uint16 rareBound = 2*limit;

				// Cache Preview state (Criterion 2)
				FileType type = GetFiletype(GetFileName());
				const bool isPreviewEnable =
					thePrefs::GetPreviewPrio() &&
					(type == ftArchive || type == ftVideo);
				// Collect and calculate criteria for all chunks
				for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
					Chunk& cur_chunk = *it;

					const uint64 uStart = cur_chunk.part * PARTSIZE;
					const uint64 uEnd = uStart + GetPartSize(cur_chunk.part) - 1;
					// Criterion 2. Parts used for preview
					// Remark: - We need to download the first part and the last part(s).
					//         - When the last part is very small, it's necessary to
					//           download the two last parts.
					bool critPreview = false;
					if(isPreviewEnable == true) {
						if(cur_chunk.part == 0) {
							critPreview = true; // First chunk
						} else if(cur_chunk.part == partCount-1) {
							critPreview = true; // Last chunk
						} else if(cur_chunk.part == partCount-2) {
							// Last chunk - 1 (only if last chunk is too small)
							const uint32 sizeOfLastChunk = GetFileSize() - uEnd;
							if(sizeOfLastChunk < PARTSIZE/3) {
								critPreview = true; // Last chunk - 1

					// Criterion 3. Request state (downloading in process from other source(s))
					const bool critRequested =
						cur_chunk.frequency > veryRareBound &&
						IsAlreadyRequested(uStart, uEnd);

					// Criterion 4. Completion
					// PARTSIZE instead of GetPartSize() favours the last chunk - but that may be intentional
					uint32 partSize = PARTSIZE - m_gaplist.GetGapSize(cur_chunk.part);
					const uint16 critCompletion = (uint16)(partSize/(PARTSIZE/100)); // in [%]

					// Calculate priority with all criteria
					if(cur_chunk.frequency <= veryRareBound) {
						// 0..xxxx  unrequested + requested very rare chunks
						cur_chunk.rank = (25 * cur_chunk.frequency) +		// Criterion 1
							((critPreview == true) ? 0 : 1) +		// Criterion 2
							(100 - critCompletion);				// Criterion 4
					} else if(critPreview == true) {
						// 10000..10100  unrequested preview chunks
						// 30000..30100  requested preview chunks
						cur_chunk.rank = ((critRequested == false) ? 10000 : 30000) +	// Criterion 3
							(100 - critCompletion);					// Criterion 4
					} else if(cur_chunk.frequency <= rareBound) {
						// 10101..1xxxx  unrequested rare chunks
						// 30101..3xxxx  requested rare chunks
						cur_chunk.rank = (25 * cur_chunk.frequency) +			// Criterion 1
							((critRequested == false) ? 10101 : 30101) +		// Criterion 3
							(100 - critCompletion);					// Criterion 4
						if(critRequested == false) {	// Criterion 3
							// 20000..2xxxx  unrequested common chunks
							cur_chunk.rank = 20000 +		// Criterion 3
								(100 - critCompletion);		// Criterion 4
							// 40000..4xxxx  requested common chunks
							// Remark: The weight of the completion criterion is inverted
							//         to spread the requests over the completing chunks.
							//         Without this, the chunk closest to completion would
							//         receive every new source.
							cur_chunk.rank = 40000 +		// Criterion 3
								(critCompletion);		// Criterion 4
2018 // Select the next chunk to download
2019 if(!chunksList
.empty()) {
2020 // Find and count the chunck(s) with the highest priority
2021 uint16 chunkCount
= 0; // Number of found chunks with same priority
2022 uint16 rank
= 0xffff; // Highest priority found
2024 // Collect and calculate criteria for all chunks
2025 for (ChunkList::iterator it
= chunksList
.begin(); it
!= chunksList
.end(); ++it
) {
2026 const Chunk
& cur_chunk
= *it
;
2027 if(cur_chunk
.rank
< rank
) {
2029 rank
= cur_chunk
.rank
;
2030 } else if(cur_chunk
.rank
== rank
) {
2035 // Use a random access to avoid that everybody tries to download the
2036 // same chunks at the same time (=> spread the selected chunk among clients)
2037 uint16 randomness
= 1 + (int) (((float)(chunkCount
-1))*rand()/(RAND_MAX
+1.0));
2039 for (ChunkList::iterator it
= chunksList
.begin(); it
!= chunksList
.end(); ++it
) {
2040 const Chunk
& cur_chunk
= *it
;
2041 if(cur_chunk
.rank
== rank
) {
2043 if(randomness
== 0) {
2044 // Selection process is over
2045 sender
->SetLastPartAsked(cur_chunk
.part
);
2046 // Remark: this list might be reused up to *count times
2047 chunksList
.erase(it
);
2048 break; // exit loop for()
2053 // There is no remaining chunk to download
2054 break; // Exit main loop while()
2058 // Return the number of the blocks
2059 count
= newBlockCount
;
2061 return (newBlockCount
> 0);
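// Worked example of the ranking above (illustrative, not from the original sources):
// lower rank means higher download priority. The bands follow the comments in the
// code: 0..xxxx very rare chunks, 10000..10100 unrequested preview chunks,
// 20000..2xxxx unrequested common chunks, 30000..3xxxx requested preview/rare
// chunks, 40000..4xxxx requested common chunks. For instance, an unrequested,
// non-preview, very rare chunk known to 2 sources and 40% complete is ranked
// 25*2 + 1 + (100 - 40) = 111, so it is selected long before any common chunk.
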
void CPartFile::RemoveBlockFromList(uint64 start, uint64 end)
{
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	while (it != m_requestedblocks_list.end()) {
		std::list<Requested_Block_Struct*>::iterator it2 = it++;

		if ((*it2)->StartOffset <= start && (*it2)->EndOffset >= end) {
			m_requestedblocks_list.erase(it2);
		}
	}
}


void CPartFile::RemoveAllRequestedBlocks(void)
{
	m_requestedblocks_list.clear();
}

void CPartFile::CompleteFile(bool bIsHashingDone)
{
	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
	}

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	AddDebugLogLineN( logPartFile, wxString( wxT("CPartFile::CompleteFile: Hash ") ) + ( bIsHashingDone ? wxT("done") : wxT("not done") ) );

	if (!bIsHashingDone) {
		SetStatus(PS_COMPLETING);

		CPath partFile = m_partmetfilename.RemoveExt();
		CThreadScheduler::AddTask(new CHashingTask(GetFilePath(), partFile, this));
		return;
	} else {
		m_is_A4AF_auto = false;
		SetStatus(PS_COMPLETING);
		// guess I was wrong about not needing to spawn a thread ...
		// It is needed if the temp and incoming dirs are on different
		// partitions/drives and the file is large... [oz]
		PerformFileComplete();
	}

	if (thePrefs::ShowCatTabInfos()) {
		Notify_ShowUpdateCatTabTitles();
	}
	UpdateDisplayedInfo(true);
}

void CPartFile::CompleteFileEnded(bool errorOccured, const CPath& newname)
{
	if (errorOccured) {
		SetStatus(PS_ERROR);
		AddLogLineC(CFormat( _("Unexpected error while completing %s. File paused") ) % GetFileName() );
	} else {
		m_fullname = newname;

		SetFilePath(m_fullname.GetPath());
		SetFileName(m_fullname.GetFullName());
		m_lastDateChanged = CPath::GetModificationTime(m_fullname);

		SetStatus(PS_COMPLETE);

		// Remove from list of canceled files in case it was canceled once upon a time
		if (theApp->canceledfiles->Remove(GetFileHash())) {
			theApp->canceledfiles->Save();
		}

		// Mark as known (checks if it's already known),
		// also updates search files
		theApp->knownfiles->SafeAddKFile(this);

		// remove the file from the suspended uploads list
		theApp->uploadqueue->ResumeUpload(GetFileHash());
		theApp->downloadqueue->RemoveFile(this, true);
		theApp->sharedfiles->SafeAddKFile(this);
		UpdateDisplayedInfo(true);

		// republish that file to the ed2k-server to update the 'FT_COMPLETE_SOURCES' counter on the server.
		theApp->sharedfiles->RepublishFile(this);

		// Ensure that completed shows the correct value
		completedsize = GetFileSize();

		// clear the blackbox to free up memory
		m_CorruptionBlackBox->Free();

		AddLogLineC(CFormat( _("Finished downloading: %s") ) % GetFileName() );
	}

	theApp->downloadqueue->StartNextFile(this);
}

void CPartFile::PerformFileComplete()
{
	// add this file to the suspended uploads list
	theApp->uploadqueue->SuspendUpload(GetFileHash(), false);

	// close permanent handle
	if (m_hpartfile.IsOpened()) {
		m_hpartfile.Close();
	}

	// Schedule task for completion of the file
	CThreadScheduler::AddTask(new CCompletionTask(this));
}

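// Overview of the completion sequence implemented above (illustrative summary, no
// additional behaviour): when the gap list becomes empty, CompleteFile(false)
// schedules a CHashingTask for a final hash check and returns. Once hashing is
// done, CompleteFile(true) switches to PS_COMPLETING and calls
// PerformFileComplete(), which suspends uploads of this hash, closes the file
// handle and queues a CCompletionTask; that background task moves the finished
// file out of the temp directory and reports back through CompleteFileEnded().
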
void CPartFile::RemoveAllSources(bool bTryToSwap)
{
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = it++->GetClient();
		if (bTryToSwap) {
			if (!cur_src->SwapToAnotherFile(true, true, true, NULL)) {
				RemoveSource(cur_src, true, false);
				// If it was not swapped, it's not on any file anymore, and should die
			}
		} else {
			RemoveSource(cur_src, true, false);
		}
	}

	/* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
	// remove all links A4AF in sources to this file
	if (!m_A4AFsrclist.empty()) {
		for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
			CUpDownClient* cur_src = it++->GetClient();
			if ( cur_src->DeleteFileRequest( this ) ) {
				Notify_SourceCtrlRemoveSource(cur_src->ECID(), this);
			}
		}
		m_A4AFsrclist.clear();
	}
	/* eMule 0.30c implementation, i give it a try (Creteil) END ... */
	UpdateFileRatingCommentAvail();
}

void CPartFile::Delete()
{
	AddLogLineN(CFormat(_("Deleting file: %s")) % GetFileName());
	// Barry - Need to tell any connected clients to stop sending the file
	StopFile(true);
	AddDebugLogLineN(logPartFile, wxT("\tStopped"));

#ifdef __DEBUG__
	uint16 removed =
#endif
		theApp->uploadqueue->SuspendUpload(GetFileHash(), true);
	AddDebugLogLineN(logPartFile, CFormat(wxT("\tSuspended upload to %d clients")) % removed);
	theApp->sharedfiles->RemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from shared"));
	theApp->downloadqueue->RemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from download queue"));
	Notify_DownloadCtrlRemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from transferwnd"));
	if (theApp->canceledfiles->Add(GetFileHash())) {
		theApp->canceledfiles->Save();
	}
	AddDebugLogLineN(logPartFile, wxT("\tAdded to canceled file list"));
	theApp->searchlist->UpdateSearchFileByHash(GetFileHash());	// Update file in the search dialog if it's still open

	if (m_hpartfile.IsOpened()) {
		m_hpartfile.Close();
	}
	AddDebugLogLineN(logPartFile, wxT("\tClosed"));

	// cppcheck-suppress duplicateBranch
	if (!CPath::RemoveFile(m_fullname)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("\tFailed to delete '%s'")) % m_fullname);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .part.met"));
	}

	// cppcheck-suppress duplicateBranch
	if (!CPath::RemoveFile(m_PartPath)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % m_PartPath);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .part"));
	}

	CPath BAKName = m_fullname.AppendExt(PARTMET_BAK_EXT);
	// cppcheck-suppress duplicateBranch
	if (!CPath::RemoveFile(BAKName)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % BAKName);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .bak"));
	}

	CPath SEEDSName = m_fullname.AppendExt(wxT(".seeds"));
	if (SEEDSName.FileExists()) {
		// cppcheck-suppress duplicateBranch
		if (CPath::RemoveFile(SEEDSName)) {
			AddDebugLogLineN(logPartFile, wxT("\tRemoved .seeds"));
		} else {
			AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % SEEDSName);
		}
	}

	AddDebugLogLineN(logPartFile, wxT("Done"));
}

bool CPartFile::HashSinglePart(uint16 partnumber)
{
	if ((GetHashCount() <= partnumber) && (GetPartCount() > 1)) {
		AddLogLineC(CFormat( _("WARNING: Unable to hash downloaded part - hashset incomplete for '%s'") )
			% GetFileName() );
		m_hashsetneeded = true;
		return true;
	} else if ((GetHashCount() <= partnumber) && GetPartCount() != 1) {
		AddLogLineC(CFormat( _("ERROR: Unable to hash downloaded part - hashset incomplete (%s). This should never happen")) % GetFileName() );
		m_hashsetneeded = true;
		return true;
	} else {
		CMD4Hash hashresult;
		uint64 offset = PARTSIZE * partnumber;
		uint32 length = GetPartSize(partnumber);
		try {
			CreateHashFromFile(m_hpartfile, offset, length, &hashresult, NULL);
		} catch (const CIOFailureException& e) {
			AddLogLineC(CFormat( _("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
				% partnumber % length % (offset+length) % GetFileName() % GetFileSize() % e.what());
			SetStatus(PS_ERROR);
			return false;
		} catch (const CEOFException& e) {
			AddLogLineC(CFormat( _("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
				% partnumber % length % (offset+length) % GetFileName() % GetFileSize() % e.what());
			SetStatus(PS_ERROR);
			return false;
		}

		if (GetPartCount() > 1) {
			if (hashresult != GetPartHash(partnumber)) {
				AddDebugLogLineN(logPartFile, CFormat( wxT("%s: Expected hash of part %d: %s")) % GetFileName() % partnumber % GetPartHash(partnumber).Encode() );
				AddDebugLogLineN(logPartFile, CFormat( wxT("%s: Actual hash of part %d:   %s")) % GetFileName() % partnumber % hashresult.Encode() );
				return false;
			} else {
				return true;
			}
		} else {
			if (hashresult != m_abyFileHash) {
				return false;
			} else {
				return true;
			}
		}
	}
}

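// Worked example (assuming the standard eD2k part size PARTSIZE = 9728000 bytes,
// which is not defined in this file): for partnumber == 2 the hashed range starts
// at offset = 2 * 9728000 = 19456000 with length = GetPartSize(2), and the
// resulting MD4 digest is compared against GetPartHash(2) from the hashset, or
// against the full file hash m_abyFileHash when the file consists of one part.
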
bool CPartFile::IsCorruptedPart(uint16 partnumber)
{
	return std::find(m_corrupted_list.begin(), m_corrupted_list.end(), partnumber)
		!= m_corrupted_list.end();
}

void CPartFile::SetDownPriority(uint8 np, bool bSave, bool bRefresh)
{
	if ( m_iDownPriority != np ) {
		m_iDownPriority = np;
		if ( bRefresh ) {
			UpdateDisplayedInfo(true);
		}
		if ( bSave ) {
			SavePartFile();
		}
	}
}

void CPartFile::StopFile(bool bCancel)
{
	// Kry - Need to set it here to get into SetStatus(status) correctly
	m_stopped = true;

	// Barry - Need to tell any connected clients to stop sending the file
	PauseFile();

	m_LastSearchTimeKad = 0;
	m_TotalSearchesKad = 0;

	RemoveAllSources(true);

	UpdateDisplayedInfo(true);
}

void CPartFile::StopPausedFile()
{
	// Once an hour, remove any sources for files which are no longer active downloads
	switch (GetStatus()) {
		case PS_PAUSED:
		case PS_INSUFFICIENT:
		case PS_ERROR:
			if (time(NULL) - m_iLastPausePurge > (60*60)) {
				m_iLastPausePurge = time(NULL);
				StopFile();
			}
			break;
	}

	// release file handle if unused for some time
	m_hpartfile.Release();
}

void CPartFile::PauseFile(bool bInsufficient)
{
	if ( status == PS_COMPLETE || status == PS_COMPLETING ) {
		return;
	}

	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);
		// If we were in the middle of searching, reset timer so they can resume searching.
		m_LastSearchTimeKad = 0;
	}

	m_iLastPausePurge = time(NULL);

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	CPacket packet( OP_CANCELTRANSFER, 0, OP_EDONKEYPROT );
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = it++->GetClient();
		if (cur_src->GetDownloadState() == DS_DOWNLOADING) {
			if (!cur_src->GetSentCancelTransfer()) {
				theStats::AddUpOverheadOther( packet.GetPacketSize() );
				AddDebugLogLineN( logLocalClient, wxT("Local Client: OP_CANCELTRANSFER to ") + cur_src->GetFullIP() );
				cur_src->SendPacket( &packet, false, true );
				cur_src->SetSentCancelTransfer( true );
			}
			cur_src->SetDownloadState(DS_ONQUEUE);
			// Allow immediate reconnect on resume
			cur_src->ResetLastAskedTime();
		}
	}

	m_insufficient = bInsufficient;
	m_paused = true;
}

void CPartFile::ResumeFile()
{
	if ( status == PS_COMPLETE || status == PS_COMPLETING ) {
		return;
	}

	if ( m_insufficient && !CheckFreeDiskSpace() ) {
		// Still not enough free disk space
		return;
	}

	m_paused = false;
	m_stopped = false;
	m_insufficient = false;

	m_lastsearchtime = 0;
	SetActive(theApp->IsConnected());

	if (m_gaplist.IsComplete() && (GetStatus() == PS_ERROR)) {
		// The file has already been hashed at this point
		CompleteFile(true);
	}

	UpdateDisplayedInfo(true);
}

bool CPartFile::CheckFreeDiskSpace( uint64 neededSpace )
{
	uint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if (free == static_cast<uint64>(wxInvalidOffset)) {
		// If GetFreeSpaceAt() fails, then the path probably does not exist.
		return false;
	}

	// The very least acceptable diskspace is a single PART
	if ( free < PARTSIZE ) {
		// Always fail in this case, since we risk losing data if we try to
		// write on a full partition.
		return false;
	}

	// All other checks are only made if the user has enabled them
	if ( thePrefs::IsCheckDiskspaceEnabled() ) {
		neededSpace += thePrefs::GetMinFreeDiskSpace();

		// Due to the existence of sparse files, we cannot assume that
		// writes within the file don't cause new blocks to be allocated.
		// Therefore, we have to simply stop writing the moment the limit
		// has been reached.
		return free >= neededSpace;
	}

	return true;
}

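// Illustrative numbers for the check above (not from the original sources): with
// the disk-space check enabled, free = 500 MB, neededSpace = 300 MB and a
// configured minimum of 250 MB, the call returns false because 300 + 250 > 500.
// Independently of any setting, less than one PARTSIZE (about 9.3 MB) of free
// space always fails.
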
void CPartFile::SetLastAnsweredTime()
{
	m_ClientSrcAnswered = ::GetTickCount();
}


void CPartFile::SetLastAnsweredTimeTimeout()
{
	m_ClientSrcAnswered = 2 * CONNECTION_LATENCY + ::GetTickCount() - SOURCECLIENTREASKS;
}

CPacket* CPartFile::CreateSrcInfoPacket(const CUpDownClient* forClient, uint8 byRequestedVersion, uint16 nRequestedOptions)
{
	if ( m_SrcList.empty() ) {
		return NULL;
	}

	if (!IsPartFile()) {
		return CKnownFile::CreateSrcInfoPacket(forClient, byRequestedVersion, nRequestedOptions);
	}

	if (((forClient->GetRequestFile() != this)
		&& (forClient->GetUploadFile() != this)) || forClient->GetUploadFileID() != GetFileHash()) {
		wxString file1 = _("Unknown");
		if (forClient->GetRequestFile() && forClient->GetRequestFile()->GetFileName().IsOk()) {
			file1 = forClient->GetRequestFile()->GetFileName().GetPrintable();
		} else if (forClient->GetUploadFile() && forClient->GetUploadFile()->GetFileName().IsOk()) {
			file1 = forClient->GetUploadFile()->GetFileName().GetPrintable();
		}
		wxString file2 = _("Unknown");
		if (GetFileName().IsOk()) {
			file2 = GetFileName().GetPrintable();
		}
		AddDebugLogLineN(logPartFile, wxT("File mismatch on source packet (P) Sending: ") + file1 + wxT("  From: ") + file2);
		return NULL;
	}

	if ( !(GetStatus() == PS_READY || GetStatus() == PS_EMPTY)) {
		return NULL;
	}

	const BitVector& reqstatus = forClient->GetPartStatus();
	bool KnowNeededParts = !reqstatus.empty();
	//wxASSERT(rcvstatus.size() == GetPartCount()); // Obviously!
	if (KnowNeededParts && (reqstatus.size() != GetPartCount())) {
		// Yuck. Same file but different part count? Seriously fucked up.
		// This happens rather often with reqstatus.size() == 0. Don't log then.
		if (reqstatus.size()) {
			AddDebugLogLineN(logKnownFiles, CFormat(wxT("Impossible situation: different partcounts: %i (client) and %i (file) for %s")) % reqstatus.size() % GetPartCount() % GetFileName());
		}
		return NULL;
	}

	CMemFile data(1024);

	uint8 byUsedVersion;
	bool bIsSX2Packet = false;
	if (forClient->SupportsSourceExchange2() && byRequestedVersion > 0) {
		// the client uses SourceExchange2 and requested the highest version he knows
		// and we send the highest version we know, but of course not higher than his request
		byUsedVersion = std::min(byRequestedVersion, (uint8)SOURCEEXCHANGE2_VERSION);
		bIsSX2Packet = true;
		data.WriteUInt8(byUsedVersion);

		// we don't support any special SX2 options yet, reserved for later use
		if (nRequestedOptions != 0) {
			AddDebugLogLineN(logKnownFiles, CFormat(wxT("Client requested unknown options for SourceExchange2: %u")) % nRequestedOptions);
		}
	} else {
		byUsedVersion = forClient->GetSourceExchange1Version();
		bIsSX2Packet = false;
		if (forClient->SupportsSourceExchange2()) {
			AddDebugLogLineN(logKnownFiles, wxT("Client which announced to support SX2 sent SX1 packet instead"));
		}
	}

	uint16 nCount = 0;
	data.WriteHash(m_abyFileHash);
	data.WriteUInt16(nCount);

	for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		bool bNeeded = false;
		CUpDownClient* cur_src = it->GetClient();

		int state = cur_src->GetDownloadState();
		int valid = ( state == DS_DOWNLOADING ) || ( state == DS_ONQUEUE && !cur_src->IsRemoteQueueFull() );

		if ( cur_src->HasLowID() || !valid ) {
			continue;
		}

		// only send sources which have needed parts for this client if possible
		const BitVector& srcstatus = cur_src->GetPartStatus();
		if ( !srcstatus.empty() ) {
			//wxASSERT(srcstatus.size() == GetPartCount()); // Obviously!
			if (srcstatus.size() != GetPartCount()) {
				continue;
			}
			if ( KnowNeededParts ) {
				// only send sources which have needed parts for this client
				for (int x = 0; x < GetPartCount(); ++x) {
					if (srcstatus.get(x) && !reqstatus.get(x)) {
						bNeeded = true;
						break;
					}
				}
			} else {
				// if we don't know the needed parts for this client,
				// return any source; currently a client sends its
				// file status only after it has at least one complete part
				if (srcstatus.size() != GetPartCount()) {
					continue;
				}
				for (int x = 0; x < GetPartCount(); ++x) {
					if (srcstatus.get(x)) {
						bNeeded = true;
						break;
					}
				}
			}
		}

		if (bNeeded) {
			++nCount;
			uint32 dwID;
			if (forClient->GetSourceExchange1Version() > 2) {
				dwID = cur_src->GetUserIDHybrid();
			} else {
				dwID = wxUINT32_SWAP_ALWAYS(cur_src->GetUserIDHybrid());
			}
			data.WriteUInt32(dwID);
			data.WriteUInt16(cur_src->GetUserPort());
			data.WriteUInt32(cur_src->GetServerIP());
			data.WriteUInt16(cur_src->GetServerPort());

			if (byUsedVersion >= 2) {
				data.WriteHash(cur_src->GetUserHash());
			}

			if (byUsedVersion >= 4) {
				// CryptSettings - SourceExchange V4
				// 5 Reserved (!)
				// 1 CryptLayer Required
				// 1 CryptLayer Requested
				// 1 CryptLayer Supported
				const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
				const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
				const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
				const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
				data.WriteUInt8(byCryptOptions);
			}
		}
	}

	if (!nCount) {
		return NULL;
	}

	data.Seek(bIsSX2Packet ? 17 : 16, wxFromStart);
	data.WriteUInt16(nCount);

	CPacket* result = new CPacket(data, OP_EMULEPROT, bIsSX2Packet ? OP_ANSWERSOURCES2 : OP_ANSWERSOURCES);

	// 16+2+501*(4+2+4+2+16) = 14046 bytes max.
	if (result->GetPacketSize() > 354) {
		result->PackPacket();
	}

	return result;
}

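// Layout of the answer written above (summary of the Write* calls, for reference):
// [SX2 only: 1 byte used version] <file hash, 16> <source count, 2>, then per
// source <user ID, 4> <port, 2> <server IP, 4> <server port, 2>, plus
// <user hash, 16> from version 2 on and <crypt options, 1> from version 4 on.
// In byCryptOptions bit 0 = supported, bit 1 = requested, bit 2 = required; e.g.
// a source that supports and requests but does not require obfuscation is 0x03.
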
void CPartFile::AddClientSources(CMemFile* sources, unsigned nSourceFrom, uint8 uClientSXVersion, bool bSourceExchange2, const CUpDownClient* /*pClient*/)
{
	uint16 nCount = 0;
	uint8 uPacketSXVersion = 0;
	if (!bSourceExchange2) {
		nCount = sources->ReadUInt16();

		// Check if the data size matches the 'nCount' for v1 or v2 and eventually correct the source
		// exchange version while reading the packet data. Otherwise we run a higher risk of dealing
		// with wrong source data, userhashes and finally duplicate sources.
		uint32 uDataSize = sources->GetLength() - sources->GetPosition();

		if ((uint32)(nCount*(4+2+4+2)) == uDataSize) { // Checks if version 1 packet is correct size
			if (uClientSXVersion != 1) {
				return;
			}
			uPacketSXVersion = 1;
		} else if ((uint32)(nCount*(4+2+4+2+16)) == uDataSize) { // Checks if version 2&3 packet is correct size
			if (uClientSXVersion == 2) {
				uPacketSXVersion = 2;
			} else if (uClientSXVersion > 2) {
				uPacketSXVersion = 3;
			} else {
				return;
			}
		} else if (nCount*(4+2+4+2+16+1) == uDataSize) {
			if (uClientSXVersion != 4 ) {
				return;
			}
			uPacketSXVersion = 4;
		} else {
			// If v5 inserts additional data (like v2), the above code will correctly filter those packets.
			// If v5 appends additional data after <count>(<Sources>)[count], we are in trouble with the
			// above code. Though a client which does not understand v5+ should never receive such a packet.
			AddDebugLogLineN(logClient, CFormat(wxT("Received invalid source exchange packet (v%u) of data size %u for %s")) % uClientSXVersion % uDataSize % GetFileName());
			return;
		}
	} else {
		// We only check whether the version is known by us and do a quick sanity check on known versions
		// other than SX1; the packet will be ignored if any error appears, since it can't be a "misunderstanding" anymore
		if (uClientSXVersion > SOURCEEXCHANGE2_VERSION || uClientSXVersion == 0 ) {
			AddDebugLogLineN(logPartFile, CFormat(wxT("Invalid source exchange type version: %i")) % uClientSXVersion);
			return;
		}

		// all known versions use the first 2 bytes as count and unknown versions are already filtered above
		nCount = sources->ReadUInt16();
		uint32 uDataSize = (uint32)(sources->GetLength() - sources->GetPosition());
		bool bError = false;
		switch (uClientSXVersion) {
			case 1:
				bError = nCount*(4+2+4+2) != uDataSize;
				break;
			case 2:
			case 3:
				bError = nCount*(4+2+4+2+16) != uDataSize;
				break;
			case 4:
				bError = nCount*(4+2+4+2+16+1) != uDataSize;
				break;
		}

		if (bError) {
			AddDebugLogLineN(logPartFile, wxT("Invalid source exchange data size."));
			return;
		}
		uPacketSXVersion = uClientSXVersion;
	}

	for (uint16 i = 0; i != nCount; ++i) {
		uint32 dwID = sources->ReadUInt32();
		uint16 nPort = sources->ReadUInt16();
		uint32 dwServerIP = sources->ReadUInt32();
		uint16 nServerPort = sources->ReadUInt16();

		CMD4Hash userHash;
		if (uPacketSXVersion > 1) {
			userHash = sources->ReadHash();
		}

		uint8 byCryptOptions = 0;
		if (uPacketSXVersion >= 4) {
			byCryptOptions = sources->ReadUInt8();
		}

		// Clients send IDs in the Hybrid format so highID clients with *.*.*.0 won't be falsely switched to a lowID.
		uint32 dwIDED2K;
		if (uPacketSXVersion >= 3) {
			dwIDED2K = wxUINT32_SWAP_ALWAYS(dwID);
		} else {
			dwIDED2K = dwID;
		}

		// check the HighID (IP): apply "Filter LAN IPs" and the IPFilter to the received source IP addresses
		if (!IsLowID(dwID)) {
			if (!IsGoodIP(dwIDED2K, thePrefs::FilterLanIPs())) {
				// check for 0-IP, localhost and optionally for LAN addresses
				AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - bad IP")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
				continue;
			}
			if (theApp->ipfilter->IsFiltered(dwIDED2K)) {
				AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - IPFilter")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
				continue;
			}
			if (theApp->clientlist->IsBannedClient(dwIDED2K)) {
				continue;
			}
		}

		// additionally check for LowID and own IP
		if (!CanAddSource(dwID, nPort, dwServerIP, nServerPort, NULL, false)) {
			AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via source exchange")) % Uint32toStringIP(dwIDED2K));
			continue;
		}

		if (thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
			CUpDownClient* newsource = new CUpDownClient(nPort, dwID, dwServerIP, nServerPort, this, (uPacketSXVersion < 3), true);
			if (uPacketSXVersion > 1) {
				newsource->SetUserHash(userHash);
			}

			if (uPacketSXVersion >= 4) {
				newsource->SetConnectOptions(byCryptOptions, true, false);
			}

			newsource->SetSourceFrom((ESourceFrom)nSourceFrom);
			theApp->downloadqueue->CheckAndAddSource(this, newsource);
		}
	}
}

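// The size checks above boil down to a fixed record length per version
// (illustrative): 4+2+4+2 = 12 bytes for v1, 28 bytes for v2/v3 (user hash added)
// and 29 bytes for v4 (crypt options byte added). So a v4 packet announcing 10
// sources must carry exactly 290 bytes of source data after the 2-byte count,
// otherwise it is discarded.
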
void CPartFile::UpdateAutoDownPriority()
{
	if (!IsAutoDownPriority()) {
		return;
	}

	if (GetSourceCount() <= theApp->downloadqueue->GetRareFileThreshold()) {
		if ( GetDownPriority() != PR_HIGH )
			SetDownPriority(PR_HIGH, false, false);
	} else if (GetSourceCount() < theApp->downloadqueue->GetCommonFileThreshold()) {
		if ( GetDownPriority() != PR_NORMAL )
			SetDownPriority(PR_NORMAL, false, false);
	} else {
		if ( GetDownPriority() != PR_LOW )
			SetDownPriority(PR_LOW, false, false);
	}
}

// Making this function return a higher value when more sources have the extended
// protocol will force you to ask a larger variety of people for sources
int CPartFile::GetCommonFilePenalty()
{
	//TODO: implement, but never return less than MINCOMMONPENALTY!
	return MINCOMMONPENALTY;
}

/* Barry - Replaces BlockReceived()

	Originally this only wrote to disk when a full 180k block
	had been received from a client, and only asked for data in
	180k blocks.

	This meant that on average 90k was lost for every connection
	to a client data source. That is a lot of wasted data.

	To reduce the lost data, packets are now written to a buffer
	and flushed to disk regularly regardless of size downloaded.
	This includes compressed packets.

	Data is also requested only where gaps are, not in 180k blocks.
	The requests will still not exceed 180k, but may be smaller to
	fill a gap.
*/

// Kry - transize is 32bits, no packet can be more than that (this is
// compressed size). Even 32bits is too much imho. As for the return size,
// look at the lenData below.
uint32 CPartFile::WriteToBuffer(uint32 transize, byte* data, uint64 start, uint64 end, Requested_Block_Struct* block, const CUpDownClient* client)
{
	// Increment transferred bytes counter for this file
	transferred += transize;

	// This is needed a few times
	// Kry - should not need a uint64 here - no block is larger than
	// 2GB even after uncompressed.
	uint32 lenData = (uint32) (end - start + 1);

	if (lenData > transize) {
		m_iGainDueToCompression += lenData - transize;
	}

	// Occasionally packets are duplicated, no point writing it twice
	if (IsComplete(start, end)) {
		AddDebugLogLineN(logPartFile,
			CFormat(wxT("File '%s' has already been written from %u to %u"))
				% GetFileName() % start % end);
		return 0;
	}

	// security sanity check to make sure we do not write anything into an already hashed complete chunk
	const uint64 nStartChunk = start / PARTSIZE;
	const uint64 nEndChunk = end / PARTSIZE;
	if (IsComplete(nStartChunk)) {
		AddDebugLogLineN(logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (start): %u-%u; File=%s")) % start % end % GetFileName());
		return 0;
	} else if (nStartChunk != nEndChunk) {
		if (IsComplete(nEndChunk)) {
			AddDebugLogLineN(logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (end): %u-%u; File=%s")) % start % end % GetFileName());
			return 0;
		} else {
			AddDebugLogLineN(logPartFile, CFormat(wxT("Received data crosses chunk boundaries: %u-%u; File=%s")) % start % end % GetFileName());
		}
	}

	// log transferinformation in our "blackbox"
	m_CorruptionBlackBox->TransferredData(start, end, client->GetIP());

	// Create a new buffered queue entry
	PartFileBufferedData *item = new PartFileBufferedData(m_hpartfile, data, start, end, block);

	// Add to the queue in the correct position (most likely the end)
	bool added = false;

	std::list<PartFileBufferedData*>::iterator it = m_BufferedData_list.begin();
	for (; it != m_BufferedData_list.end(); ++it) {
		PartFileBufferedData* queueItem = *it;

		if (item->end <= queueItem->end) {
			if (it != m_BufferedData_list.begin()) {
				added = true;
				m_BufferedData_list.insert(--it, item);
			}
			break;
		}
	}

	if (!added) {
		m_BufferedData_list.push_front(item);
	}

	// Increment buffer size marker
	m_nTotalBufferData += lenData;

	// Mark this small section of the file as filled
	FillGap(item->start, item->end);

	// Update the flushed mark on the requested block
	// The loop here is unfortunate but necessary to detect deleted blocks.
	std::list<Requested_Block_Struct*>::iterator it2 = m_requestedblocks_list.begin();
	for (; it2 != m_requestedblocks_list.end(); ++it2) {
		if (*it2 == item->block) {
			item->block->transferred += lenData;
			break;
		}
	}

	if (m_gaplist.IsComplete()) {
		FlushBuffer(true);
	}

	// Return the length of data written to the buffer
	return lenData;
}

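// Example of the compression accounting above (illustrative): a client that sends
// transize = 120000 compressed bytes covering start = 0, end = 183999 delivers
// lenData = 184000 bytes of payload, so m_iGainDueToCompression grows by 64000
// and the same 184000 bytes are credited to the requested block and the gap list.
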
void CPartFile::FlushBuffer(bool fromAICHRecoveryDataAvailable)
{
	m_nLastBufferFlushTime = GetTickCount();

	if (m_BufferedData_list.empty()) {
		return;
	}

	uint32 partCount = GetPartCount();
	// Remember which parts need to be checked at the end of the flush
	std::vector<bool> changedPart(partCount, false);

	// Ensure file is big enough to write data to (the last item will be the furthest from the start)
	if (!CheckFreeDiskSpace(m_nTotalBufferData)) {
		// Not enough free space to write the last item, bail
		AddLogLineC(CFormat( _("WARNING: Not enough free disk-space! Pausing file: %s") ) % GetFileName());
		PauseFile(true);
		return;
	}

	// Loop through queue
	while ( !m_BufferedData_list.empty() ) {
		// Get top item and remove it from the queue
		CScopedPtr<PartFileBufferedData> item(m_BufferedData_list.front());
		m_BufferedData_list.pop_front();

		// This is needed a few times
		wxASSERT((item->end - item->start) < 0xFFFFFFFF);
		uint32 lenData = (uint32)(item->end - item->start + 1);

		// SLUGFILLER: SafeHash - could be more than one part
		for (uint32 curpart = (item->start/PARTSIZE); curpart <= (item->end/PARTSIZE); ++curpart) {
			wxASSERT(curpart < partCount);
			changedPart[curpart] = true;
		}
		// SLUGFILLER: SafeHash

		// Go to the correct position in file and write block of data
		try {
			item->area.FlushAt(m_hpartfile, item->start, lenData);
			// Decrease buffer size
			m_nTotalBufferData -= lenData;
		} catch (const CIOFailureException& e) {
			AddDebugLogLineC(logPartFile, wxT("Error while saving part-file: ") + e.what());
			SetStatus(PS_ERROR);
			// No need to bang your head against it again and again if it has already failed.
			DeleteContents(m_BufferedData_list);
			m_nTotalBufferData = 0;
			return;
		}
	}

	// Update last-changed date
	m_lastDateChanged = wxDateTime::GetTimeNow();

	try {
		// Partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			// it's "last chance" correction. the real bugfix has to be applied 'somewhere' else
			m_hpartfile.SetLength(GetFileSize());
		}
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile,
			CFormat(wxT("Error while truncating part-file (%s): %s"))
				% m_PartPath % e.what());
		SetStatus(PS_ERROR);
	}

	// Check each part of the file
	for (uint16 partNumber = 0; partNumber < partCount; ++partNumber) {
		if (changedPart[partNumber] == false) {
			continue;
		}

		uint32 partRange = GetPartSize(partNumber) - 1;

		// Is this 9MB part complete
		if (IsComplete(partNumber)) {
			if (!HashSinglePart(partNumber)) {
				AddLogLineC(CFormat(
					_("Downloaded part %i is corrupt in file: %s") ) % partNumber % GetFileName() );
				// add part to corrupted list, if not already there
				if (!IsCorruptedPart(partNumber)) {
					m_corrupted_list.push_back(partNumber);
				}
				// request AICH recovery data
				// Don't if called from the AICHRecovery. It's already there and would lead to an infinite recursion.
				if (!fromAICHRecoveryDataAvailable) {
					RequestAICHRecovery(partNumber);
				}
				// Reduce transferred amount by corrupt amount
				m_iLostDueToCorruption += (partRange + 1);
			} else {
				if (!m_hashsetneeded) {
					AddDebugLogLineN(logPartFile, CFormat(
						wxT("Finished part %u of '%s'")) % partNumber % GetFileName());
				}

				// tell the blackbox about the verified data
				m_CorruptionBlackBox->VerifiedData(true, partNumber, 0, partRange);

				// if this part was successfully completed (although ICH is active), remove from corrupted list
				EraseFirstValue(m_corrupted_list, partNumber);

				if (status == PS_EMPTY) {
					if (theApp->IsRunning()) { // may be called during shutdown!
						if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
							// Successfully completed part, make it available for sharing
							SetStatus(PS_READY);
							theApp->sharedfiles->SafeAddKFile(this);
						}
					}
				}
			}
		} else if ( IsCorruptedPart(partNumber) &&		// corrupted part:
				(thePrefs::IsICHEnabled()		// old ICH:  rehash whenever we have new data hoping it will be good now
				 || fromAICHRecoveryDataAvailable)) {	// new AICH: one rehash right before performing it (maybe it's already good)
			// Try to recover with minimal loss
			if (HashSinglePart(partNumber)) {
				++m_iTotalPacketsSavedDueToICH;

				uint64 uMissingInPart = m_gaplist.GetGapSize(partNumber);
				FillGap(partNumber);
				RemoveBlockFromList(PARTSIZE*partNumber, (PARTSIZE*partNumber + partRange));

				// tell the blackbox about the verified data
				m_CorruptionBlackBox->VerifiedData(true, partNumber, 0, partRange);

				// remove from corrupted list
				EraseFirstValue(m_corrupted_list, partNumber);

				AddLogLineC(CFormat( _("ICH: Recovered corrupted part %i for %s -> Saved bytes: %s") )
					% partNumber
					% GetFileName()
					% CastItoXBytes(uMissingInPart));

				if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
					if (status == PS_EMPTY) {
						// Successfully recovered part, make it available for sharing
						SetStatus(PS_READY);
						if (theApp->IsRunning()) // may be called during shutdown!
							theApp->sharedfiles->SafeAddKFile(this);
					}
				}
			}
		}
	}

	if (theApp->IsRunning()) { // may be called during shutdown!
		// Is this file finished ?
		if (m_gaplist.IsComplete()) {
			CompleteFile(false);
		}
	}
}

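// Note on the flush logic above (summary, no additional behaviour): only parts
// flagged in changedPart are re-examined after writing. A part that has become
// gap-free is re-hashed with HashSinglePart(); on failure it is added to
// m_corrupted_list and AICH recovery data is requested (unless this flush was
// triggered by the AICH path itself), while a previously corrupted part that now
// hashes correctly is counted as an ICH/AICH recovery.
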
// read data for upload, return false on error
bool CPartFile::ReadData(CFileArea& area, uint64 offset, uint32 toread)
{
	if (offset + toread > GetFileSize()) {
		AddDebugLogLineN(logPartFile, CFormat(wxT("tried to read %d bytes past eof of %s"))
			% (offset + toread - GetFileSize()) % GetFileName());
		return false;
	}

	area.ReadAt(m_hpartfile, offset, toread);
	// if it fails it throws (which the caller should catch)
	return true;
}

void CPartFile::UpdateFileRatingCommentAvail()
{
	bool prevComment = m_hasComment;
	int prevRating = m_iUserRating;

	m_hasComment = false;
	m_iUserRating = 0;
	int ratingCount = 0;

	SourceSet::iterator it = m_SrcList.begin();
	for (; it != m_SrcList.end(); ++it) {
		CUpDownClient* cur_src = it->GetClient();

		if (!cur_src->GetFileComment().IsEmpty()) {
			if (thePrefs::IsCommentFiltered(cur_src->GetFileComment())) {
				continue;
			}
			m_hasComment = true;
		}

		uint8 rating = cur_src->GetFileRating();
		if (rating) {
			wxASSERT(rating <= 5);

			++ratingCount;
			m_iUserRating += rating;
		}
	}

	if (ratingCount) {
		m_iUserRating /= ratingCount;
		wxASSERT(m_iUserRating > 0 && m_iUserRating <= 5);
	}

	if ((prevComment != m_hasComment) || (prevRating != m_iUserRating)) {
		UpdateDisplayedInfo();
	}
}

void CPartFile::SetCategory(uint8 cat)
{
	wxASSERT( cat < theApp->glob_prefs->GetCatCount() );

	m_category = cat;
}

bool CPartFile::RemoveSource(CUpDownClient* toremove, bool updatewindow, bool bDoStatsUpdate)
{
	wxASSERT( toremove );

	bool result = theApp->downloadqueue->RemoveSource( toremove, updatewindow, bDoStatsUpdate );

	// Check if the client should be deleted, but not if the client is already dying
	if ( !toremove->GetSocket() && !toremove->HasBeenDeleted() ) {
		if ( toremove->Disconnected(wxT("RemoveSource - purged")) ) {
			toremove->Safe_Delete();
		}
	}

	return result;
}

void CPartFile::AddDownloadingSource(CUpDownClient* client)
{
	CClientRefList::iterator it =
		std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), CCLIENTREF(client, wxEmptyString));
	if (it == m_downloadingSourcesList.end()) {
		m_downloadingSourcesList.push_back(CCLIENTREF(client, wxT("CPartFile::AddDownloadingSource")));
	}
}


void CPartFile::RemoveDownloadingSource(CUpDownClient* client)
{
	CClientRefList::iterator it =
		std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), CCLIENTREF(client, wxEmptyString));
	if (it != m_downloadingSourcesList.end()) {
		m_downloadingSourcesList.erase(it);
	}
}

uint64 CPartFile::GetNeededSpace()
{
	try {
		uint64 length = m_hpartfile.GetLength();

		if (length > GetFileSize()) {
			return 0;	// Shouldn't happen, but just in case
		}

		return GetFileSize() - length;
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile,
			CFormat(wxT("Error while retrieving file-length (%s): %s"))
				% m_PartPath % e.what());
		SetStatus(PS_ERROR);
		return 0;
	}
}

void CPartFile::SetStatus(uint8 in)
{
	// PAUSED and INSUFFICIENT have extra flag variables m_paused and m_insufficient
	// - they are never to be stored in status
	wxASSERT( in != PS_PAUSED && in != PS_INSUFFICIENT );

	status = in;

	if (theApp->IsRunning()) {
		UpdateDisplayedInfo( true );

		if ( thePrefs::ShowCatTabInfos() ) {
			Notify_ShowUpdateCatTabTitles();
		}
		Notify_DownloadCtrlSort();
	}
}

void CPartFile::RequestAICHRecovery(uint16 nPart)
{
	if ( !m_pAICHHashSet->HasValidMasterHash() ||
		(m_pAICHHashSet->GetStatus() != AICH_TRUSTED && m_pAICHHashSet->GetStatus() != AICH_VERIFIED)) {
		AddDebugLogLineN( logAICHRecovery, wxT("Unable to request AICH Recoverydata because we have no trusted Masterhash") );
		return;
	}

	if (GetPartSize(nPart) <= EMBLOCKSIZE)
		return;

	if (CAICHHashSet::IsClientRequestPending(this, nPart)) {
		AddDebugLogLineN( logAICHRecovery, wxT("RequestAICHRecovery: Already a request for this part pending"));
		return;
	}

	// first check if we have already the recoverydata, no need to rerequest it then
	if (m_pAICHHashSet->IsPartDataAvailable(nPart*PARTSIZE)) {
		AddDebugLogLineN( logAICHRecovery, wxT("Found PartRecoveryData in memory"));
		AICHRecoveryDataAvailable(nPart);
		return;
	}

	wxASSERT( nPart < GetPartCount() );
	// find some random client which support AICH to ask for the blocks
	// first lets see how many we have at all, we prefer high id very much
	uint32 cAICHClients = 0;
	uint32 cAICHLowIDClients = 0;
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		CUpDownClient* pCurClient = it->GetClient();
		if ( pCurClient->IsSupportingAICH() &&
			pCurClient->GetReqFileAICHHash() != NULL &&
			!pCurClient->IsAICHReqPending()
			&& (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
		{
			if (pCurClient->HasLowID()) {
				++cAICHLowIDClients;
			} else {
				++cAICHClients;
			}
		}
	}

	if ((cAICHClients | cAICHLowIDClients) == 0) {
		AddDebugLogLineN( logAICHRecovery, wxT("Unable to request AICH Recoverydata because found no client who supports it and has the same hash as the trusted one"));
		return;
	}

	uint32 nSeclectedClient;
	if (cAICHClients > 0) {
		nSeclectedClient = (rand() % cAICHClients) + 1;
	} else {
		nSeclectedClient = (rand() % cAICHLowIDClients) + 1;
	}
	CUpDownClient* pClient = NULL;
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		CUpDownClient* pCurClient = it->GetClient();
		if (pCurClient->IsSupportingAICH() && pCurClient->GetReqFileAICHHash() != NULL && !pCurClient->IsAICHReqPending()
			&& (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
		{
			if (cAICHClients > 0) {
				if (!pCurClient->HasLowID())
					nSeclectedClient--;
			} else {
				wxASSERT( pCurClient->HasLowID());
				nSeclectedClient--;
			}
			if (nSeclectedClient == 0) {
				pClient = pCurClient;
				break;
			}
		}
	}

	if (pClient == NULL) {
		return;
	}

	AddDebugLogLineN( logAICHRecovery, CFormat( wxT("Requesting AICH Hash (%s) from client %s") ) % ( cAICHClients ? wxT("HighId") : wxT("LowID") ) % pClient->GetClientFullInfo() );
	pClient->SendAICHRequest(this, nPart);
}

void CPartFile::AICHRecoveryDataAvailable(uint16 nPart)
{
	if (GetPartCount() < nPart) {
		return;
	}

	uint32 length = GetPartSize(nPart);
	// if the part was already ok, it would now be complete
	if (IsComplete(nPart)) {
		AddDebugLogLineN(logAICHRecovery, CFormat(wxT("Processing AICH Recovery data: The part (%u) is already complete, canceling")) % nPart);
		return;
	}

	CAICHHashTree* pVerifiedHash = m_pAICHHashSet->m_pHashTree.FindHash(nPart*PARTSIZE, length);
	if (pVerifiedHash == NULL || !pVerifiedHash->GetHashValid()) {
		AddDebugLogLineC( logAICHRecovery, wxT("Processing AICH Recovery data: Unable to get verified hash from hashset (should never happen)") );
		return;
	}

	CAICHHashTree htOurHash(pVerifiedHash->GetNDataSize(), pVerifiedHash->GetIsLeftBranch(), pVerifiedHash->GetNBaseSize());
	try {
		CreateHashFromFile(m_hpartfile, PARTSIZE * nPart, length, NULL, &htOurHash);
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logAICHRecovery,
			CFormat(wxT("IO failure while hashing part-file '%s': %s"))
				% m_hpartfile.GetFilePath() % e.what());
		SetStatus(PS_ERROR);
		return;
	}

	if (!htOurHash.GetHashValid()) {
		AddDebugLogLineN( logAICHRecovery, wxT("Processing AICH Recovery data: Failed to retrieve AICH Hashset of corrupt part") );
		return;
	}

	// now compare the hash we just did, to the verified hash and readd all blocks which are ok
	uint32 nRecovered = 0;
	for (uint32 pos = 0; pos < length; pos += EMBLOCKSIZE) {
		const uint32 nBlockSize = min<uint32>(EMBLOCKSIZE, length - pos);
		CAICHHashTree* pVerifiedBlock = pVerifiedHash->FindHash(pos, nBlockSize);
		CAICHHashTree* pOurBlock = htOurHash.FindHash(pos, nBlockSize);
		if ( pVerifiedBlock == NULL || pOurBlock == NULL || !pVerifiedBlock->GetHashValid() || !pOurBlock->GetHashValid()) {
			continue;
		}
		if (pOurBlock->GetHash() == pVerifiedBlock->GetHash()) {
			FillGap(PARTSIZE*nPart + pos, PARTSIZE*nPart + pos + (nBlockSize-1));
			RemoveBlockFromList(PARTSIZE*nPart, PARTSIZE*nPart + (nBlockSize-1));
			nRecovered += nBlockSize;
			// tell the blackbox about the verified data
			m_CorruptionBlackBox->VerifiedData(true, nPart, pos, pos + nBlockSize - 1);
		} else {
			// inform our "blackbox" about the corrupted block which may ban clients who sent it
			m_CorruptionBlackBox->VerifiedData(false, nPart, pos, pos + nBlockSize - 1);
		}
	}
	m_CorruptionBlackBox->EvaluateData();

	// ok now some sanity checks
	if (IsComplete(nPart)) {
		// this is bad, but it could probably happen under some rare circumstances
		// make sure that MD4 agrees to this fact too
		if (!HashSinglePart(nPart)) {
			AddDebugLogLineN(logAICHRecovery,
				CFormat(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering - but MD4 says it corrupt! Setting hashset to error state, deleting part")) % nPart);
			// now we are fu... unhappy
			m_pAICHHashSet->SetStatus(AICH_ERROR);
			return;
		} else {
			AddDebugLogLineN(logAICHRecovery,
				CFormat(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering and MD4 agrees")) % nPart);
			if (status == PS_EMPTY && theApp->IsRunning()) {
				if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
					// Successfully recovered part, make it available for sharing
					SetStatus(PS_READY);
					theApp->sharedfiles->SafeAddKFile(this);
				}
			}

			if (theApp->IsRunning()) {
				// Is this file finished?
				if (m_gaplist.IsComplete()) {
					CompleteFile(false);
				}
			}
		}
	} // end sanity check

	// We did the best we could. If it's still incomplete, then no need to keep
	// bashing it with ICH. So remove it from the list of corrupted parts.
	EraseFirstValue(m_corrupted_list, nPart);

	// make sure the user appreciates our great recovering work :P
	AddDebugLogLineC( logAICHRecovery, CFormat(
		wxT("AICH successfully recovered %s of %s from part %u for %s") )
		% CastItoXBytes(nRecovered)
		% CastItoXBytes(length)
		% nPart
		% GetFileName() );
}

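// Illustrative numbers for the block loop above (assuming the usual eMule block
// size EMBLOCKSIZE = 184320 bytes, i.e. 180 kB): a full 9728000-byte part spans
// 53 AICH blocks, and the last one is only 9728000 - 52*184320 = 143360 bytes,
// which is why nBlockSize is clamped with min<uint32>().
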
void CPartFile::ClientStateChanged( int oldState, int newState )
{
	if ( oldState == newState )
		return;

	// If the state is -1, then it's an entirely new item
	if ( oldState != -1 ) {
		// Was the old state a valid state?
		if ( oldState == DS_ONQUEUE || oldState == DS_DOWNLOADING ) {
			m_validSources--;
		} else {
			if ( oldState == DS_CONNECTED /* || oldState == DS_REMOTEQUEUEFULL */ ) {
				m_validSources--;
			}

			m_notCurrentSources--;
		}
	}

	// If the state is -1, then the source is being removed
	if ( newState != -1 ) {
		// Was the old state a valid state?
		if ( newState == DS_ONQUEUE || newState == DS_DOWNLOADING ) {
			++m_validSources;
		} else {
			if ( newState == DS_CONNECTED /* || newState == DS_REMOTEQUEUEFULL */ ) {
				++m_validSources;
			}

			++m_notCurrentSources;
		}
	}
}

bool CPartFile::AddSource( CUpDownClient* client )
{
	if (m_SrcList.insert(CCLIENTREF(client, wxT("CPartFile::AddSource"))).second) {
		theStats::AddFoundSource();
		theStats::AddSourceOrigin(client->GetSourceFrom());
		return true;
	} else {
		return false;
	}
}


bool CPartFile::DelSource( CUpDownClient* client )
{
	if (m_SrcList.erase(CCLIENTREF(client, wxEmptyString))) {
		theStats::RemoveSourceOrigin(client->GetSourceFrom());
		theStats::RemoveFoundSource();
		return true;
	} else {
		return false;
	}
}

void CPartFile::UpdatePartsFrequency( CUpDownClient* client, bool increment )
{
	const BitVector& freq = client->GetPartStatus();

	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);
	}

	unsigned int size = freq.size();
	if ( size != m_SrcpartFrequency.size() ) {
		return;
	}

	if ( increment ) {
		for ( unsigned int i = 0; i < size; i++ ) {
			if ( freq.get(i) ) {
				m_SrcpartFrequency[i]++;
			}
		}
	} else {
		for ( unsigned int i = 0; i < size; i++ ) {
			if ( freq.get(i) ) {
				m_SrcpartFrequency[i]--;
			}
		}
	}
}

void CPartFile::GetRatingAndComments(FileRatingList& list) const
{
	// This can be pre-processed, but is it worth the CPU?
	CPartFile::SourceSet::const_iterator it = m_SrcList.begin();
	for ( ; it != m_SrcList.end(); ++it ) {
		CUpDownClient* cur_src = it->GetClient();
		if (cur_src->GetFileComment().Length() > 0 || cur_src->GetFileRating() > 0) {
			// AddDebugLogLineN(logPartFile, wxString(wxT("found a comment for ")) << GetFileName());
			list.push_back(SFileRating(*cur_src));
		}
	}
}

#else   // CLIENT_GUI

CPartFile::CPartFile(const CEC_PartFile_Tag *tag) : CKnownFile(tag)
{
	Init();

	SetFileName(CPath(tag->FileName()));
	m_abyFileHash = tag->FileHash();
	SetFileSize(tag->SizeFull());
	m_gaplist.Init(GetFileSize(), true);	// Init empty
	m_partmetfilename = CPath(tag->PartMetName());
	m_fullname = m_partmetfilename;		// We have only the met number, so show it without path in the detail dialog.

	m_SrcpartFrequency.insert(m_SrcpartFrequency.end(), GetPartCount(), 0);

	// these are only in CLIENT_GUI and not covered by Init()
	m_iDownPriorityEC = 0;
	m_a4af_source_count = 0;
}

/*
 * Remote gui specific code
 */
CPartFile::~CPartFile()
{
}


void CPartFile::GetRatingAndComments(FileRatingList& list) const
{
	list = m_FileRatingList;
}


void CPartFile::SetCategory(uint8 cat)
{
	m_category = cat;
}


bool CPartFile::AddSource(CUpDownClient* client)
{
	return m_SrcList.insert(CCLIENTREF(client, wxT("CPartFile::AddSource"))).second != 0;
}


bool CPartFile::DelSource(CUpDownClient* client)
{
	return m_SrcList.erase(CCLIENTREF(client, wxEmptyString)) != 0;
}

#endif // !CLIENT_GUI

void CPartFile::UpdateDisplayedInfo(bool force)
{
	uint32 curTick = ::GetTickCount();

	// Wait 1.5s between each redraw
	if (force || curTick - m_lastRefreshedDLDisplay > MINWAIT_BEFORE_DLDISPLAY_WINDOWUPDATE) {
		Notify_DownloadCtrlUpdateItem(this);
		m_lastRefreshedDLDisplay = curTick;
	}
}

void CPartFile::Init()
{
	m_lastsearchtime = 0;
	lastpurgetime = ::GetTickCount();
	m_insufficient = false;

	m_iLastPausePurge = time(NULL);

	if (thePrefs::GetNewAutoDown()) {
		m_iDownPriority = PR_HIGH;
		m_bAutoDownPriority = true;
	} else {
		m_iDownPriority = PR_NORMAL;
		m_bAutoDownPriority = false;
	}

	transferingsrc = 0; // new

	m_hashsetneeded = true;
	percentcompleted = 0;
	lastseencomplete = 0;
	m_availablePartsCount = 0;
	m_ClientSrcAnswered = 0;
	m_LastNoNeededCheck = 0;

	m_nTotalBufferData = 0;
	m_nLastBufferFlushTime = 0;
	m_bPercentUpdated = false;
	m_iGainDueToCompression = 0;
	m_iLostDueToCorruption = 0;
	m_iTotalPacketsSavedDueToICH = 0;

	m_lastRefreshedDLDisplay = 0;
	m_nDlActiveTime = 0;

	m_is_A4AF_auto = false;
	m_localSrcReqQueued = false;
	m_nCompleteSourcesTime = time(NULL);
	m_nCompleteSourcesCount = 0;
	m_nCompleteSourcesCountLo = 0;
	m_nCompleteSourcesCountHi = 0;

	m_notCurrentSources = 0;

	m_LastSearchTimeKad = 0;
	m_TotalSearchesKad = 0;

	m_CorruptionBlackBox = new CCorruptionBlackBox();
}

CPartFile::getPartfileStatus() const
3695 if ((status
== PS_HASHING
) || (status
== PS_WAITINGFORHASH
)) {
3696 mybuffer
=_("Hashing");
3697 } else if (status
== PS_ALLOCATING
) {
3698 mybuffer
= _("Allocating");
3700 switch (GetStatus()) {
3702 mybuffer
=_("Completing");
3705 mybuffer
=_("Complete");
3708 mybuffer
=_("Paused");
3711 mybuffer
=_("Erroneous");
3713 case PS_INSUFFICIENT
:
3714 mybuffer
= _("Insufficient disk space");
3717 if (GetTransferingSrcCount()>0) {
3718 mybuffer
=_("Downloading");
3720 mybuffer
=_("Waiting");
3724 if (m_stopped
&& (GetStatus()!=PS_COMPLETE
)) {
3725 mybuffer
=_("Stopped");
int CPartFile::getPartfileStatusRang() const
{
	int tempstatus = 0;
	if (GetTransferingSrcCount() == 0) tempstatus = 1;
	switch (GetStatus()) {
		case PS_HASHING:
		case PS_WAITINGFORHASH:
			tempstatus = 3;
			break;
		case PS_COMPLETING:
			tempstatus = 4;
			break;
		case PS_COMPLETE:
			tempstatus = 5;
			break;
		case PS_PAUSED:
			tempstatus = 2;
			break;
		case PS_ERROR:
			tempstatus = 6;
			break;
	}
	return tempstatus;
}

wxString CPartFile::GetFeedback() const
{
	wxString retval = CKnownFile::GetFeedback();
	if (GetStatus() != PS_COMPLETE) {
		retval += CFormat(wxT("%s: %s (%.2f%%)\n%s: %u\n"))
			% _("Downloaded") % CastItoXBytes(GetCompletedSize()) % GetPercentCompleted() % _("Sources") % GetSourceCount();
	}
	return retval + _("Status") + wxT(": ") + getPartfileStatus() + wxT("\n");
}

sint32 CPartFile::getTimeRemaining() const
{
	if (GetKBpsDown() < 0.001) {
		return -1;
	} else {
		return((GetFileSize() - GetCompletedSize()) / ((int)(GetKBpsDown()*1024.0)));
	}
}

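// Example (illustrative): with 400 MiB still missing and GetKBpsDown() = 50, the
// estimate is (400*1024*1024) / (50*1024) = 8192 seconds, roughly 2 hours 17 minutes.
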
bool CPartFile::PreviewAvailable()
{
	const uint64 minSizeForPreview = 256 * 1024;
	FileType type = GetFiletype(GetFileName());

	return (type == ftVideo || type == ftAudio) &&
		GetFileSize() >= minSizeForPreview &&
		IsComplete(0, minSizeForPreview);
}

bool CPartFile::CheckShowItemInGivenCat(int inCategory)
{
	// first check if item belongs in this cat in principle
	if (inCategory > 0 && inCategory != GetCategory()) {
		return false;
	}

	// if yes apply filter
	bool show = true;

	switch (thePrefs::GetAllcatFilter()) {
		case acfAllOthers:
			show = GetCategory() == 0 || inCategory > 0;
			break;
		case acfIncomplete:
			show = IsPartFile();
			break;
		case acfCompleted:
			show = !IsPartFile();
			break;
		case acfWaiting:
			show =
				(GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
				GetTransferingSrcCount() == 0;
			break;
		case acfDownloading:
			show =
				(GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
				GetTransferingSrcCount() > 0;
			break;
		case acfErroneous:
			show = GetStatus() == PS_ERROR;
			break;
		case acfPaused:
			show = GetStatus() == PS_PAUSED && !IsStopped();
			break;
		case acfVideo:
			show = GetFiletype(GetFileName()) == ftVideo;
			break;
		case acfAudio:
			show = GetFiletype(GetFileName()) == ftAudio;
			break;
		case acfArchive:
			show = GetFiletype(GetFileName()) == ftArchive;
			break;
		case acfCDImages:
			show = GetFiletype(GetFileName()) == ftCDImage;
			break;
		case acfPictures:
			show = GetFiletype(GetFileName()) == ftPicture;
			break;
		case acfText:
			show = GetFiletype(GetFileName()) == ftText;
			break;
		case acfActive:
			show = !IsStopped() && GetStatus() != PS_PAUSED;
			break;
		default:
			break;
	}

	return show;
}

void CPartFile::RemoveCategory(uint8 cat)
{
	if (m_category == cat) {
		// Reset the category
		m_category = 0;
	} else if (m_category > cat) {
		// Set to the new position of the original category
		m_category--;
	}
}

void CPartFile::SetActive(bool bActive)
{
	time_t tNow = time(NULL);
	if (bActive) {
		if (theApp->IsConnected()) {
			if (m_tActivated == 0) {
				m_tActivated = tNow;
			}
		}
	} else {
		if (m_tActivated != 0) {
			m_nDlActiveTime += tNow - m_tActivated;
			m_tActivated = 0;
		}
	}
}


uint32 CPartFile::GetDlActiveTime() const
{
	uint32 nDlActiveTime = m_nDlActiveTime;
	if (m_tActivated != 0) {
		nDlActiveTime += time(NULL) - m_tActivated;
	}
	return nDlActiveTime;
}

uint16 CPartFile::GetPartMetNumber() const
{
	long nr;
	return m_partmetfilename.RemoveAllExt().GetRaw().ToLong(&nr) ? nr : 0;
}


void CPartFile::SetHashingProgress(uint16 part) const
{
	m_hashingProgress = part;
	Notify_DownloadCtrlUpdateItem(this);
}

uint8 CPartFile::GetStatus(bool ignorepause) const
{
	if (	(!m_paused && !m_insufficient) ||
		status == PS_ERROR ||
		status == PS_COMPLETING ||
		status == PS_COMPLETE ||
		ignorepause) {
		return status;
	} else if ( m_insufficient ) {
		return PS_INSUFFICIENT;
	} else {
		return PS_PAUSED;
	}
}

void CPartFile::AddDeadSource(const CUpDownClient* client)
{
	m_deadSources.AddDeadSource( client );
}


bool CPartFile::IsDeadSource(const CUpDownClient* client)
{
	return m_deadSources.IsDeadSource( client );
}

& fileName
)
3941 CKnownFile
* pFile
= theApp
->sharedfiles
->GetFileByID(GetFileHash());
3943 bool is_shared
= (pFile
&& pFile
== this);
3946 // The file is shared, we must clear the search keywords so we don't
3947 // publish the old name anymore.
3948 theApp
->sharedfiles
->RemoveKeywords(this);
3951 CKnownFile::SetFileName(fileName
);
3954 // And of course, we must advertise the new name if the file is shared.
3955 theApp
->sharedfiles
->AddKeywords(this);
3958 UpdateDisplayedInfo(true);
uint16 CPartFile::GetMaxSources() const
{
	// This is just like this, while we don't import the private max sources per file
	return thePrefs::GetMaxSourcePerFile();
}


uint16 CPartFile::GetMaxSourcePerFileSoft() const
{
	unsigned int temp = ((unsigned int)GetMaxSources() * 9L) / 10;
	if (temp > MAX_SOURCES_FILE_SOFT) {
		return MAX_SOURCES_FILE_SOFT;
	}
	return temp;
}


uint16 CPartFile::GetMaxSourcePerFileUDP() const
{
	unsigned int temp = ((unsigned int)GetMaxSources() * 3L) / 4;
	if (temp > MAX_SOURCES_FILE_UDP) {
		return MAX_SOURCES_FILE_UDP;
	}
	return temp;
}

#define DROP_FACTOR 2

CUpDownClient* CPartFile::GetSlowerDownloadingClient(uint32 speed, CUpDownClient* caller) {
//	printf("Start slower source calculation\n");
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = it++->GetClient();
		if ((cur_src->GetDownloadState() == DS_DOWNLOADING) && (cur_src != caller)) {
			uint32 factored_bytes_per_second = static_cast<uint32>(
				(cur_src->GetKBpsDown() * 1024) * DROP_FACTOR);
			if ( factored_bytes_per_second < speed ) {
//				printf("Selecting source %p to drop: %d < %d\n", cur_src, factored_bytes_per_second, speed);
//				printf("End slower source calculation\n");
				return cur_src;
			} else {
//				printf("Not selecting source %p to drop: %d > %d\n", cur_src, factored_bytes_per_second, speed);
			}
		}
	}
//	printf("End slower source calculation\n");
	return NULL;
}

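// Example of the DROP_FACTOR test above (illustrative): for a caller downloading
// at speed = 51200 bytes/s (50 kB/s), a source running at 20 kB/s gives
// 20*1024*2 = 40960 < 51200 and is returned as a drop candidate, while a 30 kB/s
// source gives 61440 and is kept.
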
void CPartFile::AllocationFinished()
{
	// see if it can be opened
	if (!m_hpartfile.Open(m_PartPath, CFile::read_write)) {
		AddLogLineN(CFormat(_("ERROR: Failed to open partfile '%s'")) % GetFullName());
		SetStatus(PS_ERROR);
	}
	// then close the handle again
	m_hpartfile.Release(true);
}

// File_checked_for_headers