//
// This file is part of the aMule Project.
//
// Copyright (c) 2003-2011 aMule Team ( admin@amule.org / http://www.amule.org )
// Copyright (c) 2002-2011 Merkur ( devs@emule-project.net / http://www.emule-project.net )
//
// Any parts of this program derived from the xMule, lMule or eMule project,
// or contributed by third-party developers are copyrighted by their
// respective authors.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
//
#include "PartFile.h"		// Interface declarations.

#include "config.h"		// Needed for VERSION

#include <protocol/kad/Constants.h>
#include <protocol/ed2k/Client2Client/TCP.h>
#include <protocol/Protocols.h>
#include <common/DataFileVersion.h>
#include <common/Constants.h>
#include <tags/FileTags.h>

#include <wx/tokenzr.h>		// Needed for wxStringTokenizer

#include "KnownFileList.h"	// Needed for CKnownFileList
#include "CanceledFileList.h"
#include "UploadQueue.h"	// Needed for CFileHash
#include "IPFilter.h"		// Needed for CIPFilter
#include "Server.h"		// Needed for CServer
#include "ServerConnect.h"	// Needed for CServerConnect
#include "UpDownClientEC.h"	// Needed for CUpDownClient
#include "updownclient.h"	// Needed for CUpDownClient
#include "MemFile.h"		// Needed for CMemFile
#include "Preferences.h"	// Needed for CPreferences
#include "DownloadQueue.h"	// Needed for CDownloadQueue
#include "amule.h"		// Needed for theApp
#include "ED2KLink.h"		// Needed for CED2KLink
#include "Packet.h"		// Needed for CTag
#include "SearchList.h"		// Needed for CSearchFile
#include "ClientList.h"		// Needed for clientlist
#include "Statistics.h"		// Needed for theStats
#include <common/Format.h>	// Needed for CFormat
#include <common/FileFunctions.h>	// Needed for GetLastModificationTime
#include "ThreadTasks.h"	// Needed for CHashingTask/CCompletionTask/CAllocateFileTask
#include "GuiEvents.h"		// Needed for Notify_*
#include "DataToText.h"		// Needed for OriginToText()
#include "PlatformSpecific.h"	// Needed for CreateSparseFile()
#include "FileArea.h"		// Needed for CFileArea
#include "ScopedPtr.h"		// Needed for CScopedArray
#include "CorruptionBlackBox.h"

#include "kademlia/kademlia/Kademlia.h"
#include "kademlia/kademlia/Search.h"
SFileRating::SFileRating(const wxString &u, const wxString &f, sint16 r, const wxString &c)

SFileRating::SFileRating(const SFileRating &fr)
	UserName(fr.UserName),
	FileName(fr.FileName),

SFileRating::SFileRating(const CUpDownClient &client)
	UserName(client.GetUserName()),
	FileName(client.GetClientFilename()),
	Rating(client.GetFileRating()),
	Comment(client.GetFileComment())

SFileRating::~SFileRating()


class PartFileBufferedData
	CFileArea area;				// File area to be written
	uint64 start;				// This is the start offset of the data
	uint64 end;				// This is the end offset of the data
	Requested_Block_Struct *block;		// This is the requested block that this data relates to

	PartFileBufferedData(CFileAutoClose& file, byte *data, uint64 _start, uint64 _end, Requested_Block_Struct *_block)
		: start(_start), end(_end), block(_block)
		area.StartWriteAt(file, start, end-start+1);
		memcpy(area.GetBuffer(), data, end-start+1);


typedef std::list<Chunk> ChunkList;
CPartFile::CPartFile()

CPartFile::CPartFile(CSearchFile* searchresult)
	m_abyFileHash = searchresult->GetFileHash();
	SetFileName(searchresult->GetFileName());
	SetFileSize(searchresult->GetFileSize());

	for (unsigned int i = 0; i < searchresult->m_taglist.size(); ++i){
		const CTag& pTag = searchresult->m_taglist[i];

		bool bTagAdded = false;
		if (pTag.GetNameID() == 0 && !pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {
				{ wxT(FT_ED2K_MEDIA_ARTIST),	2 },
				{ wxT(FT_ED2K_MEDIA_ALBUM),	2 },
				{ wxT(FT_ED2K_MEDIA_TITLE),	2 },
				{ wxT(FT_ED2K_MEDIA_LENGTH),	2 },
				{ wxT(FT_ED2K_MEDIA_BITRATE),	3 },
				{ wxT(FT_ED2K_MEDIA_CODEC),	2 }

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if (pTag.GetType() == _aMetaTags[t].nType &&
					(pTag.GetName() == _aMetaTags[t].pszName)) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

					// skip "length" tags with "0: 0" values
					if (pTag.GetName() == wxT(FT_ED2K_MEDIA_LENGTH)) {
						if (pTag.GetStr().IsSameAs(wxT("0: 0")) ||
							pTag.GetStr().IsSameAs(wxT("0:0"))) {

					// skip "bitrate" tags with '0' values
					if ((pTag.GetName() == wxT(FT_ED2K_MEDIA_BITRATE)) && !pTag.GetInt()) {

					AddDebugLogLineN( logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);
		} else if (pTag.GetNameID() != 0 && pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if (pTag.GetType() == _aMetaTags[t].nType && pTag.GetNameID() == _aMetaTags[t].nID) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

					AddDebugLogLineN( logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);

			AddDebugLogLineN( logPartFile,
				wxT("CPartFile::CPartFile(CSearchFile*): ignored tag ") +
				pTag.GetFullInfo() );
CPartFile::CPartFile(const CED2KFileLink* fileLink)
	SetFileName(CPath(fileLink->GetName()));
	SetFileSize(fileLink->GetSize());
	m_abyFileHash = fileLink->GetHashKey();

	if (fileLink->m_hashset) {
		if (!LoadHashsetFromFile(fileLink->m_hashset, true)) {
			AddDebugLogLineC(logPartFile, wxT("eD2K link contained invalid hashset: ") + fileLink->GetLink());
CPartFile::~CPartFile()
	// if it's not opened, it was completed or deleted
	if (m_hpartfile.IsOpened()) {

	// Update met file (with current directory entry)

	DeleteContents(m_BufferedData_list);
	delete m_CorruptionBlackBox;

	wxASSERT(m_SrcList.empty());
	wxASSERT(m_A4AFsrclist.empty());
void CPartFile::CreatePartFile()
	// use lowest free partfilenumber for free file (InterCeptor)
		m_partmetfilename = CPath(CFormat(wxT("%03i.part.met")) % i);
		m_fullname = thePrefs::GetTempDir().JoinPaths(m_partmetfilename);
	} while (m_fullname.FileExists());

	m_CorruptionBlackBox->SetPartFileInfo(GetFileName().GetPrintable(), m_partmetfilename.RemoveAllExt().GetPrintable());

	wxString strPartName = m_partmetfilename.RemoveExt().GetRaw();
	m_taglist.push_back(CTagString(FT_PARTFILENAME, strPartName));

	m_gaplist.Init(GetFileSize(), true);	// Init empty

	m_PartPath = m_fullname.RemoveExt();
	if (thePrefs::GetAllocFullFile()) {
		fileCreated = m_hpartfile.Create(m_PartPath, true);
		fileCreated = PlatformSpecific::CreateSparseFile(m_PartPath, GetFileSize());
		AddLogLineN(_("ERROR: Failed to create partfile"));

	SetFilePath(thePrefs::GetTempDir());

	if (thePrefs::GetAllocFullFile()) {
		SetStatus(PS_ALLOCATING);
		CThreadScheduler::AddTask(new CAllocateFileTask(this, thePrefs::AddNewFilesPaused()));
		AllocationFinished();

	m_hashsetneeded = (GetED2KPartHashCount() > 0);

	SetActive(theApp->IsConnected());
uint8 CPartFile::LoadPartFile(const CPath& in_directory, const CPath& filename, bool from_backup, bool getsizeonly)
	bool isnewstyle = false;
	uint8 version, partmettype = PMT_UNKNOWN;

	std::map<uint16, Gap_Struct*> gap_map; // Slugfiller

	m_partmetfilename = filename;
	m_CorruptionBlackBox->SetPartFileInfo(GetFileName().GetPrintable(), m_partmetfilename.RemoveAllExt().GetPrintable());
	m_filePath = in_directory;
	m_fullname = m_filePath.JoinPaths(m_partmetfilename);
	m_PartPath = m_fullname.RemoveExt();

	// read file data from the part.met file
	CPath curMetFilename = m_fullname;
		curMetFilename = curMetFilename.AppendExt(PARTMET_BAK_EXT);
		AddLogLineN(CFormat( _("Trying to load backup of met-file from %s") )

		CFile metFile(curMetFilename, CFile::read);
		if (!metFile.IsOpened()) {
			AddLogLineN(CFormat( _("ERROR: Failed to open part.met file: %s ==> %s") )
		} else if (metFile.GetLength() == 0) {
			AddLogLineN(CFormat( _("ERROR: part.met file is 0 size: %s ==> %s") )

		version = metFile.ReadUInt8();
		if (version != PARTFILE_VERSION && version != PARTFILE_SPLITTEDVERSION && version != PARTFILE_VERSION_LARGEFILE){
			//if (version == 83) return ImportShareazaTempFile(...)
			AddLogLineN(CFormat( _("ERROR: Invalid part.met file version: %s ==> %s") )

		isnewstyle = (version == PARTFILE_SPLITTEDVERSION);
		partmettype = isnewstyle ? PMT_SPLITTED : PMT_DEFAULTOLD;

		if (version == PARTFILE_VERSION) { // Do we still need this check?
			uint8 test[4];	// It will fail for certain files.
			metFile.Seek(24, wxFromStart);
			metFile.Read(test, 4);

			metFile.Seek(1, wxFromStart);
			if (test[0]==0 && test[1]==0 && test[2]==2 && test[3]==1) {
				isnewstyle = true;	// eDonkey's so-called "old part style"
				partmettype = PMT_NEWOLD;

			uint32 temp = metFile.ReadUInt32();
			if (temp == 0) {	// 0.48 partmets - different again
				LoadHashsetFromFile(&metFile, false);
				metFile.Seek(2, wxFromStart);
				LoadDateFromFile(&metFile);
				m_abyFileHash = metFile.ReadHash();

			LoadDateFromFile(&metFile);
			LoadHashsetFromFile(&metFile, false);

		uint32 tagcount = metFile.ReadUInt32();

		for (uint32 j = 0; j < tagcount; ++j) {
			CTag newtag(metFile, true);
				(newtag.GetNameID() == FT_FILESIZE ||
				 newtag.GetNameID() == FT_FILENAME))) {
				switch(newtag.GetNameID()) {
						if (!GetFileName().IsOk()) {
							// If it's not empty, we already loaded the unicoded one
							SetFileName(CPath(newtag.GetStr()));
					case FT_LASTSEENCOMPLETE: {
						lastseencomplete = newtag.GetInt();
						SetFileSize(newtag.GetInt());
					case FT_TRANSFERRED: {
						transferred = newtag.GetInt();
						//#warning needs setfiletype string
						//SetFileType(newtag.GetStr());
						m_category = newtag.GetInt();
						if (m_category > theApp->glob_prefs->GetCatCount() - 1 ) {
					case FT_OLDDLPRIORITY:
					case FT_DLPRIORITY: {
							m_iDownPriority = newtag.GetInt();
							if( m_iDownPriority == PR_AUTO ){
								m_iDownPriority = PR_HIGH;
								SetAutoDownPriority(true);
								if ( m_iDownPriority != PR_LOW &&
									 m_iDownPriority != PR_NORMAL &&
									 m_iDownPriority != PR_HIGH )
									m_iDownPriority = PR_NORMAL;
								SetAutoDownPriority(false);
						m_paused = (newtag.GetInt() == 1);
						m_stopped = m_paused;
					case FT_OLDULPRIORITY:
					case FT_ULPRIORITY: {
							SetUpPriority(newtag.GetInt(), false);
							if( GetUpPriority() == PR_AUTO ){
								SetUpPriority(PR_HIGH, false);
								SetAutoUpPriority(true);
								SetAutoUpPriority(false);
					case FT_KADLASTPUBLISHSRC:{
						SetLastPublishTimeKadSrc(newtag.GetInt(), 0);
						if(GetLastPublishTimeKadSrc() > (uint32)time(NULL)+KADEMLIAREPUBLISHTIMES) {
							// There may be a possibility of an older client that saved a random number here.. This will check for that..
							SetLastPublishTimeKadSrc(0, 0);
					case FT_KADLASTPUBLISHNOTES:{
						SetLastPublishTimeKadNotes(newtag.GetInt());
					// old tags: as long as they are not needed, take the chance to purge them
					case FT_KADLASTPUBLISHKEY:
					case FT_DL_ACTIVE_TIME:
						if (newtag.IsInt()) {
							m_nDlActiveTime = newtag.GetInt();
					case FT_CORRUPTEDPARTS: {
						wxASSERT(m_corrupted_list.empty());
						wxString strCorruptedParts(newtag.GetStr());
						wxStringTokenizer tokenizer(strCorruptedParts, wxT(","));
						while ( tokenizer.HasMoreTokens() ) {
							wxString token = tokenizer.GetNextToken();
							if (token.ToULong(&uPart)) {
								if (uPart < GetPartCount() && !IsCorruptedPart(uPart)) {
									m_corrupted_list.push_back(uPart);
							hash.DecodeBase32(newtag.GetStr()) == CAICHHash::GetHashSize();
						wxASSERT(hashSizeOk);
							m_pAICHHashSet->SetMasterHash(hash, AICH_VERIFIED);
					case FT_ATTRANSFERRED:{
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (uint64)newtag.GetInt());
					case FT_ATTRANSFERREDHI:{
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (((uint64)newtag.GetInt()) << 32));
					case FT_ATREQUESTED:{
						statistic.SetAllTimeRequests(newtag.GetInt());
						statistic.SetAllTimeAccepts(newtag.GetInt());

						// Start Changes by Slugfiller for better exception handling
						wxCharBuffer tag_ansi_name = newtag.GetName().ToAscii();
						char gap_mark = tag_ansi_name ? tag_ansi_name[0u] : 0;
						if ( newtag.IsInt() && (newtag.GetName().Length() > 1) &&
							((gap_mark == FT_GAPSTART) ||
							 (gap_mark == FT_GAPEND))) {
							Gap_Struct *gap = NULL;
							unsigned long int gapkey;
							if (newtag.GetName().Mid(1).ToULong(&gapkey)) {
								if ( gap_map.find( gapkey ) == gap_map.end() ) {
									gap = new Gap_Struct;
									gap_map[gapkey] = gap;
									gap->start = (uint64)-1;
									gap->end = (uint64)-1;
									gap = gap_map[ gapkey ];
								if (gap_mark == FT_GAPSTART) {
									gap->start = newtag.GetInt();
								if (gap_mark == FT_GAPEND) {
									gap->end = newtag.GetInt()-1;
								AddDebugLogLineN(logPartFile, wxT("Wrong gap map key while reading met file!"));
						// End Changes by Slugfiller for better exception handling
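						// How gaps are stored in part.met (see also SavePartFile() below):
						// each gap becomes a pair of integer tags whose name is a single
						// marker character (FT_GAPSTART or FT_GAPEND) followed by a decimal
						// key; the key only pairs a start tag with its matching end tag.
						// The stored end value is the first byte *after* the gap (eDonkey
						// convention), hence the -1 here and the +1 when gaps are written out.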
						m_taglist.push_back(newtag);
					// Nothing. Else, nothing.

		// load the hashsets from the hybridstylepartmet
		if (isnewstyle && !getsizeonly && (metFile.GetPosition()<metFile.GetLength()) ) {
			metFile.Seek(1, wxFromCurrent);

			uint16 parts = GetPartCount();	// assuming we will get all hashsets

			for (uint16 i = 0; i < parts && (metFile.GetPosition()+16<metFile.GetLength()); ++i){
				CMD4Hash cur_hash = metFile.ReadHash();
				m_hashlist.push_back(cur_hash);

			if (!m_hashlist.empty()) {
				CreateHashFromHashlist(m_hashlist, &checkhash);
				if (m_abyFileHash != checkhash) {

	} catch (const CInvalidPacket& e) {
		AddLogLineC(CFormat(_("Error: %s (%s) is corrupt (bad tags: %s), unable to load file."))
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile, CFormat( wxT("IO failure while loading '%s': %s") )
	} catch (const CEOFException& WXUNUSED(e)) {
		AddLogLineC(CFormat( _("ERROR: %s (%s) is corrupt (wrong tagcount), unable to load file.") )
		AddLogLineC(_("Trying to recover file info..."));

		// A file is considered safe to recover if it has:

		// We have filesize, try other needed info

		// Do we need to check gaps? I think not,
		// because they are checked below. Worst
		// scenario will only mark file as 0 bytes downloaded.

		if (!GetFileName().IsOk()) {
			// Not critical, let's put a random filename.
				"Recovering no-named file - will try to recover it as RecoveredFile.dat"));
			SetFileName(CPath(wxT("RecoveredFile.dat")));

		AddLogLineC(_("Recovered all available file info :D - Trying to use it..."));
		AddLogLineC(_("Unable to recover file info :("));

	m_gaplist.Init(GetFileSize(), false);	// Init full, then add gaps

	// Now to flush the map into the list (Slugfiller)
	std::map<uint16, Gap_Struct*>::iterator it = gap_map.begin();
	for ( ; it != gap_map.end(); ++it ) {
		Gap_Struct* gap = it->second;
		// SLUGFILLER: SafeHash - revised code, and extra safety
		if ( (gap->start != (uint64)-1) &&
			 (gap->end != (uint64)-1) &&
			 gap->start <= gap->end &&
			 gap->start < GetFileSize()) {
			if (gap->end >= GetFileSize()) {
				gap->end = GetFileSize()-1; // Clipping
			m_gaplist.AddGap(gap->start, gap->end); // All tags accounted for, use safe adding
		// SLUGFILLER: SafeHash

	//check if this is a backup
	if ( m_fullname.GetExt().MakeLower() == wxT("backup" )) {
		m_fullname = m_fullname.RemoveExt();

	// open permanent handle
	if ( !m_hpartfile.Open(m_PartPath, CFile::read_write)) {
		AddLogLineN(CFormat( _("Failed to open %s (%s)") )

		// SLUGFILLER: SafeHash - final safety, make sure any missing part of the file is gap
		if (m_hpartfile.GetLength() < GetFileSize())
			AddGap(m_hpartfile.GetLength(), GetFileSize()-1);
		// Goes both ways - Partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			AddDebugLogLineC(logPartFile, CFormat( wxT("Partfile \"%s\" is too large! Truncating %llu bytes.") ) % GetFileName() % (m_hpartfile.GetLength() - GetFileSize()));
			m_hpartfile.SetLength(GetFileSize());
		// SLUGFILLER: SafeHash
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile, CFormat( wxT("Error while accessing partfile \"%s\": %s") ) % GetFileName() % e.what());

	// now close the file again until needed
	m_hpartfile.Release(true);

	// check hashcount, file status etc
	if (GetHashCount() != GetED2KPartHashCount()){
		m_hashsetneeded = true;
		m_hashsetneeded = false;
		for (size_t i = 0; i < m_hashlist.size(); ++i) {

	if (m_gaplist.IsComplete()) { // is this file complete already?

	if (!isnewstyle) { // not for importing
		const time_t file_date = CPath::GetModificationTime(m_PartPath);
		if (m_lastDateChanged != file_date) {
			// It's pointless to rehash an empty file, since the case
			// where a user has zero'd a file is handled above ...
			if (m_hpartfile.GetLength()) {
				AddLogLineN(CFormat( _("WARNING: %s might be corrupted (%i)") )
					% (m_lastDateChanged - file_date) );

				SetStatus(PS_WAITINGFORHASH);

				CPath partFileName = m_partmetfilename.RemoveExt();
				CThreadScheduler::AddTask(new CHashingTask(m_filePath, partFileName, this));

	UpdateCompletedInfos();
	if (completedsize > transferred) {
		m_iGainDueToCompression = completedsize - transferred;
	} else if (completedsize != transferred) {
		m_iLostDueToCorruption = transferred - completedsize;
bool CPartFile::SavePartFile(bool Initial)
		case PS_WAITINGFORHASH:

	/* Don't write anything to disk if less than 100 KB of free space is left. */
	sint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if ((free != wxInvalidOffset) && (free < (100 * 1024))) {

		if (!m_PartPath.FileExists()) {
			throw wxString(wxT(".part file not found"));

		uint32 lsc = lastseencomplete;

		CPath::BackupFile(m_fullname, wxT(".backup"));
		CPath::RemoveFile(m_fullname);

		file.Open(m_fullname, CFile::write);
		if (!file.IsOpened()) {
			throw wxString(wxT("Failed to open part.met file"));

		file.WriteUInt8(IsLargeFile() ? PARTFILE_VERSION_LARGEFILE : PARTFILE_VERSION);

		file.WriteUInt32(CPath::GetModificationTime(m_PartPath));

		file.WriteHash(m_abyFileHash);
		uint16 parts = m_hashlist.size();
		file.WriteUInt16(parts);
		for (int x = 0; x < parts; ++x) {
			file.WriteHash(m_hashlist[x]);

#define FIXED_TAGS 15
		uint32 tagcount = m_taglist.size() + FIXED_TAGS + (m_gaplist.size()*2);
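		// Each gap contributes two tags (a start and an end tag), and every optional
		// tag written further down must also be counted here, so the conditional
		// increments below have to stay in sync with the actual WriteTagToFile() calls.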
		if (!m_corrupted_list.empty()) {
		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
		if (GetLastPublishTimeKadSrc()){
		if (GetLastPublishTimeKadNotes()){
		if (GetDlActiveTime()){

		file.WriteUInt32(tagcount);

		//#warning Kry - Where are lost by corruption and gained by compression?

		// 0 (unicoded part file name)
		// We write it with BOM to keep eMule compatibility. Note that the 'printable' filename is saved,
		// as presently the filename does not represent an actual file.
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file, utf8strOptBOM );
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file );	// 1

		CTagIntSized( FT_FILESIZE, GetFileSize(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 2
		CTagIntSized( FT_TRANSFERRED, transferred, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 3
		CTagInt32( FT_STATUS, (m_paused ? 1 : 0)).WriteTagToFile( &file );	// 4

		if ( IsAutoDownPriority() ) {
			CTagInt32( FT_DLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 6
			CTagInt32( FT_DLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 6

		CTagInt32( FT_LASTSEENCOMPLETE, lsc ).WriteTagToFile( &file );	// 7

		if ( IsAutoUpPriority() ) {
			CTagInt32( FT_ULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 9
			CTagInt32( FT_ULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 9

		CTagInt32(FT_CATEGORY, m_category).WriteTagToFile( &file );	// 10
		CTagInt32(FT_ATTRANSFERRED, statistic.GetAllTimeTransferred() & 0xFFFFFFFF).WriteTagToFile( &file );	// 11
		CTagInt32(FT_ATTRANSFERREDHI, statistic.GetAllTimeTransferred() >> 32).WriteTagToFile( &file );	// 12
		CTagInt32(FT_ATREQUESTED, statistic.GetAllTimeRequests()).WriteTagToFile( &file );	// 13
		CTagInt32(FT_ATACCEPTED, statistic.GetAllTimeAccepts()).WriteTagToFile( &file );	// 14

		// corrupt part infos
		if (!m_corrupted_list.empty()) {
			wxString strCorruptedParts;
			std::list<uint16>::iterator it = m_corrupted_list.begin();
			for (; it != m_corrupted_list.end(); ++it) {
				uint16 uCorruptedPart = *it;
				if (!strCorruptedParts.IsEmpty()) {
					strCorruptedParts += wxT(",");
				strCorruptedParts += CFormat(wxT("%u")) % uCorruptedPart;
			wxASSERT( !strCorruptedParts.IsEmpty() );

			CTagString( FT_CORRUPTEDPARTS, strCorruptedParts ).WriteTagToFile( &file);	// 11?

		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
			CTagString aichtag(FT_AICH_HASH, m_pAICHHashSet->GetMasterHash().GetString() );
			aichtag.WriteTagToFile(&file);	// 12?

		if (GetLastPublishTimeKadSrc()){
			CTagInt32(FT_KADLASTPUBLISHSRC, GetLastPublishTimeKadSrc()).WriteTagToFile(&file);	// 15?

		if (GetLastPublishTimeKadNotes()){
			CTagInt32(FT_KADLASTPUBLISHNOTES, GetLastPublishTimeKadNotes()).WriteTagToFile(&file);	// 16?

		if (GetDlActiveTime()){
			CTagInt32(FT_DL_ACTIVE_TIME, GetDlActiveTime()).WriteTagToFile(&file);	// 17

		for (uint32 j = 0; j < (uint32)m_taglist.size(); ++j) {
			m_taglist[j].WriteTagToFile(&file);

		for (CGapList::const_iterator it = m_gaplist.begin(); it != m_gaplist.end(); ++it) {
			wxString tagName = CFormat(wxT(" %u")) % i_pos;

			// gap start = first missing byte but gap ends = first non-missing byte
			// in edonkey, but I think it's easier to use the real limits
			tagName[0] = FT_GAPSTART;
			CTagIntSized(tagName, it.start(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

			tagName[0] = FT_GAPEND;
			CTagIntSized(tagName, it.end() + 1, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

	} catch (const wxString& error) {
		AddLogLineNS(CFormat( _("ERROR while saving partfile: %s (%s ==> %s)") )
	} catch (const CIOFailureException& e) {
		AddLogLineCS(_("IO failure while saving partfile: ") + e.what());

		CPath::RemoveFile(m_fullname.AppendExt(wxT(".backup")));

		sint64 metLength = m_fullname.GetFileSize();
		if (metLength == wxInvalidOffset) {
			theApp->ShowAlert( CFormat( _("Could not retrieve length of '%s' - using %s file.") )
			CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
		} else if (metLength == 0) {
			// Don't backup if it's 0 size but raise a warning!!!
			theApp->ShowAlert( CFormat( _("'%s' is 0 size somehow - using %s file.") )
			CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
			// no error, just backup
			CPath::BackupFile(m_fullname, PARTMET_BAK_EXT);
void CPartFile::SaveSourceSeeds()
#define MAX_SAVED_SOURCES 10
	// Kry - Sources seeds
	// Based on a Feature request, this saves the last MAX_SAVED_SOURCES
	// sources of the file, giving a 'seed' for the next run.
	// We save the last sources because:
	// 1 - They could be the hardest to get
	// 2 - They are the most likely to still be available
	// However, if we have downloading sources, they have preference because
	// we probably have more credits on them.
	// Anyway, source exchange will get us the rest of the sources
	// This feature is currently used only on rare files (< 20 sources)

	if (GetSourceCount() > 20) {

	CClientRefList source_seeds;

	CClientRefList::iterator it = m_downloadingSourcesList.begin();
	for( ; it != m_downloadingSourcesList.end() && n_sources < MAX_SAVED_SOURCES; ++it) {
		if (!it->HasLowID()) {
			source_seeds.push_back(*it);

	if (n_sources < MAX_SAVED_SOURCES) {
		// Not enough downloading sources to fill the list, going to sources list
		if (GetSourceCount() > 0) {
			SourceSet::reverse_iterator rit = m_SrcList.rbegin();
			for ( ; ((rit != m_SrcList.rend()) && (n_sources < MAX_SAVED_SOURCES)); ++rit) {
				if (!rit->HasLowID()) {
					source_seeds.push_back(*rit);

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));

	file.Create(seedsPath, true);
	if (!file.IsOpened()) {
		AddLogLineN(CFormat( _("Failed to save part.met.seeds file for %s") )

		file.WriteUInt8(0); // v3, to avoid v2 clients choking on it.
		file.WriteUInt8(source_seeds.size());

		CClientRefList::iterator it2 = source_seeds.begin();
		for (; it2 != source_seeds.end(); ++it2) {
			CUpDownClient* cur_src = it2->GetClient();
			file.WriteUInt32(cur_src->GetUserIDHybrid());
			file.WriteUInt16(cur_src->GetUserPort());
			file.WriteHash(cur_src->GetUserHash());
			// CryptSettings - See SourceExchange V4
			const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
			const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
			const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
			const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
			file.WriteUInt8(byCryptOptions);
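			// Bit layout as encoded above: bit 0 = supports, bit 1 = requests,
			// bit 2 = requires obfuscation; the remaining bits stay zero here.
			// (When source lists are read back, e.g. in AddSources(), bit 7
			// additionally flags that a user hash follows.)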
			/* v2: Added to keep track of too old seeds */
			file.WriteUInt32(wxDateTime::Now().GetTicks());

		AddLogLineN(CFormat( wxPLURAL("Saved %i source seed for partfile: %s (%s)", "Saved %i source seeds for partfile: %s (%s)", n_sources) )

	} catch (const CIOFailureException& e) {
		AddDebugLogLineC( logPartFile, CFormat( wxT("Error saving partfile's seeds file (%s - %s): %s") )

		CPath::RemoveFile(seedsPath);


void CPartFile::LoadSourceSeeds()
	CMemFile sources_data;

	bool valid_sources = false;

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));
	if (!seedsPath.FileExists()) {

	CFile file(seedsPath, CFile::read);
	if (!file.IsOpened()) {
		AddLogLineN(CFormat( _("Partfile %s (%s) has no seeds file") )

		if (file.GetLength() <= 1) {
			AddLogLineN(CFormat( _("Partfile %s (%s) has a void seeds file") )

		uint8 src_count = file.ReadUInt8();

		bool bUseSX2Format = (src_count == 0);
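		// A leading zero byte marks the newer seeds format ("SX2", written by
		// SaveSourceSeeds() above as "v3"); in that case the real source count
		// follows, otherwise the first byte already is the count.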
		if (bUseSX2Format) {
			src_count = file.ReadUInt8();

		sources_data.WriteUInt16(src_count);

		for (int i = 0; i < src_count; ++i) {
			uint32 dwID  = file.ReadUInt32();
			uint16 nPort = file.ReadUInt16();

			sources_data.WriteUInt32(bUseSX2Format ? dwID : wxUINT32_SWAP_ALWAYS(dwID));
			sources_data.WriteUInt16(nPort);
			sources_data.WriteUInt32(0);
			sources_data.WriteUInt16(0);

			if (bUseSX2Format) {
				sources_data.WriteHash(file.ReadHash());
				sources_data.WriteUInt8(file.ReadUInt8());

			// v2: Added to keep track of too old seeds
			time_t time = (time_t)file.ReadUInt32();

			// Time frame is 2 hours. More than enough to compile
			// your new aMule version!
			if ((time + MIN2S(120)) >= wxDateTime::Now().GetTicks()) {
				valid_sources = true;

			// v1 has no time data. We can safely use
			// the sources; the timestamp will be saved next time.
			valid_sources = true;

		if (valid_sources) {
			sources_data.Seek(0);
			AddClientSources(&sources_data, SF_SOURCE_SEEDS, bUseSX2Format ? 4 : 1, bUseSX2Format);

	} catch (const CSafeIOException& e) {
		AddLogLineN(CFormat( _("Error reading partfile's seeds file (%s - %s): %s") )
void CPartFile::PartFileHashFinished(CKnownFile* result)
	m_lastDateChanged = result->m_lastDateChanged;
	bool errorfound = false;
	if (GetED2KPartHashCount() == 0){
		if (IsComplete(0, GetFileSize()-1)){
			if (result->GetFileHash() != GetFileHash()){
				AddLogLineN(CFormat(wxPLURAL(
					"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
					"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
					% result->GetFileHash().Encode()
					% GetFileHash().Encode() );
				AddGap(0, GetFileSize()-1);

		for (size_t i = 0; i < m_hashlist.size(); ++i){
			// Kry - trel_ar's completed parts check on rehashing.
			// Very nice feature: if a file is completed but .part.met doesn't believe it,
			uint64 partStart = i * PARTSIZE;
			uint64 partEnd   = partStart + GetPartSize(i) - 1;
			if (!( i < result->GetHashCount() && (result->GetPartHash(i) == GetPartHash(i)))){
				if (IsComplete(i)) {
					if ( i < result->GetHashCount() )
						wronghash = result->GetPartHash(i);

					AddLogLineN(CFormat(wxPLURAL(
						"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
						"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
						GetED2KPartHashCount())
						% GetED2KPartHashCount()
						% wronghash.Encode()
						% GetPartHash(i).Encode() );
				if (!IsComplete(i)){
					AddLogLineN(CFormat( _("Found completed part (%i) in %s") )

					RemoveBlockFromList(partStart, partEnd);

		result->GetAICHHashset()->GetStatus() == AICH_HASHSETCOMPLETE &&
		status == PS_COMPLETING) {
		delete m_pAICHHashSet;
		m_pAICHHashSet = result->GetAICHHashset();
		result->SetAICHHashset(NULL);
		m_pAICHHashSet->SetOwner(this);
	else if (status == PS_COMPLETING) {
		AddDebugLogLineN(logPartFile,
			CFormat(wxT("Failed to store new AICH Hashset for completed file: %s"))

	if (status == PS_COMPLETING){

	AddLogLineN(CFormat( _("Finished rehashing %s") ) % GetFileName());

	SetStatus(PS_READY);

	SetStatus(PS_READY);
	theApp->sharedfiles->SafeAddKFile(this);
void CPartFile::AddGap(uint64 start, uint64 end)
	m_gaplist.AddGap(start, end);
	UpdateDisplayedInfo();

void CPartFile::AddGap(uint16 part)
	m_gaplist.AddGap(part);
	UpdateDisplayedInfo();

bool CPartFile::IsAlreadyRequested(uint64 start, uint64 end)
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	for (; it != m_requestedblocks_list.end(); ++it) {
		Requested_Block_Struct* cur_block = *it;
		if ((start <= cur_block->EndOffset) && (end >= cur_block->StartOffset)) {

bool CPartFile::GetNextEmptyBlockInPart(uint16 partNumber, Requested_Block_Struct *result)
	// Find start of this part
	uint64 partStart = (PARTSIZE * partNumber);
	uint64 start = partStart;

	// What is the end limit of this block, i.e. can't go outside part (or filesize)
	uint64 partEnd = partStart + GetPartSize(partNumber) - 1;
	// Loop until find a suitable gap and return true, or no more gaps and return false
	CGapList::const_iterator it = m_gaplist.begin();
		uint64 gapStart, end;

		// Find the first gap from the start position
		for (; it != m_gaplist.end(); ++it) {
			gapStart = it.start();

			// Want gaps that overlap start<->partEnd
			if (gapStart <= partEnd && end >= start) {
			} else if (gapStart > partEnd) {

		// If no gaps after start, exit

		// Update start position if gap starts after current pos
		if (start < gapStart) {

		// Find end, keeping within the max block size and the part limit
		uint64 blockLimit = partStart + (BLOCKSIZE * (((start - partStart) / BLOCKSIZE) + 1)) - 1;
		if (end > blockLimit) {
		if (end > partEnd) {

		// If this gap has not already been requested, we have found a valid entry
		if (!IsAlreadyRequested(start, end)) {
			// Was this block to be returned
			if (result != NULL) {
				result->StartOffset = start;
				result->EndOffset = end;
				md4cpy(result->FileID, GetFileHash().GetHash());
				result->transferred = 0;

		// Reposition to end of that gap

		// If tried all gaps then break out of the loop
		if (end == partEnd) {

	// No suitable gap found

void CPartFile::FillGap(uint64 start, uint64 end)
	m_gaplist.FillGap(start, end);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();

void CPartFile::FillGap(uint16 part)
	m_gaplist.FillGap(part);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();

void CPartFile::UpdateCompletedInfos()
	uint64 allgaps = m_gaplist.GetGapSize();

	percentcompleted = (1.0 - (double)allgaps/GetFileSize()) * 100.0;
	completedsize = GetFileSize() - allgaps;
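	// Example: for a 100 MB file with 25 MB still gapped, this yields
	// percentcompleted = 75.0 and completedsize = 75 MB.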
void CPartFile::WritePartStatus(CMemFile* file)
	uint16 parts = GetED2KPartCount();
	file->WriteUInt16(parts);
	while (done != parts){
		for (uint32 i = 0; i != 8; ++i) {
			if (IsComplete(done)) {
			if (done == parts) {
		file->WriteUInt8(towrite);

void CPartFile::WriteCompleteSourcesCount(CMemFile* file)
	file->WriteUInt16(m_nCompleteSourcesCount);
uint32 CPartFile::Process(uint32 reducedownload/*in percent*/, uint8 m_icounter)
	uint32 dwCurTick = ::GetTickCount();

	// If buffer size exceeds limit, or if not written within time limit, flush data
	if (	(m_nTotalBufferData > thePrefs::GetFileBufferSize()) ||
		(dwCurTick > (m_nLastBufferFlushTime + BUFFER_TIME_LIMIT))) {
		// Avoid flushing while copying preview file
		if (!m_bPreviewing) {

	// check if we want new sources from server --> MOVED for 16.40 version
	old_trans = transferingsrc;

	if (m_icounter < 10) {
		// Update only downloading sources.
		CClientRefList::iterator it = m_downloadingSourcesList.begin();
		for( ; it != m_downloadingSourcesList.end(); ) {
			CUpDownClient *cur_src = it++->GetClient();
			if(cur_src->GetDownloadState() == DS_DOWNLOADING) {
				kBpsDown += cur_src->SetDownloadLimit(reducedownload);
		// Update all sources (including downloading sources)
		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
			CUpDownClient* cur_src = it++->GetClient();
			switch (cur_src->GetDownloadState()) {
				case DS_DOWNLOADING: {
					kBpsDown += cur_src->SetDownloadLimit(reducedownload);
				case DS_LOWTOLOWIP: {
					if (cur_src->HasLowID() && !theApp->CanDoCallback(cur_src->GetServerIP(), cur_src->GetServerPort())) {
						// If we are almost maxed on sources,
						// slowly remove these clients to see
						// if we can find a better source.
						if (((dwCurTick - lastpurgetime) > 30000) &&
							(GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8))) {
							RemoveSource(cur_src);
							lastpurgetime = dwCurTick;
						cur_src->SetDownloadState(DS_ONQUEUE);
				case DS_NONEEDEDPARTS: {
					// we try to purge not-needed sources, even without reaching the limit
					if((dwCurTick - lastpurgetime) > 40000) {
						if(!cur_src->SwapToAnotherFile(false, false, false, NULL)) {
							//however we only delete them if reaching the limit
							if (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) {
								RemoveSource(cur_src);
								lastpurgetime = dwCurTick;
								break; //Johnny-B - nothing more to do here (good eye!)
							lastpurgetime = dwCurTick;

					// doubled reasktime for no needed parts - save connections and traffic
					if (	!((!cur_src->GetLastAskedTime()) ||
						(dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME*2)) {

					// Recheck this client to see if still NNP..
					// Set to DS_NONE so that we force a TCP reask next time..
					cur_src->SetDownloadState(DS_NONE);

					if( cur_src->IsRemoteQueueFull()) {
						if(	((dwCurTick - lastpurgetime) > 60000) &&
							(GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) ) {
							RemoveSource( cur_src );
							lastpurgetime = dwCurTick;
							break; //Johnny-B - nothing more to do here (good eye!)

					// Give up to 1 min for UDP to respond..
					// If we are within one min on TCP, do not try..
					if (	theApp->IsConnected() &&
						( (!cur_src->GetLastAskedTime()) ||
						  (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME-20000)) {
						cur_src->UDPReaskForDownload();
					// No break here, since the next case takes care of asking for downloads.
				case DS_TOOMANYCONNS:
				case DS_WAITCALLBACK:
				case DS_WAITCALLBACKKAD: {
					if (	theApp->IsConnected() &&
						( (!cur_src->GetLastAskedTime()) ||
						  (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME)) {
						if (!cur_src->AskForDownload()) {
							// I left this break here just as a reminder
							// just in case we rearrange things..

		/* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
		if (IsA4AFAuto() && ((!m_LastNoNeededCheck) || (dwCurTick - m_LastNoNeededCheck > 900000))) {
			m_LastNoNeededCheck = dwCurTick;
			for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
				CUpDownClient *cur_source = it++->GetClient();
				uint8 download_state = cur_source->GetDownloadState();
				if( download_state != DS_DOWNLOADING
					&& cur_source->GetRequestFile()
					&& ((!cur_source->GetRequestFile()->IsA4AFAuto()) || download_state == DS_NONEEDEDPARTS))
					cur_source->SwapToAnotherFile(false, false, false, this);
		/* eMule 0.30c implementation, i give it a try (Creteil) END ... */

		// swap No needed partfiles if possible

		if (((old_trans == 0) && (transferingsrc > 0)) || ((old_trans > 0) && (transferingsrc == 0))) {

		// Kad source search
		if( GetMaxSourcePerFileUDP() > GetSourceCount()){
			//Once we can handle lowID users in Kad, we remove the second IsConnected
			if (theApp->downloadqueue->DoKademliaFileRequest() && (Kademlia::CKademlia::GetTotalFile() < KADEMLIATOTALFILE) && (dwCurTick > m_LastSearchTimeKad) && Kademlia::CKademlia::IsConnected() && theApp->IsConnected() && !IsStopped()){
				theApp->downloadqueue->SetLastKademliaFileRequest();

				if (GetKadFileSearchID()) {
					/*	This will never happen anyway. We're talking a
						1h timespan and searches are at max 45secs */
					Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);

				Kademlia::CUInt128 kadFileID(GetFileHash().GetHash());
				Kademlia::CSearch* pSearch = Kademlia::CSearchManager::PrepareLookup(Kademlia::CSearch::FILE, true, kadFileID);
				AddDebugLogLineN(logKadSearch, CFormat(wxT("Preparing a Kad Search for '%s'")) % GetFileName());
					AddDebugLogLineN(logKadSearch, CFormat(wxT("Kad lookup started for '%s'")) % GetFileName());
					if(m_TotalSearchesKad < 7) {
						m_TotalSearchesKad++;
					m_LastSearchTimeKad = dwCurTick + (KADEMLIAREASKTIME*m_TotalSearchesKad);
					SetKadFileSearchID(pSearch->GetSearchID());
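					// The next Kad lookup is scheduled KADEMLIAREASKTIME * m_TotalSearchesKad
					// ticks ahead, so each successive search for this file (counter capped
					// at 7) is spaced further apart.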
			if(GetKadFileSearchID()) {
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);

	// check if we want new sources from server
	if (	!m_localSrcReqQueued &&
		( (!m_lastsearchtime) ||
		  (dwCurTick - m_lastsearchtime) > SERVERREASKTIME) &&
		theApp->IsConnectedED2K() &&
		thePrefs::GetMaxSourcePerFileSoft() > GetSourceCount() &&
		m_localSrcReqQueued = true;
		theApp->downloadqueue->SendLocalSrcRequest(this);

	// calculate datarate, set limit etc.

	// Kry - does the 3 / 30 difference produce too much flickering or CPU?
	if (m_count >= 30) {
		UpdateAutoDownPriority();
		UpdateDisplayedInfo();
		if(m_bPercentUpdated == false) {
			UpdateCompletedInfos();
		m_bPercentUpdated = false;

	// release file handle if unused for some time
	m_hpartfile.Release();

	return (uint32)(kBpsDown*1024.0);

bool CPartFile::CanAddSource(uint32 userid, uint16 port, uint32 serverip, uint16 serverport, uint8* pdebug_lowiddropped, bool ed2kID)
	//The incoming ID could have the userid in the Hybrid format..
	uint32 hybridID = 0;
	if (IsLowID(userid)) {
			hybridID = wxUINT32_SWAP_ALWAYS(userid);
		if (!IsLowID(userid)) {
			userid = wxUINT32_SWAP_ALWAYS(userid);
1596 if (theApp
->IsConnectedED2K()) {
1597 if(::IsLowID(theApp
->GetED2KID())) {
1598 if(theApp
->GetED2KID() == userid
&& theApp
->serverconnect
->GetCurrentServer()->GetIP() == serverip
&& theApp
->serverconnect
->GetCurrentServer()->GetPort() == serverport
) {
1601 if(theApp
->GetPublicIP() == userid
) {
1605 if(theApp
->GetED2KID() == userid
&& thePrefs::GetPort() == port
) {
1611 if (Kademlia::CKademlia::IsConnected()) {
1612 if(!Kademlia::CKademlia::IsFirewalled()) {
1613 if(Kademlia::CKademlia::GetIPAddress() == hybridID
&& thePrefs::GetPort() == port
) {
1619 //This allows *.*.*.0 clients to not be removed if Ed2kID == false
1620 if ( IsLowID(hybridID
) && theApp
->IsFirewalled()) {
1621 if (pdebug_lowiddropped
) {
1622 (*pdebug_lowiddropped
)++;
void CPartFile::AddSources(CMemFile& sources, uint32 serverip, uint16 serverport, unsigned origin, bool bWithObfuscationAndHash)
	uint8 count = sources.ReadUInt8();
	uint8 debug_lowiddropped = 0;
	uint8 debug_possiblesources = 0;
	CMD4Hash achUserHash;

		// since we may receive multiple search source UDP results we have to "consume" all data of that packet
		AddDebugLogLineN(logPartFile, wxT("Trying to add sources for a stopped file"));
		sources.Seek(count*(4+2), wxFromCurrent);

	for (int i = 0; i != count; ++i) {
		uint32 userid = sources.ReadUInt32();
		uint16 port   = sources.ReadUInt16();

		uint8 byCryptOptions = 0;
		if (bWithObfuscationAndHash){
			byCryptOptions = sources.ReadUInt8();
			if ((byCryptOptions & 0x80) > 0) {
				achUserHash = sources.ReadHash();

			if ((thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x01/*supported*/) > 0 && (byCryptOptions & 0x80) == 0)
				|| (thePrefs::IsClientCryptLayerSupported() && (byCryptOptions & 0x02/*requested*/) > 0 && (byCryptOptions & 0x80) == 0)) {
				AddDebugLogLineN(logPartFile, CFormat(wxT("Server didn't provide UserHash for source %u, even if it was expected to (or local obfuscationsettings changed during serverconnect")) % userid);
			} else if (!thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x02/*requested*/) == 0 && (byCryptOptions & 0x80) != 0) {
				AddDebugLogLineN(logPartFile, CFormat(wxT("Server provided UserHash for source %u, even if it wasn't expected to (or local obfuscationsettings changed during serverconnect")) % userid);

		// "Filter LAN IPs" and "IPfilter" the received sources IP addresses
		if (!IsLowID(userid)) {
			// check for 0-IP, localhost and optionally for LAN addresses
			if ( !IsGoodIP(userid, thePrefs::FilterLanIPs()) ) {
			if (theApp->ipfilter->IsFiltered(userid)) {

		if (!CanAddSource(userid, port, serverip, serverport, &debug_lowiddropped)) {

		if(thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
			++debug_possiblesources;
			CUpDownClient* newsource = new CUpDownClient(port, userid, serverip, serverport, this, true, true);

			newsource->SetSourceFrom((ESourceFrom)origin);
			newsource->SetConnectOptions(byCryptOptions, true, false);

			if ((byCryptOptions & 0x80) != 0) {
				newsource->SetUserHash(achUserHash);

			theApp->downloadqueue->CheckAndAddSource(this, newsource);
			AddDebugLogLineN(logPartFile, wxT("Consuming a packet because of max sources reached"));
			// Since we may receive multiple search source UDP results we have to "consume" all data of that packet
			// This '+1' is added because 'i' counts from 0.
			sources.Seek((count-(i+1))*(4+2), wxFromCurrent);
			if (GetKadFileSearchID()) {
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
void CPartFile::UpdatePartsInfo()
	if( !IsPartFile() ) {
		CKnownFile::UpdatePartsInfo();

	uint16 partcount = GetPartCount();
	bool flag = (time(NULL) - m_nCompleteSourcesTime > 0);

	// Ensure the frequency-list is ready
	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

	// Find number of available parts
	uint16 availablecounter = 0;
	for ( uint16 i = 0; i < partcount; ++i ) {
		if ( m_SrcpartFrequency[i] )

	if ( ( availablecounter == partcount ) && ( m_availablePartsCount < partcount ) ) {
		lastseencomplete = time(NULL);

	m_availablePartsCount = availablecounter;

		ArrayOfUInts16 count;
		count.reserve(GetSourceCount());

		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it ) {
			CUpDownClient* client = it->GetClient();
			if ( !client->GetUpPartStatus().empty() && client->GetUpPartCount() == partcount ) {
				count.push_back(client->GetUpCompleteSourcesCount());

		m_nCompleteSourcesCount = m_nCompleteSourcesCountLo = m_nCompleteSourcesCountHi = 0;

		for (uint16 i = 0; i < partcount; ++i) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
			else if( m_nCompleteSourcesCount > m_SrcpartFrequency[i]) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
		count.push_back(m_nCompleteSourcesCount);

		int32 n = count.size();

		std::sort(count.begin(), count.end(), std::less<uint16>());

		int32 i = n >> 1;		// (n / 2)
		int32 j = (n * 3) >> 2;		// (n * 3) / 4
		int32 k = (n * 7) >> 3;		// (n * 7) / 8
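		// i, j and k index the 50th, 75th and 87.5th percentiles of the sorted
		// per-source "complete sources" counts; they feed the low / normal / high
		// estimates below.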
		//When still a part file, adjust your guesses by 20% to what you see..

			//Not many sources, so just use what you see..
			// welcome to 'plain stupid code'
			// m_nCompleteSourcesCount;
			m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
			m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
		} else if (n < 20) {
			// For low guess and normal guess count:
			// If we see more sources than the guessed low and normal, use what we see.
			// If we see fewer sources than the guessed low, the network accounts for 80%,
			// we account for 20% with what we see and make sure we are still above the normal.

			// Adjust 80% network and 20% what we see.
			if ( count[i] < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
				m_nCompleteSourcesCountLo =
					(uint16)((float)(count[i]*.8) +
						 (float)(m_nCompleteSourcesCount*.2));
			m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
			m_nCompleteSourcesCountHi =
				(uint16)((float)(count[j]*.8) +
					 (float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

			// The network accounts for 80%, we account for 20% with what
			// we see and make sure we are still above the low.

			// The network accounts for 80%, we account for 20% with what
			// we see and make sure we are still above the normal.

			m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
			m_nCompleteSourcesCount = (uint16)((float)(count[j]*.8)+(float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCount < m_nCompleteSourcesCountLo ) {
				m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
			m_nCompleteSourcesCountHi = (uint16)((float)(count[k]*.8)+(float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

		m_nCompleteSourcesTime = time(NULL) + (60);

	UpdateDisplayedInfo();
// [Maella -Enhanced Chunk Selection- (based on jicxicmic)]
bool CPartFile::GetNextRequestedBlock(CUpDownClient* sender,
	std::vector<Requested_Block_Struct*>& toadd, uint16& count)

	// The purpose of this function is to return a list of blocks (~180KB) to
	// download. To avoid a premature stop of the downloading, all blocks that
	// are requested from the same source must be located within the same
	// chunk (=> part ~9MB).
	//
	// The selection of the chunk to download is one of the CRITICAL parts of the
	// edonkey network. The selection algorithm must ensure the best spreading
	//
	// The selection is based on 4 criteria:
	//  1. Frequency of the chunk (availability): very rare chunks must be downloaded
	//     as quickly as possible to become a new available source.
	//  2. Parts used for preview (first + last chunk): preview or check a
	//     file (e.g. movie, mp3)
	//  3. Request state (download in progress): try to ask each source for another
	//     chunk. Spread the requests between all sources.
	//  4. Completion (shortest-to-complete): partially retrieved chunks should be
	//     completed before starting to download other ones.
	//
	// The frequency criterion defines three zones: very rare (<10%), rare (<50%)
	// and common (>30%). Inside each zone, the criteria have a specific weight, used
	// to calculate the priority of chunks. The chunk(s) with the highest
	// priority (highest=0, lowest=0xffff) is/are selected first.
	//
	//          very rare (preview)           rare                      common
	//    0% <---- +0 pt ----> 10% <----- +10000 pt -----> 50% <---- +20000 pt ----> 100%
	// 1.  <------- frequency: +25*frequency pt ----------->
	// 2.  <- preview: +1 pt --><------------- preview: set to 10000 pt ------------->
	// 3.              <------ request: download in progress +20000 pt ------>
	// 4a. <- completion: 0% +100, 25% +75 .. 100% +0 pt --><-- !req => completion --->
	// 4b.                                                  <--- req => !completion -->
	//
	// Unrolled, the priority scale is:
	//
	//   0..xxxx       unrequested and requested very rare chunks
	//   10000..1xxxx  unrequested rare chunks + unrequested preview chunks
	//   20000..2xxxx  unrequested common chunks (priority to the most complete)
	//   30000..3xxxx  requested rare chunks + requested preview chunks
	//   40000..4xxxx  requested common chunks (priority to the least complete)
	//
	// This algorithm usually selects first the rarest chunk(s). However, partially
	// complete chunk(s) that is/are close to completion may overtake the priority
	// (priority inversion).
	// For the common chunks, the algorithm tries to spread the download between
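	// Illustrative example: assuming a very-rare bound of 20, an unrequested,
	// non-preview chunk with frequency 3 that is 40% complete gets
	//     rank = 25*3 + 1 + (100 - 40) = 136,
	// well ahead of any common chunk, whose rank starts at 20000.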
	// Check input parameters
	if ( sender->GetPartStatus().empty() ) {

	// Define and create the list of the chunks to download
	const uint16 partCount = GetPartCount();
	ChunkList chunksList;

	uint16 newBlockCount = 0;
	while(newBlockCount != count) {
		// Create a request block structure if a chunk has been previously selected
		if(sender->GetLastPartAsked() != 0xffff) {
			Requested_Block_Struct* pBlock = new Requested_Block_Struct;
			if(GetNextEmptyBlockInPart(sender->GetLastPartAsked(), pBlock) == true) {
				// Keep track of all pending requested blocks
				m_requestedblocks_list.push_back(pBlock);
				// Update list of blocks to return
				toadd.push_back(pBlock);
				// Skip end of loop (=> CPU load)
				// All blocks for this chunk have been already requested
				// => Try to select another chunk
				sender->SetLastPartAsked(0xffff);

		// Check if a new chunk must be selected (e.g. download starting, previous chunk complete)
		if(sender->GetLastPartAsked() == 0xffff) {
			// Quantify all chunks (create list of chunks to download)
			// This is done only one time and only if it is necessary (=> CPU load)
			if(chunksList.empty()) {
				// Identify the locally missing part(s) that this source has
				for(uint16 i = 0; i < partCount; ++i) {
					if(sender->IsPartAvailable(i) == true && GetNextEmptyBlockInPart(i, NULL) == true) {
						// Create a new entry for this chunk and add it to the list
						newEntry.frequency = m_SrcpartFrequency[i];
						chunksList.push_back(newEntry);

				// Check if any block(s) could be downloaded
				if(chunksList.empty()) {
					break; // Exit main loop while()

				// Define the bounds of the three zones (very rare, rare)
				// more depending on available sources
				if (GetSourceCount()>800) {
				} else if (GetSourceCount()>200) {
				uint16 limit = modif*GetSourceCount()/100;

				const uint16 veryRareBound = limit;
				const uint16 rareBound = 2*limit;

				// Cache Preview state (Criterion 2)
				FileType type = GetFiletype(GetFileName());
				const bool isPreviewEnable =
					thePrefs::GetPreviewPrio() &&
					(type == ftArchive || type == ftVideo);

				// Collect and calculate criteria for all chunks
				for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
					Chunk& cur_chunk = *it;
					const uint64 uStart = cur_chunk.part * PARTSIZE;
					const uint64 uEnd   = uStart + GetPartSize(cur_chunk.part) - 1;
					// Criterion 2. Parts used for preview
					// Remark: - We need to download the first part and the last part(s).
					//         - When the last part is very small, it's necessary to
					//           download the two last parts.
					bool critPreview = false;
					if(isPreviewEnable == true) {
						if(cur_chunk.part == 0) {
							critPreview = true; // First chunk
						} else if(cur_chunk.part == partCount-1) {
							critPreview = true; // Last chunk
						} else if(cur_chunk.part == partCount-2) {
							// Last chunk - 1 (only if last chunk is too small)
							const uint32 sizeOfLastChunk = GetFileSize() - uEnd;
							if(sizeOfLastChunk < PARTSIZE/3) {
								critPreview = true; // Last chunk - 1

					// Criterion 3. Request state (download in progress from other source(s))
					const bool critRequested =
						cur_chunk.frequency > veryRareBound &&
						IsAlreadyRequested(uStart, uEnd);

					// Criterion 4. Completion
					// PARTSIZE instead of GetPartSize() favours the last chunk - but that may be intentional
					uint32 partSize = PARTSIZE - m_gaplist.GetGapSize(cur_chunk.part);
					const uint16 critCompletion = (uint16)(partSize/(PARTSIZE/100)); // in [%]

					// Calculate priority with all criteria
					if(cur_chunk.frequency <= veryRareBound) {
						// 0..xxxx unrequested + requested very rare chunks
						cur_chunk.rank = (25 * cur_chunk.frequency) +	// Criterion 1
							((critPreview == true) ? 0 : 1) +	// Criterion 2
							(100 - critCompletion);			// Criterion 4
					} else if(critPreview == true) {
						// 10000..10100  unrequested preview chunks
						// 30000..30100  requested preview chunks
						cur_chunk.rank = ((critRequested == false) ? 10000 : 30000) +	// Criterion 3
							(100 - critCompletion);			// Criterion 4
					} else if(cur_chunk.frequency <= rareBound) {
						// 10101..1xxxx  unrequested rare chunks
						// 30101..3xxxx  requested rare chunks
						cur_chunk.rank = (25 * cur_chunk.frequency) +	// Criterion 1
							((critRequested == false) ? 10101 : 30101) +	// Criterion 3
							(100 - critCompletion);			// Criterion 4
						if(critRequested == false) {	// Criterion 3
							// 20000..2xxxx  unrequested common chunks
							cur_chunk.rank = 20000 +		// Criterion 3
								(100 - critCompletion);		// Criterion 4
							// 40000..4xxxx  requested common chunks
							// Remark: The weight of the completion criterion is inverted
							//         to spread the requests over the completing chunks.
							//         Without this, the chunk closest to completion would
							//         receive every new source.
							cur_chunk.rank = 40000 +		// Criterion 3
								(critCompletion);		// Criterion 4

			// Select the next chunk to download
			if(!chunksList.empty()) {
				// Find and count the chunk(s) with the highest priority
				uint16 chunkCount = 0;	// Number of found chunks with same priority
				uint16 rank = 0xffff;	// Highest priority found

				// Collect and calculate criteria for all chunks
				for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
					const Chunk& cur_chunk = *it;
					if(cur_chunk.rank < rank) {
						rank = cur_chunk.rank;
					} else if(cur_chunk.rank == rank) {

				// Use a random access to avoid that everybody tries to download the
				// same chunks at the same time (=> spread the selected chunk among clients)
				uint16 randomness = 1 + (int) (((float)(chunkCount-1))*rand()/(RAND_MAX+1.0));
2043 for (ChunkList::iterator it
= chunksList
.begin(); it
!= chunksList
.end(); ++it
) {
2044 const Chunk
& cur_chunk
= *it
;
2045 if(cur_chunk
.rank
== rank
) {
2047 if(randomness
== 0) {
2048 // Selection process is over
2049 sender
->SetLastPartAsked(cur_chunk
.part
);
2050 // Remark: this list might be reused up to *count times
2051 chunksList
.erase(it
);
2052 break; // exit loop for()
2057 // There is no remaining chunk to download
2058 break; // Exit main loop while()
2062 // Return the number of the blocks
2063 count
= newBlockCount
;
2065 return (newBlockCount
> 0);
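
// Illustrative walk-through of the ranking above (numbers are made up, not part
// of the algorithm itself): if limit works out to 20, the bands are
// veryRareBound = 20 and rareBound = 40. A very rare chunk (frequency 3, no
// preview value, 40% complete) gets
//     rank = 25*3 + 1 + (100 - 40) = 136        (0..xxxx band)
// while an already requested common chunk at 90% completion gets
//     rank = 40000 + 90 = 40090                 (40000..4xxxx band)
// Lower rank means higher priority, so rare chunks are picked long before
// requested common ones, and ties are broken by the random pick above.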

void CPartFile::RemoveBlockFromList(uint64 start, uint64 end)
{
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	while (it != m_requestedblocks_list.end()) {
		std::list<Requested_Block_Struct*>::iterator it2 = it++;

		if ((*it2)->StartOffset <= start && (*it2)->EndOffset >= end) {
			m_requestedblocks_list.erase(it2);
		}
	}
}


void CPartFile::RemoveAllRequestedBlocks(void)
{
	m_requestedblocks_list.clear();
}

void CPartFile::CompleteFile(bool bIsHashingDone)
{
	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
	}

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	AddDebugLogLineN( logPartFile, wxString( wxT("CPartFile::CompleteFile: Hash ") ) + ( bIsHashingDone ? wxT("done") : wxT("not done") ) );

	if (!bIsHashingDone) {
		SetStatus(PS_COMPLETING);

		CPath partFile = m_partmetfilename.RemoveExt();
		CThreadScheduler::AddTask(new CHashingTask(GetFilePath(), partFile, this));
		return;
	}

	m_is_A4AF_auto = false;
	SetStatus(PS_COMPLETING);
	// Guess I was wrong about not needing to spawn a thread ...
	// It is needed if the temp and incoming dirs are on different
	// partitions/drives and the file is large... [oz]
	PerformFileComplete();

	if (thePrefs::ShowCatTabInfos()) {
		Notify_ShowUpdateCatTabTitles();
	}
	UpdateDisplayedInfo(true);
}

void CPartFile::CompleteFileEnded(bool errorOccured, const CPath& newname)
{
	if (errorOccured) {
		SetStatus(PS_ERROR);
		AddLogLineC(CFormat( _("Unexpected error while completing %s. File paused") ) % GetFileName() );
	} else {
		m_fullname = newname;

		SetFilePath(m_fullname.GetPath());
		SetFileName(m_fullname.GetFullName());
		m_lastDateChanged = CPath::GetModificationTime(m_fullname);

		SetStatus(PS_COMPLETE);

		// Remove from the list of canceled files in case it was canceled once upon a time
		if (theApp->canceledfiles->Remove(GetFileHash())) {
			theApp->canceledfiles->Save();
		}

		// Mark as known (checks if it's already known),
		// also updates search files
		theApp->knownfiles->SafeAddKFile(this);

		// remove the file from the suspended uploads list
		theApp->uploadqueue->ResumeUpload(GetFileHash());
		theApp->downloadqueue->RemoveFile(this, true);
		theApp->sharedfiles->SafeAddKFile(this);
		UpdateDisplayedInfo(true);

		// republish the file to the ed2k-server to update the 'FT_COMPLETE_SOURCES' counter on the server.
		theApp->sharedfiles->RepublishFile(this);

		// Ensure that the completed size shows the correct value
		completedsize = GetFileSize();

		// clear the blackbox to free up memory
		m_CorruptionBlackBox->Free();

		AddLogLineC(CFormat( _("Finished downloading: %s") ) % GetFileName() );
	}

	theApp->downloadqueue->StartNextFile(this);
}

void CPartFile::PerformFileComplete()
{
	// add this file to the suspended uploads list
	theApp->uploadqueue->SuspendUpload(GetFileHash(), false);

	// close permanent handle
	if (m_hpartfile.IsOpened()) {
		m_hpartfile.Close();
	}

	// Schedule task for completion of the file
	CThreadScheduler::AddTask(new CCompletionTask(this));
}
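
// Rough sketch of the completion flow, as far as it is visible from this file
// (the heavy lifting lives in ThreadTasks.cpp): CompleteFile(false) sets
// PS_COMPLETING and schedules a CHashingTask for the finished data. When the
// hashing reports back, CompleteFile(true) runs PerformFileComplete(), which
// suspends the upload, closes the file handle and schedules a CCompletionTask
// to move the file out of the temp dir. That task in turn ends up in
// CompleteFileEnded(), which either flags PS_ERROR or switches the file to
// PS_COMPLETE and re-registers it with the known and shared file lists.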

void CPartFile::RemoveAllSources(bool bTryToSwap)
{
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = it++->GetClient();
		if (bTryToSwap) {
			if (!cur_src->SwapToAnotherFile(true, true, true, NULL)) {
				RemoveSource(cur_src, true, false);
				// If it was not swapped, it's not on any file anymore, and should die
			}
		} else {
			RemoveSource(cur_src, true, false);
		}
	}

	/* eMule 0.30c implementation, I give it a try (Creteil) BEGIN ... */
	// remove all A4AF links in sources to this file
	if (!m_A4AFsrclist.empty()) {
		for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
			CUpDownClient* cur_src = it++->GetClient();
			if ( cur_src->DeleteFileRequest( this ) ) {
				Notify_SourceCtrlRemoveSource(cur_src->ECID(), this);
			}
		}
		m_A4AFsrclist.clear();
	}
	/* eMule 0.30c implementation, I give it a try (Creteil) END ... */
	UpdateFileRatingCommentAvail();
}

void CPartFile::Delete()
{
	AddLogLineN(CFormat(_("Deleting file: %s")) % GetFileName());
	// Barry - Need to tell any connected clients to stop sending the file
	StopFile(true);
	AddDebugLogLineN(logPartFile, wxT("\tStopped"));

	uint16 removed = theApp->uploadqueue->SuspendUpload(GetFileHash(), true);
	AddDebugLogLineN(logPartFile, CFormat(wxT("\tSuspended upload to %d clients")) % removed);
	theApp->sharedfiles->RemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from shared"));
	theApp->downloadqueue->RemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from download queue"));
	Notify_DownloadCtrlRemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from transferwnd"));
	if (theApp->canceledfiles->Add(GetFileHash())) {
		theApp->canceledfiles->Save();
	}
	AddDebugLogLineN(logPartFile, wxT("\tAdded to canceled file list"));
	theApp->searchlist->UpdateSearchFileByHash(GetFileHash());	// Update the file in the search dialog if it's still open

	if (m_hpartfile.IsOpened()) {
		m_hpartfile.Close();
	}
	AddDebugLogLineN(logPartFile, wxT("\tClosed"));

	if (!CPath::RemoveFile(m_fullname)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("\tFailed to delete '%s'")) % m_fullname);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .part.met"));
	}

	if (!CPath::RemoveFile(m_PartPath)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % m_PartPath);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .part"));
	}

	CPath BAKName = m_fullname.AppendExt(PARTMET_BAK_EXT);
	if (!CPath::RemoveFile(BAKName)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % BAKName);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .BAK"));
	}

	CPath SEEDSName = m_fullname.AppendExt(wxT(".seeds"));
	if (SEEDSName.FileExists()) {
		if (CPath::RemoveFile(SEEDSName)) {
			AddDebugLogLineN(logPartFile, wxT("\tRemoved .seeds"));
		} else {
			AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % SEEDSName);
		}
	}

	AddDebugLogLineN(logPartFile, wxT("Done"));

	delete this;
}

bool CPartFile::HashSinglePart(uint16 partnumber)
{
	if ((GetHashCount() <= partnumber) && (GetPartCount() > 1)) {
		AddLogLineC(CFormat( _("WARNING: Unable to hash downloaded part - hashset incomplete for '%s'") )
			% GetFileName() );
		m_hashsetneeded = true;
		return true;
	} else if ((GetHashCount() <= partnumber) && GetPartCount() != 1) {
		AddLogLineC(CFormat( _("ERROR: Unable to hash downloaded part - hashset incomplete (%s). This should never happen")) % GetFileName() );
		m_hashsetneeded = true;
		return true;
	} else {
		CMD4Hash hashresult;
		uint64 offset = PARTSIZE * partnumber;
		uint32 length = GetPartSize(partnumber);
		try {
			CreateHashFromFile(m_hpartfile, offset, length, &hashresult, NULL);
		} catch (const CIOFailureException& e) {
			AddLogLineC(CFormat( _("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
				% partnumber % length % (offset + length) % GetFileName() % GetFileSize() % e.what());
			SetStatus(PS_ERROR);
			return false;
		} catch (const CEOFException& e) {
			AddLogLineC(CFormat( _("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
				% partnumber % length % (offset + length) % GetFileName() % GetFileSize() % e.what());
			SetStatus(PS_ERROR);
			return false;
		}

		if (GetPartCount() > 1) {
			if (hashresult != GetPartHash(partnumber)) {
				AddDebugLogLineN(logPartFile, CFormat( wxT("%s: Expected hash of part %d: %s")) % GetFileName() % partnumber % GetPartHash(partnumber).Encode() );
				AddDebugLogLineN(logPartFile, CFormat( wxT("%s: Actual hash of part %d:   %s")) % GetFileName() % partnumber % hashresult.Encode() );
				return false;
			}
			return true;
		} else {
			if (hashresult != m_abyFileHash) {
				return false;
			}
			return true;
		}
	}
}

bool CPartFile::IsCorruptedPart(uint16 partnumber)
{
	return std::find(m_corrupted_list.begin(), m_corrupted_list.end(), partnumber)
		!= m_corrupted_list.end();
}

void CPartFile::SetDownPriority(uint8 np, bool bSave, bool bRefresh)
{
	if ( m_iDownPriority != np ) {
		m_iDownPriority = np;
		if ( bRefresh ) {
			UpdateDisplayedInfo(true);
		}
		if ( bSave ) {
			SavePartFile();
		}
	}
}

void CPartFile::StopFile(bool bCancel)
{
	// Kry - Need to set it here to get into SetStatus(status) correctly
	m_stopped = true;

	// Barry - Need to tell any connected clients to stop sending the file
	PauseFile();

	m_LastSearchTimeKad = 0;
	m_TotalSearchesKad = 0;

	RemoveAllSources(true);

	if (!bCancel) {
		FlushBuffer();
	}

	UpdateDisplayedInfo(true);
}

void CPartFile::StopPausedFile()
{
	// Once an hour, remove any sources for files which are no longer active downloads
	switch (GetStatus()) {
		case PS_PAUSED:
		case PS_INSUFFICIENT:
		case PS_ERROR:
			if (time(NULL) - m_iLastPausePurge > (60*60)) {
				m_iLastPausePurge = time(NULL);
				StopFile();
			}
			break;
	}

	// release the file handle if it has been unused for some time
	m_hpartfile.Release();
}

void CPartFile::PauseFile(bool bInsufficient)
{
	if ( status == PS_COMPLETE || status == PS_COMPLETING ) {
		return;
	}

	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);
		// If we were in the middle of searching, reset the timer so they can resume searching.
		m_LastSearchTimeKad = 0;
	}

	m_iLastPausePurge = time(NULL);

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	CPacket packet( OP_CANCELTRANSFER, 0, OP_EDONKEYPROT );
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = it++->GetClient();
		if (cur_src->GetDownloadState() == DS_DOWNLOADING) {
			if (!cur_src->GetSentCancelTransfer()) {
				theStats::AddUpOverheadOther( packet.GetPacketSize() );
				AddDebugLogLineN( logLocalClient, wxT("Local Client: OP_CANCELTRANSFER to ") + cur_src->GetFullIP() );
				cur_src->SendPacket( &packet, false, true );
				cur_src->SetSentCancelTransfer( true );
			}
			cur_src->SetDownloadState(DS_ONQUEUE);
			// Allow immediate reconnect on resume
			cur_src->ResetLastAskedTime();
		}
	}

	m_insufficient = bInsufficient;
	m_paused = true;

	SetStatus(status);
}

void CPartFile::ResumeFile()
{
	if ( status == PS_COMPLETE || status == PS_COMPLETING ) {
		return;
	}

	if ( m_insufficient && !CheckFreeDiskSpace() ) {
		// Still not enough free disk space
		return;
	}

	m_paused = false;
	m_stopped = false;
	m_insufficient = false;

	m_lastsearchtime = 0;
	SetActive(theApp->IsConnected());

	if (m_gaplist.IsComplete() && (GetStatus() == PS_ERROR)) {
		// The file has already been hashed at this point
		CompleteFile(true);
	}

	UpdateDisplayedInfo(true);
}

bool CPartFile::CheckFreeDiskSpace( uint64 neededSpace )
{
	uint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if (free == static_cast<uint64>(wxInvalidOffset)) {
		// If GetFreeSpaceAt() fails, then the path probably does not exist.
		return false;
	}

	// The very least acceptable disk space is a single PART
	if ( free < PARTSIZE ) {
		// Always fail in this case, since we risk losing data if we try to
		// write on a full partition.
		return false;
	}

	// All other checks are only made if the user has enabled them
	if ( thePrefs::IsCheckDiskspaceEnabled() ) {
		neededSpace += thePrefs::GetMinFreeDiskSpace();

		// Due to the existence of sparse files, we cannot assume that writes
		// within the file don't cause new blocks to be allocated.
		// Therefore, we simply have to stop writing the moment the limit has
		// been reached.
		return free >= neededSpace;
	}

	return true;
}
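
// Example of the check above (illustrative numbers only): with 50 MB free,
// neededSpace = 30 MB and a configured minimum of 50 MB, the call fails because
// 30 MB + 50 MB > 50 MB. With the disk-space check disabled it succeeds as long
// as at least one PARTSIZE (about 9.28 MB) is free.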

void CPartFile::SetLastAnsweredTime()
{
	m_ClientSrcAnswered = ::GetTickCount();
}


void CPartFile::SetLastAnsweredTimeTimeout()
{
	m_ClientSrcAnswered = 2 * CONNECTION_LATENCY + ::GetTickCount() - SOURCECLIENTREASKS;
}

CPacket *CPartFile::CreateSrcInfoPacket(const CUpDownClient* forClient, uint8 byRequestedVersion, uint16 nRequestedOptions)
{
	if ( m_SrcList.empty() ) {
		return NULL;
	}

	if (!IsPartFile()) {
		return CKnownFile::CreateSrcInfoPacket(forClient, byRequestedVersion, nRequestedOptions);
	}

	if (((forClient->GetRequestFile() != this)
		&& (forClient->GetUploadFile() != this)) || forClient->GetUploadFileID() != GetFileHash()) {
		wxString file1 = _("Unknown");
		if (forClient->GetRequestFile() && forClient->GetRequestFile()->GetFileName().IsOk()) {
			file1 = forClient->GetRequestFile()->GetFileName().GetPrintable();
		} else if (forClient->GetUploadFile() && forClient->GetUploadFile()->GetFileName().IsOk()) {
			file1 = forClient->GetUploadFile()->GetFileName().GetPrintable();
		}
		wxString file2 = _("Unknown");
		if (GetFileName().IsOk()) {
			file2 = GetFileName().GetPrintable();
		}
		AddDebugLogLineN(logPartFile, wxT("File mismatch on source packet (P) Sending: ") + file1 + wxT(" From: ") + file2);
		return NULL;
	}

	if ( !(GetStatus() == PS_READY || GetStatus() == PS_EMPTY)) {
		return NULL;
	}

	const BitVector& reqstatus = forClient->GetPartStatus();
	bool KnowNeededParts = !reqstatus.empty();
	//wxASSERT(rcvstatus.size() == GetPartCount()); // Obviously!
	if (KnowNeededParts && (reqstatus.size() != GetPartCount())) {
		// Yuck. Same file but different part count? Seriously fucked up.
		// This happens rather often with reqstatus.size() == 0. Don't log then.
		if (reqstatus.size()) {
			AddDebugLogLineN(logKnownFiles, CFormat(wxT("Impossible situation: different partcounts: %i (client) and %i (file) for %s")) % reqstatus.size() % GetPartCount() % GetFileName());
		}
		return NULL;
	}

	CMemFile data(1024);

	uint8 byUsedVersion;
	bool bIsSX2Packet;
	if (forClient->SupportsSourceExchange2() && byRequestedVersion > 0) {
		// the client uses SourceExchange2 and requested the highest version it knows,
		// and we send the highest version we know, but of course not higher than its request
		byUsedVersion = std::min(byRequestedVersion, (uint8)SOURCEEXCHANGE2_VERSION);
		bIsSX2Packet = true;
		data.WriteUInt8(byUsedVersion);

		// we don't support any special SX2 options yet, reserved for later use
		if (nRequestedOptions != 0) {
			AddDebugLogLineN(logKnownFiles, CFormat(wxT("Client requested unknown options for SourceExchange2: %u")) % nRequestedOptions);
		}
	} else {
		byUsedVersion = forClient->GetSourceExchange1Version();
		bIsSX2Packet = false;
		if (forClient->SupportsSourceExchange2()) {
			AddDebugLogLineN(logKnownFiles, wxT("Client which announced to support SX2 sent SX1 packet instead"));
		}
	}

	uint16 nCount = 0;

	data.WriteHash(m_abyFileHash);
	data.WriteUInt16(nCount);

	for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		bool bNeeded = false;

		CUpDownClient* cur_src = it->GetClient();

		int state = cur_src->GetDownloadState();
		int valid = ( state == DS_DOWNLOADING ) || ( state == DS_ONQUEUE && !cur_src->IsRemoteQueueFull() );

		if ( cur_src->HasLowID() || !valid ) {
			continue;
		}

		// only send sources which have needed parts for this client, if possible
		const BitVector& srcstatus = cur_src->GetPartStatus();
		if ( !srcstatus.empty() ) {
			//wxASSERT(srcstatus.size() == GetPartCount()); // Obviously!
			if (srcstatus.size() != GetPartCount()) {
				continue;
			}

			if ( KnowNeededParts ) {
				// only send sources which have needed parts for this client
				for (int x = 0; x < GetPartCount(); ++x) {
					if (srcstatus.get(x) && !reqstatus.get(x)) {
						bNeeded = true;
						break;
					}
				}
			} else {
				// If we don't know the needed parts for this client,
				// return any source: a client sends its file status
				// only after it has at least one complete part.
				if (srcstatus.size() != GetPartCount()) {
					continue;
				}

				for (int x = 0; x < GetPartCount(); ++x) {
					if (srcstatus.get(x)) {
						bNeeded = true;
						break;
					}
				}
			}
		}

		if (bNeeded) {
			++nCount;

			uint32 dwID;
			if (forClient->GetSourceExchange1Version() > 2) {
				dwID = cur_src->GetUserIDHybrid();
			} else {
				dwID = wxUINT32_SWAP_ALWAYS(cur_src->GetUserIDHybrid());
			}
			data.WriteUInt32(dwID);
			data.WriteUInt16(cur_src->GetUserPort());
			data.WriteUInt32(cur_src->GetServerIP());
			data.WriteUInt16(cur_src->GetServerPort());

			if (byUsedVersion >= 2) {
				data.WriteHash(cur_src->GetUserHash());
			}

			if (byUsedVersion >= 4) {
				// CryptSettings - SourceExchange V4
				// 5 Reserved
				// 1 CryptLayer Required
				// 1 CryptLayer Requested
				// 1 CryptLayer Supported
				const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
				const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
				const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
				const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
				data.WriteUInt8(byCryptOptions);
			}

			if (nCount > 500) {
				break;
			}
		}
	}

	if (!nCount) {
		return NULL;
	}

	data.Seek(bIsSX2Packet ? 17 : 16, wxFromStart);
	data.WriteUInt16(nCount);

	CPacket* result = new CPacket(data, OP_EMULEPROT, bIsSX2Packet ? OP_ANSWERSOURCES2 : OP_ANSWERSOURCES);

	// 16+2+501*(4+2+4+2+16) = 14046 bytes max.
	if (result->GetPacketSize() > 354) {
		result->PackPacket();
	}

	return result;
}
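
// On-wire layout written by CreateSrcInfoPacket() above and checked again in
// AddClientSources() below: a 16 byte file hash, a 16 bit source count (the SX2
// variant is prefixed by one extra version byte, hence the Seek to 17 instead
// of 16), then one record per source:
//   v1        : 4+2+4+2      = 12 bytes (user ID, port, server IP, server port)
//   v2 and v3 : 4+2+4+2+16   = 28 bytes (adds the 16 byte user hash)
//   v4        : 4+2+4+2+16+1 = 29 bytes (adds the crypt options byte, with
//               bit 0 = supported, bit 1 = requested, bit 2 = required)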

void CPartFile::AddClientSources(CMemFile* sources, unsigned nSourceFrom, uint8 uClientSXVersion, bool bSourceExchange2, const CUpDownClient* /*pClient*/)
{
	uint32 nCount = 0;
	uint8 uPacketSXVersion = 0;
	if (!bSourceExchange2) {
		nCount = sources->ReadUInt16();

		// Check if the data size matches the 'nCount' for v1 or v2 and eventually correct the source
		// exchange version while reading the packet data. Otherwise we could experience a higher
		// chance of dealing with wrong source data, user hashes and finally duplicate sources.
		uint32 uDataSize = sources->GetLength() - sources->GetPosition();

		if ((uint32)(nCount*(4+2+4+2)) == uDataSize) { // Checks if a version 1 packet is the correct size
			if (uClientSXVersion != 1) {
				return;
			}
			uPacketSXVersion = 1;
		} else if ((uint32)(nCount*(4+2+4+2+16)) == uDataSize) { // Checks if a version 2/3 packet is the correct size
			if (uClientSXVersion == 2) {
				uPacketSXVersion = 2;
			} else if (uClientSXVersion > 2) {
				uPacketSXVersion = 3;
			} else {
				return;
			}
		} else if (nCount*(4+2+4+2+16+1) == uDataSize) {
			if (uClientSXVersion != 4 ) {
				return;
			}
			uPacketSXVersion = 4;
		} else {
			// If v5 inserts additional data (like v2), the above code will correctly filter those packets.
			// If v5 appends additional data after <count>(<Sources>)[count], we are in trouble with the
			// above code. Though a client which does not understand v5+ should never receive such a packet.
			AddDebugLogLineN(logClient, CFormat(wxT("Received invalid source exchange packet (v%u) of data size %u for %s")) % uClientSXVersion % uDataSize % GetFileName());
			return;
		}
	} else {
		// We only check if the version is known by us and do a quick sanity check on any known version
		// other than SX1; the packet will be ignored if any error appears, since it can't be a "misunderstanding" anymore.
		if (uClientSXVersion > SOURCEEXCHANGE2_VERSION || uClientSXVersion == 0 ) {
			AddDebugLogLineN(logPartFile, CFormat(wxT("Invalid source exchange type version: %i")) % uClientSXVersion);
			return;
		}

		// all known versions use the first 2 bytes as count; unknown versions were already filtered above
		nCount = sources->ReadUInt16();
		uint32 uDataSize = (uint32)(sources->GetLength() - sources->GetPosition());
		bool bError = false;
		switch (uClientSXVersion) {
			case 1:
				bError = nCount*(4+2+4+2) != uDataSize;
				break;
			case 2:
			case 3:
				bError = nCount*(4+2+4+2+16) != uDataSize;
				break;
			case 4:
				bError = nCount*(4+2+4+2+16+1) != uDataSize;
				break;
		}

		if (bError) {
			AddDebugLogLineN(logPartFile, wxT("Invalid source exchange data size."));
			return;
		}
		uPacketSXVersion = uClientSXVersion;
	}

	for (uint16 i = 0; i != nCount; ++i) {

		uint32 dwID = sources->ReadUInt32();
		uint16 nPort = sources->ReadUInt16();
		uint32 dwServerIP = sources->ReadUInt32();
		uint16 nServerPort = sources->ReadUInt16();

		CMD4Hash userHash;
		if (uPacketSXVersion > 1) {
			userHash = sources->ReadHash();
		}

		uint8 byCryptOptions = 0;
		if (uPacketSXVersion >= 4) {
			byCryptOptions = sources->ReadUInt8();
		}

		// Clients send IDs in the Hybrid format so highID clients with *.*.*.0 won't be falsely switched to a lowID.
		uint32 dwIDED2K;
		if (uPacketSXVersion >= 3) {
			dwIDED2K = wxUINT32_SWAP_ALWAYS(dwID);
		} else {
			dwIDED2K = dwID;
		}

		// check the HighID(IP) - "Filter LAN IPs" and "IPfilter" the received sources' IP addresses
		if (!IsLowID(dwID)) {
			if (!IsGoodIP(dwIDED2K, thePrefs::FilterLanIPs())) {
				// check for 0-IP, localhost and optionally for LAN addresses
				AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - bad IP")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
				continue;
			}
			if (theApp->ipfilter->IsFiltered(dwIDED2K)) {
				AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - IPFilter")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
				continue;
			}
			if (theApp->clientlist->IsBannedClient(dwIDED2K)) {
				continue;
			}
		}

		// additionally check for LowID and own IP
		if (!CanAddSource(dwID, nPort, dwServerIP, nServerPort, NULL, false)) {
			AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via source exchange")) % Uint32toStringIP(dwIDED2K));
			continue;
		}

		if (thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
			CUpDownClient* newsource = new CUpDownClient(nPort, dwID, dwServerIP, nServerPort, this, (uPacketSXVersion < 3), true);
			if (uPacketSXVersion > 1) {
				newsource->SetUserHash(userHash);
			}
			if (uPacketSXVersion >= 4) {
				newsource->SetConnectOptions(byCryptOptions, true, false);
			}
			newsource->SetSourceFrom((ESourceFrom)nSourceFrom);
			theApp->downloadqueue->CheckAndAddSource(this, newsource);
		} else {
			break;
		}
	}
}

void CPartFile::UpdateAutoDownPriority()
{
	if (!IsAutoDownPriority()) {
		return;
	}

	if (GetSourceCount() <= theApp->downloadqueue->GetRareFileThreshold()) {
		if ( GetDownPriority() != PR_HIGH ) {
			SetDownPriority(PR_HIGH, false, false);
		}
	} else if (GetSourceCount() < theApp->downloadqueue->GetCommonFileThreshold()) {
		if ( GetDownPriority() != PR_NORMAL ) {
			SetDownPriority(PR_NORMAL, false, false);
		}
	} else {
		if ( GetDownPriority() != PR_LOW ) {
			SetDownPriority(PR_LOW, false, false);
		}
	}
}


// Making this function return a higher value when more sources have the extended
// protocol will force you to ask a larger variety of people for sources.

int CPartFile::GetCommonFilePenalty()
{
	//TODO: implement, but never return less than MINCOMMONPENALTY!
	return MINCOMMONPENALTY;
}

/* Barry - Replaces BlockReceived()

	Originally this only wrote to disk when a full 180k block
	had been received from a client, and only asked for data in
	180k blocks.

	This meant that on average 90k was lost for every connection
	to a client data source. That is a lot of wasted data.

	To reduce the lost data, packets are now written to a buffer
	and flushed to disk regularly, regardless of the size downloaded.
	This includes compressed packets.

	Data is also requested only where gaps are, not in 180k blocks.
	The requests will still not exceed 180k, but may be smaller to
	fill a gap.
*/

// Kry - transize is 32bits, no packet can be more than that (this is the
// compressed size). Even 32bits is too much imho. As for the return size,
// look at lenData below.
uint32 CPartFile::WriteToBuffer(uint32 transize, byte* data, uint64 start, uint64 end, Requested_Block_Struct* block, const CUpDownClient* client)
{
	// Increment the transferred bytes counter for this file
	transferred += transize;

	// This is needed a few times
	// Kry - should not need a uint64 here - no block is larger than
	// 2GB even after being uncompressed.
	uint32 lenData = (uint32) (end - start + 1);

	if (lenData > transize) {
		m_iGainDueToCompression += lenData - transize;
	}

	// Occasionally packets are duplicated, no point writing it twice
	if (IsComplete(start, end)) {
		AddDebugLogLineN(logPartFile,
			CFormat(wxT("File '%s' has already been written from %u to %u"))
				% GetFileName() % start % end);
		return 0;
	}

	// security sanity check to make sure we do not write anything into an already hashed complete chunk
	const uint64 nStartChunk = start / PARTSIZE;
	const uint64 nEndChunk = end / PARTSIZE;
	if (IsComplete(nStartChunk)) {
		AddDebugLogLineN(logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (start): %u-%u; File=%s")) % start % end % GetFileName());
		return 0;
	} else if (nStartChunk != nEndChunk) {
		if (IsComplete(nEndChunk)) {
			AddDebugLogLineN(logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (end): %u-%u; File=%s")) % start % end % GetFileName());
			return 0;
		} else {
			AddDebugLogLineN(logPartFile, CFormat(wxT("Received data crosses chunk boundaries: %u-%u; File=%s")) % start % end % GetFileName());
		}
	}

	// log transfer information in our "blackbox"
	m_CorruptionBlackBox->TransferredData(start, end, client->GetIP());

	// Create a new buffered queue entry
	PartFileBufferedData *item = new PartFileBufferedData(m_hpartfile, data, start, end, block);

	// Add to the queue in the correct position (most likely the end)
	bool added = false;

	std::list<PartFileBufferedData*>::iterator it = m_BufferedData_list.begin();
	for (; it != m_BufferedData_list.end(); ++it) {
		PartFileBufferedData* queueItem = *it;

		if (item->end <= queueItem->end) {
			if (it != m_BufferedData_list.begin()) {
				added = true;

				m_BufferedData_list.insert(--it, item);
			}

			break;
		}
	}

	if (!added) {
		m_BufferedData_list.push_front(item);
	}

	// Increment the buffer size marker
	m_nTotalBufferData += lenData;

	// Mark this small section of the file as filled
	FillGap(item->start, item->end);

	// Update the flushed mark on the requested block
	// The loop here is unfortunate, but necessary to detect deleted blocks.
	std::list<Requested_Block_Struct*>::iterator it2 = m_requestedblocks_list.begin();
	for (; it2 != m_requestedblocks_list.end(); ++it2) {
		if (*it2 == item->block) {
			item->block->transferred += lenData;
		}
	}

	if (m_gaplist.IsComplete()) {
		FlushBuffer();
	}

	// Return the length of data written to the buffer
	return lenData;
}
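
// Note on the buffering scheme above (descriptive only): each received packet
// becomes one PartFileBufferedData entry (a CFileArea plus its start/end
// offsets) queued in m_BufferedData_list, ordered by end offset. The gap list
// is updated immediately via FillGap(), so the data counts as downloaded right
// away, while the actual disk write is deferred until FlushBuffer() runs or
// the file becomes gap-free.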

void CPartFile::FlushBuffer(bool fromAICHRecoveryDataAvailable)
{
	m_nLastBufferFlushTime = GetTickCount();

	if (m_BufferedData_list.empty()) {
		return;
	}

	uint32 partCount = GetPartCount();
	// Remember which parts need to be checked at the end of the flush
	std::vector<bool> changedPart(partCount, false);

	// Ensure the file is big enough to write the data to (the last item will be the furthest from the start)
	if (!CheckFreeDiskSpace(m_nTotalBufferData)) {
		// Not enough free space to write the last item, bail
		AddLogLineC(CFormat( _("WARNING: Not enough free disk-space! Pausing file: %s") ) % GetFileName());

		PauseFile(true);
		return;
	}

	// Loop through the queue
	while ( !m_BufferedData_list.empty() ) {
		// Get the top item and remove it from the queue
		CScopedPtr<PartFileBufferedData> item(m_BufferedData_list.front());
		m_BufferedData_list.pop_front();

		// This is needed a few times
		wxASSERT((item->end - item->start) < 0xFFFFFFFF);
		uint32 lenData = (uint32)(item->end - item->start + 1);

		// SLUGFILLER: SafeHash - could be more than one part
		for (uint32 curpart = (item->start/PARTSIZE); curpart <= (item->end/PARTSIZE); ++curpart) {
			wxASSERT(curpart < partCount);
			changedPart[curpart] = true;
		}
		// SLUGFILLER: SafeHash

		// Go to the correct position in the file and write the block of data
		try {
			item->area.FlushAt(m_hpartfile, item->start, lenData);
			// Decrease the buffer size
			m_nTotalBufferData -= lenData;
		} catch (const CIOFailureException& e) {
			AddDebugLogLineC(logPartFile, wxT("Error while saving part-file: ") + e.what());
			SetStatus(PS_ERROR);
			// No need to bang your head against it again and again if it has already failed.
			DeleteContents(m_BufferedData_list);
			m_nTotalBufferData = 0;
			return;
		}
	}

	// Update the last-changed date
	m_lastDateChanged = wxDateTime::GetTimeNow();

	try {
		// The partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			// It's a "last chance" correction. The real bugfix has to be applied 'somewhere' else.
			m_hpartfile.SetLength(GetFileSize());
		}
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile,
			CFormat(wxT("Error while truncating part-file (%s): %s"))
				% m_PartPath % e.what());
		SetStatus(PS_ERROR);
	}

	// Check each part of the file
	for (uint16 partNumber = 0; partNumber < partCount; ++partNumber) {
		if (changedPart[partNumber] == false) {
			continue;
		}

		uint32 partRange = GetPartSize(partNumber) - 1;

		// Is this 9MB part complete?
		if (IsComplete(partNumber)) {
			// Is the part corrupt?
			if (!HashSinglePart(partNumber)) {
				AddLogLineC(CFormat(
					_("Downloaded part %i is corrupt in file: %s") ) % partNumber % GetFileName() );
				// add the part to the corrupted list, if not already there
				if (!IsCorruptedPart(partNumber)) {
					m_corrupted_list.push_back(partNumber);
				}
				// request AICH recovery data
				// Don't if called from AICHRecoveryDataAvailable(). It's already there and would lead to an infinite recursion.
				if (!fromAICHRecoveryDataAvailable) {
					RequestAICHRecovery(partNumber);
				}
				// Reduce the transferred amount by the corrupt amount
				m_iLostDueToCorruption += (partRange + 1);
			} else {
				if (!m_hashsetneeded) {
					AddDebugLogLineN(logPartFile, CFormat(
						wxT("Finished part %u of '%s'")) % partNumber % GetFileName());
				}

				// tell the blackbox about the verified data
				m_CorruptionBlackBox->VerifiedData(true, partNumber, 0, partRange);

				// if this part was successfully completed (although ICH is active), remove it from the corrupted list
				EraseFirstValue(m_corrupted_list, partNumber);

				if (status == PS_EMPTY) {
					if (theApp->IsRunning()) { // may be called during shutdown!
						if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
							// Successfully completed part, make it available for sharing
							SetStatus(PS_READY);
							theApp->sharedfiles->SafeAddKFile(this);
						}
					}
				}
			}
		} else if ( IsCorruptedPart(partNumber) &&		// corrupted part:
				(thePrefs::IsICHEnabled()		// old ICH:  rehash whenever we have new data, hoping it will be good now
				 || fromAICHRecoveryDataAvailable)) {	// new AICH: one rehash right before performing it (maybe it's already good)
			// Try to recover with minimal loss
			if (HashSinglePart(partNumber)) {
				++m_iTotalPacketsSavedDueToICH;

				uint64 uMissingInPart = m_gaplist.GetGapSize(partNumber);
				FillGap(partNumber);
				RemoveBlockFromList(PARTSIZE*partNumber, (PARTSIZE*partNumber + partRange));

				// tell the blackbox about the verified data
				m_CorruptionBlackBox->VerifiedData(true, partNumber, 0, partRange);

				// remove from the corrupted list
				EraseFirstValue(m_corrupted_list, partNumber);

				AddLogLineC(CFormat( _("ICH: Recovered corrupted part %i for %s -> Saved bytes: %s") )
					% partNumber
					% GetFileName()
					% CastItoXBytes(uMissingInPart));

				if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
					if (status == PS_EMPTY) {
						// Successfully recovered part, make it available for sharing
						SetStatus(PS_READY);
						if (theApp->IsRunning()) // may be called during shutdown!
							theApp->sharedfiles->SafeAddKFile(this);
					}
				}
			}
		}
	}

	// Update the met file
	SavePartFile();

	if (theApp->IsRunning()) { // may be called during shutdown!
		// Is this file finished?
		if (m_gaplist.IsComplete()) {
			CompleteFile(false);
		}
	}
}
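
// Sketch of what a flush implies for integrity checking (as implemented above):
// after the queued buffers are written, every part that was touched is
// re-examined. A part that is now gap-free is MD4-hashed via HashSinglePart();
// on a mismatch it goes onto m_corrupted_list and AICH recovery data is
// requested, on a match it is reported to the CorruptionBlackBox and may switch
// an empty file to PS_READY so it can be shared. Parts already on the corrupted
// list are re-hashed opportunistically when ICH is enabled or when AICH
// recovery data has just arrived.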

// read data for upload, return false on error
bool CPartFile::ReadData(CFileArea& area, uint64 offset, uint32 toread)
{
	// Sanity check
	if (offset + toread > GetFileSize()) {
		AddDebugLogLineN(logPartFile, CFormat(wxT("tried to read %d bytes past eof of %s"))
			% (offset + toread - GetFileSize()) % GetFileName());
		return false;
	}

	area.ReadAt(m_hpartfile, offset, toread);
	// if it fails it throws (which the caller should catch)
	return true;
}

void CPartFile::UpdateFileRatingCommentAvail()
{
	bool prevComment = m_hasComment;
	int prevRating = m_iUserRating;

	m_hasComment = false;
	m_iUserRating = 0;
	int ratingCount = 0;

	SourceSet::iterator it = m_SrcList.begin();
	for (; it != m_SrcList.end(); ++it) {
		CUpDownClient* cur_src = it->GetClient();

		if (!cur_src->GetFileComment().IsEmpty()) {
			if (thePrefs::IsCommentFiltered(cur_src->GetFileComment())) {
				continue;
			}
			m_hasComment = true;
		}

		uint8 rating = cur_src->GetFileRating();
		if (rating) {
			wxASSERT(rating <= 5);

			ratingCount++;
			m_iUserRating += rating;
		}
	}

	if (ratingCount) {
		m_iUserRating /= ratingCount;
		wxASSERT(m_iUserRating > 0 && m_iUserRating <= 5);
	}

	if ((prevComment != m_hasComment) || (prevRating != m_iUserRating)) {
		UpdateDisplayedInfo();
	}
}

void CPartFile::SetCategory(uint8 cat)
{
	wxASSERT( cat < theApp->glob_prefs->GetCatCount() );

	m_category = cat;
	SavePartFile();
}

bool CPartFile::RemoveSource(CUpDownClient* toremove, bool updatewindow, bool bDoStatsUpdate)
{
	wxASSERT( toremove );

	bool result = theApp->downloadqueue->RemoveSource( toremove, updatewindow, bDoStatsUpdate );

	// Check if the client should be deleted, but not if the client is already dying
	if ( !toremove->GetSocket() && !toremove->HasBeenDeleted() ) {
		if ( toremove->Disconnected(wxT("RemoveSource - purged")) ) {
			toremove->Safe_Delete();
		}
	}

	return result;
}

void CPartFile::AddDownloadingSource(CUpDownClient* client)
{
	CClientRefList::iterator it =
		std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), CCLIENTREF(client, wxEmptyString));
	if (it == m_downloadingSourcesList.end()) {
		m_downloadingSourcesList.push_back(CCLIENTREF(client, wxT("CPartFile::AddDownloadingSource")));
	}
}


void CPartFile::RemoveDownloadingSource(CUpDownClient* client)
{
	CClientRefList::iterator it =
		std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), CCLIENTREF(client, wxEmptyString));
	if (it != m_downloadingSourcesList.end()) {
		m_downloadingSourcesList.erase(it);
	}
}

uint64 CPartFile::GetNeededSpace()
{
	try {
		uint64 length = m_hpartfile.GetLength();

		if (length > GetFileSize()) {
			return 0;	// Shouldn't happen, but just in case
		}

		return GetFileSize() - length;
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile,
			CFormat(wxT("Error while retrieving file-length (%s): %s"))
				% m_PartPath % e.what());
		SetStatus(PS_ERROR);
		return 0;
	}
}

void CPartFile::SetStatus(uint8 in)
{
	// PAUSED and INSUFFICIENT have the extra flag variables m_paused and m_insufficient
	// - they are never to be stored in status
	wxASSERT( in != PS_PAUSED && in != PS_INSUFFICIENT );

	status = in;

	if (theApp->IsRunning()) {
		UpdateDisplayedInfo( true );

		if ( thePrefs::ShowCatTabInfos() ) {
			Notify_ShowUpdateCatTabTitles();
		}
		Notify_DownloadCtrlSort();
	}
}

void CPartFile::RequestAICHRecovery(uint16 nPart)
{
	if ( !m_pAICHHashSet->HasValidMasterHash() ||
		(m_pAICHHashSet->GetStatus() != AICH_TRUSTED && m_pAICHHashSet->GetStatus() != AICH_VERIFIED)) {
		AddDebugLogLineN( logAICHRecovery, wxT("Unable to request AICH Recoverydata because we have no trusted Masterhash") );
		return;
	}

	if (GetPartSize(nPart) <= EMBLOCKSIZE) {
		return;
	}

	if (CAICHHashSet::IsClientRequestPending(this, nPart)) {
		AddDebugLogLineN( logAICHRecovery, wxT("RequestAICHRecovery: Already a request for this part pending"));
		return;
	}

	// first check if we already have the recovery data, no need to re-request it then
	if (m_pAICHHashSet->IsPartDataAvailable(nPart*PARTSIZE)) {
		AddDebugLogLineN( logAICHRecovery, wxT("Found PartRecoveryData in memory"));
		AICHRecoveryDataAvailable(nPart);
		return;
	}

	wxASSERT( nPart < GetPartCount() );
	// find some random client which supports AICH to ask for the blocks
	// first let's see how many we have at all; we prefer high-ID very much
	uint32 cAICHClients = 0;
	uint32 cAICHLowIDClients = 0;
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		CUpDownClient* pCurClient = it->GetClient();
		if ( pCurClient->IsSupportingAICH() &&
			 pCurClient->GetReqFileAICHHash() != NULL &&
			 !pCurClient->IsAICHReqPending() &&
			 (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
		{
			if (pCurClient->HasLowID()) {
				++cAICHLowIDClients;
			} else {
				++cAICHClients;
			}
		}
	}
	if ((cAICHClients | cAICHLowIDClients) == 0) {
		AddDebugLogLineN( logAICHRecovery, wxT("Unable to request AICH Recoverydata because found no client who supports it and has the same hash as the trusted one"));
		return;
	}

	uint32 nSeclectedClient;
	if (cAICHClients > 0) {
		nSeclectedClient = (rand() % cAICHClients) + 1;
	} else {
		nSeclectedClient = (rand() % cAICHLowIDClients) + 1;
	}
	CUpDownClient* pClient = NULL;
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		CUpDownClient* pCurClient = it->GetClient();
		if (pCurClient->IsSupportingAICH() && pCurClient->GetReqFileAICHHash() != NULL && !pCurClient->IsAICHReqPending()
			&& (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
		{
			if (cAICHClients > 0) {
				if (!pCurClient->HasLowID()) {
					nSeclectedClient--;
				}
			} else {
				wxASSERT( pCurClient->HasLowID());
				nSeclectedClient--;
			}
			if (nSeclectedClient == 0) {
				pClient = pCurClient;
				break;
			}
		}
	}
	if (pClient == NULL) {
		return;
	}

	AddDebugLogLineN( logAICHRecovery, CFormat( wxT("Requesting AICH Hash (%s) from client %s") ) % ( cAICHClients ? wxT("HighId") : wxT("LowID") ) % pClient->GetClientFullInfo() );
	pClient->SendAICHRequest(this, nPart);
}

void CPartFile::AICHRecoveryDataAvailable(uint16 nPart)
{
	if (GetPartCount() < nPart) {
		return;
	}

	FlushBuffer(true);

	uint32 length = GetPartSize(nPart);
	// if the part was already ok, it would now be complete
	if (IsComplete(nPart)) {
		AddDebugLogLineN(logAICHRecovery, CFormat(wxT("Processing AICH Recovery data: The part (%u) is already complete, canceling")) % nPart);
		return;
	}

	CAICHHashTree* pVerifiedHash = m_pAICHHashSet->m_pHashTree.FindHash(nPart*PARTSIZE, length);
	if (pVerifiedHash == NULL || !pVerifiedHash->GetHashValid()) {
		AddDebugLogLineC( logAICHRecovery, wxT("Processing AICH Recovery data: Unable to get verified hash from hashset (should never happen)") );
		return;
	}

	CAICHHashTree htOurHash(pVerifiedHash->GetNDataSize(), pVerifiedHash->GetIsLeftBranch(), pVerifiedHash->GetNBaseSize());
	try {
		CreateHashFromFile(m_hpartfile, PARTSIZE * nPart, length, NULL, &htOurHash);
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logAICHRecovery,
			CFormat(wxT("IO failure while hashing part-file '%s': %s"))
				% m_hpartfile.GetFilePath() % e.what());
		SetStatus(PS_ERROR);
		return;
	}

	if (!htOurHash.GetHashValid()) {
		AddDebugLogLineN( logAICHRecovery, wxT("Processing AICH Recovery data: Failed to retrieve AICH Hashset of corrupt part") );
		return;
	}

	// now compare the hash we just did to the verified hash and re-add all blocks which are ok
	uint32 nRecovered = 0;
	for (uint32 pos = 0; pos < length; pos += EMBLOCKSIZE) {
		const uint32 nBlockSize = min<uint32>(EMBLOCKSIZE, length - pos);
		CAICHHashTree* pVerifiedBlock = pVerifiedHash->FindHash(pos, nBlockSize);
		CAICHHashTree* pOurBlock = htOurHash.FindHash(pos, nBlockSize);
		if ( pVerifiedBlock == NULL || pOurBlock == NULL || !pVerifiedBlock->GetHashValid() || !pOurBlock->GetHashValid()) {
			continue;
		}
		if (pOurBlock->GetHash() == pVerifiedBlock->GetHash()) {
			FillGap(PARTSIZE*nPart + pos, PARTSIZE*nPart + pos + (nBlockSize - 1));
			RemoveBlockFromList(PARTSIZE*nPart, PARTSIZE*nPart + (nBlockSize - 1));
			nRecovered += nBlockSize;
			// tell the blackbox about the verified data
			m_CorruptionBlackBox->VerifiedData(true, nPart, pos, pos + nBlockSize - 1);
		} else {
			// inform our "blackbox" about the corrupted block, which may ban the clients who sent it
			m_CorruptionBlackBox->VerifiedData(false, nPart, pos, pos + nBlockSize - 1);
		}
	}
	m_CorruptionBlackBox->EvaluateData();

	// ok, now some sanity checks
	if (IsComplete(nPart)) {
		// this is bad, but it could probably happen under some rare circumstances
		// make sure that MD4 agrees to this fact too
		if (!HashSinglePart(nPart)) {
			AddDebugLogLineN(logAICHRecovery,
				CFormat(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering - but MD4 says it corrupt! Setting hashset to error state, deleting part")) % nPart);
			// now we are fu... unhappy
			m_pAICHHashSet->SetStatus(AICH_ERROR);
		} else {
			AddDebugLogLineN(logAICHRecovery,
				CFormat(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering and MD4 agrees")) % nPart);
			if (status == PS_EMPTY && theApp->IsRunning()) {
				if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
					// Successfully recovered part, make it available for sharing
					SetStatus(PS_READY);
					theApp->sharedfiles->SafeAddKFile(this);
				}
			}

			if (theApp->IsRunning()) {
				// Is this file finished?
				if (m_gaplist.IsComplete()) {
					CompleteFile(false);
				}
			}
		}
	} // end sanity check

	// We did the best we could. If it's still incomplete, there is no need to keep
	// bashing it with ICH. So remove it from the list of corrupted parts.
	EraseFirstValue(m_corrupted_list, nPart);

	// make sure the user appreciates our great recovering work :P
	AddDebugLogLineC( logAICHRecovery, CFormat(
		wxT("AICH successfully recovered %s of %s from part %u for %s") )
		% CastItoXBytes(nRecovered)
		% CastItoXBytes(length)
		% nPart
		% GetFileName() );
}
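
// The recovery above works at EMBLOCKSIZE granularity (the 180 kB eMule block):
// each block of the corrupt part is hashed locally and compared against the
// verified AICH hash tree. Matching blocks are re-filled in the gap list and
// credited to their senders via the CorruptionBlackBox, mismatching blocks stay
// missing and are reported as corrupt, so typically only a fraction of the
// 9.28 MB part has to be downloaded again.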

void CPartFile::ClientStateChanged( int oldState, int newState )
{
	if ( oldState == newState ) {
		return;
	}

	// If the state is -1, then it's an entirely new item
	if ( oldState != -1 ) {
		// Was the old state a valid state?
		if ( oldState == DS_ONQUEUE || oldState == DS_DOWNLOADING ) {
			m_validSources--;
		} else {
			if ( oldState == DS_CONNECTED /* || oldState == DS_REMOTEQUEUEFULL */ ) {
				m_validSources--;
			}

			m_notCurrentSources--;
		}
	}

	// If the state is -1, then the source is being removed
	if ( newState != -1 ) {
		// Is the new state a valid state?
		if ( newState == DS_ONQUEUE || newState == DS_DOWNLOADING ) {
			++m_validSources;
		} else {
			if ( newState == DS_CONNECTED /* || newState == DS_REMOTEQUEUEFULL */ ) {
				++m_validSources;
			}

			++m_notCurrentSources;
		}
	}
}

bool CPartFile::AddSource( CUpDownClient* client )
{
	if (m_SrcList.insert(CCLIENTREF(client, wxT("CPartFile::AddSource"))).second) {
		theStats::AddFoundSource();
		theStats::AddSourceOrigin(client->GetSourceFrom());
		return true;
	}
	return false;
}


bool CPartFile::DelSource( CUpDownClient* client )
{
	if (m_SrcList.erase(CCLIENTREF(client, wxEmptyString))) {
		theStats::RemoveSourceOrigin(client->GetSourceFrom());
		theStats::RemoveFoundSource();
		return true;
	}
	return false;
}

void CPartFile::UpdatePartsFrequency( CUpDownClient* client, bool increment )
{
	const BitVector& freq = client->GetPartStatus();

	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

		if ( !increment ) {
			return;
		}
	}

	unsigned int size = freq.size();
	if ( size != m_SrcpartFrequency.size() ) {
		return;
	}

	if ( increment ) {
		for ( unsigned int i = 0; i < size; i++ ) {
			if ( freq.get(i) ) {
				m_SrcpartFrequency[i]++;
			}
		}
	} else {
		for ( unsigned int i = 0; i < size; i++ ) {
			if ( freq.get(i) ) {
				m_SrcpartFrequency[i]--;
			}
		}
	}
}
& list
) const
3545 // This can be pre-processed, but is it worth the CPU?
3546 CPartFile::SourceSet::const_iterator it
= m_SrcList
.begin();
3547 for ( ; it
!= m_SrcList
.end(); ++it
) {
3548 CUpDownClient
*cur_src
= it
->GetClient();
3549 if (cur_src
->GetFileComment().Length()>0 || cur_src
->GetFileRating()>0) {
3550 // AddDebugLogLineN(logPartFile, wxString(wxT("found a comment for ")) << GetFileName());
3551 list
.push_back(SFileRating(*cur_src
));

#else // CLIENT_GUI

CPartFile::CPartFile(CEC_PartFile_Tag* tag) : CKnownFile(tag)
{
	Init();

	SetFileName(CPath(tag->FileName()));
	m_abyFileHash = tag->FileHash();
	SetFileSize(tag->SizeFull());
	m_gaplist.Init(GetFileSize(), true);	// Init empty
	m_partmetfilename = CPath(tag->PartMetName());
	m_fullname = m_partmetfilename;		// We have only the met number, so show it without a path in the detail dialog.

	m_SrcpartFrequency.insert(m_SrcpartFrequency.end(), GetPartCount(), 0);

	// these are only in CLIENT_GUI and not covered by Init()
	m_iDownPriorityEC = 0;
	m_a4af_source_count = 0;
}

/*
 * Remote gui specific code
 */
CPartFile::~CPartFile()
{
}


void CPartFile::GetRatingAndComments(FileRatingList& list) const
{
	list = m_FileRatingList;
}


void CPartFile::SetCategory(uint8 cat)
{
	m_category = cat;
}


bool CPartFile::AddSource(CUpDownClient* client)
{
	return m_SrcList.insert(CCLIENTREF(client, wxT("CPartFile::AddSource"))).second != 0;
}


bool CPartFile::DelSource(CUpDownClient* client)
{
	return m_SrcList.erase(CCLIENTREF(client, wxEmptyString)) != 0;
}


#endif // !CLIENT_GUI

void CPartFile::UpdateDisplayedInfo(bool force)
{
	uint32 curTick = ::GetTickCount();

	// Wait 1.5s between each redraw
	if (force || curTick - m_lastRefreshedDLDisplay > MINWAIT_BEFORE_DLDISPLAY_WINDOWUPDATE) {
		Notify_DownloadCtrlUpdateItem(this);
		m_lastRefreshedDLDisplay = curTick;
	}
}

void CPartFile::Init()
{
	m_lastsearchtime = 0;
	lastpurgetime = ::GetTickCount();
	m_insufficient = false;
	m_iLastPausePurge = time(NULL);

	if (thePrefs::GetNewAutoDown()) {
		m_iDownPriority = PR_HIGH;
		m_bAutoDownPriority = true;
	} else {
		m_iDownPriority = PR_NORMAL;
		m_bAutoDownPriority = false;
	}

	transferingsrc = 0; // new

	m_hashsetneeded = true;
	percentcompleted = 0;
	m_bPreviewing = false;
	lastseencomplete = 0;
	m_availablePartsCount = 0;
	m_ClientSrcAnswered = 0;
	m_LastNoNeededCheck = 0;
	m_nTotalBufferData = 0;
	m_nLastBufferFlushTime = 0;
	m_bPercentUpdated = false;
	m_bRecoveringArchive = false;
	m_iGainDueToCompression = 0;
	m_iLostDueToCorruption = 0;
	m_iTotalPacketsSavedDueToICH = 0;
	m_lastRefreshedDLDisplay = 0;
	m_nDlActiveTime = 0;
	m_is_A4AF_auto = false;
	m_localSrcReqQueued = false;
	m_nCompleteSourcesTime = time(NULL);
	m_nCompleteSourcesCount = 0;
	m_nCompleteSourcesCountLo = 0;
	m_nCompleteSourcesCountHi = 0;
	m_notCurrentSources = 0;
	m_LastSearchTimeKad = 0;
	m_TotalSearchesKad = 0;

	m_CorruptionBlackBox = new CCorruptionBlackBox();
}

wxString CPartFile::getPartfileStatus() const
{
	wxString mybuffer;

	if ((status == PS_HASHING) || (status == PS_WAITINGFORHASH)) {
		mybuffer = _("Hashing");
	} else if (status == PS_ALLOCATING) {
		mybuffer = _("Allocating");
	} else {
		switch (GetStatus()) {
			case PS_COMPLETING:
				mybuffer = _("Completing");
				break;
			case PS_COMPLETE:
				mybuffer = _("Complete");
				break;
			case PS_PAUSED:
				mybuffer = _("Paused");
				break;
			case PS_ERROR:
				mybuffer = _("Erroneous");
				break;
			case PS_INSUFFICIENT:
				mybuffer = _("Insufficient disk space");
				break;
			default:
				if (GetTransferingSrcCount() > 0) {
					mybuffer = _("Downloading");
				} else {
					mybuffer = _("Waiting");
				}
				break;
		}
		if (m_stopped && (GetStatus() != PS_COMPLETE)) {
			mybuffer = _("Stopped");
		}
	}

	return mybuffer;
}

int CPartFile::getPartfileStatusRang() const
{
	int tempstatus = 0;
	if (GetTransferingSrcCount() == 0) tempstatus = 1;
	switch (GetStatus()) {
		case PS_HASHING:
		case PS_WAITINGFORHASH:
			tempstatus = 3;
			break;
		case PS_COMPLETING:
			tempstatus = 4;
			break;
		case PS_COMPLETE:
			tempstatus = 5;
			break;
		case PS_PAUSED:
			tempstatus = 2;
			break;
		case PS_ERROR:
			tempstatus = 6;
			break;
	}
	return tempstatus;
}

wxString CPartFile::GetFeedback() const
{
	wxString retval = CKnownFile::GetFeedback();
	if (GetStatus() != PS_COMPLETE) {
		retval += CFormat(wxT("%s: %s (%.2f%%)\n%s: %u\n"))
			% _("Downloaded") % CastItoXBytes(GetCompletedSize()) % GetPercentCompleted() % _("Sources") % GetSourceCount();
	}
	return retval + _("Status") + wxT(": ") + getPartfileStatus() + wxT("\n");
}


sint32 CPartFile::getTimeRemaining() const
{
	if (GetKBpsDown() < 0.001) {
		return -1;
	}
	return ((GetFileSize() - GetCompletedSize()) / ((int)(GetKBpsDown() * 1024.0)));
}

bool CPartFile::PreviewAvailable()
{
	const uint64 minSizeForPreview = 256 * 1024;
	FileType type = GetFiletype(GetFileName());

	return (type == ftVideo || type == ftAudio) &&
		GetFileSize() >= minSizeForPreview &&
		IsComplete(0, minSizeForPreview);
}

bool CPartFile::CheckShowItemInGivenCat(int inCategory)
{
	// first check if item belongs in this cat in principle
	if (inCategory > 0 && inCategory != GetCategory()) {
		return false;
	}

	// if yes apply filter
	bool show = true;

	switch (thePrefs::GetAllcatFilter()) {
		case acfAllOthers:
			show = GetCategory() == 0 || inCategory > 0;
			break;
		case acfIncompleteDownloads:
			show = IsPartFile();
			break;
		case acfCompletedDownloads:
			show = !IsPartFile();
			break;
		case acfWaiting:
			show =
				(GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
				GetTransferingSrcCount() == 0;
			break;
		case acfDownloading:
			show =
				(GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
				GetTransferingSrcCount() > 0;
			break;
		case acfErroneous:
			show = GetStatus() == PS_ERROR;
			break;
		case acfPaused:
			show = GetStatus() == PS_PAUSED && !IsStopped();
			break;
		case acfStopped:
			show = IsStopped();
			break;
		case acfVideo:
			show = GetFiletype(GetFileName()) == ftVideo;
			break;
		case acfAudio:
			show = GetFiletype(GetFileName()) == ftAudio;
			break;
		case acfArchive:
			show = GetFiletype(GetFileName()) == ftArchive;
			break;
		case acfCDImages:
			show = GetFiletype(GetFileName()) == ftCDImage;
			break;
		case acfPictures:
			show = GetFiletype(GetFileName()) == ftPicture;
			break;
		case acfText:
			show = GetFiletype(GetFileName()) == ftText;
			break;
		case acfActive:
			show = !IsStopped() && GetStatus() != PS_PAUSED;
			break;
		default:
			show = true;
			break;
	}

	return show;
}
)
3858 if (m_category
== cat
) {
3859 // Reset the category
3861 } else if (m_category
> cat
) {
3862 // Set to the new position of the original category
3868 void CPartFile::SetActive(bool bActive
)
3870 time_t tNow
= time(NULL
);
3872 if (theApp
->IsConnected()) {
3873 if (m_tActivated
== 0) {
3874 m_tActivated
= tNow
;
3878 if (m_tActivated
!= 0) {
3879 m_nDlActiveTime
+= tNow
- m_tActivated
;

uint32 CPartFile::GetDlActiveTime() const
{
	uint32 nDlActiveTime = m_nDlActiveTime;
	if (m_tActivated != 0) {
		nDlActiveTime += time(NULL) - m_tActivated;
	}
	return nDlActiveTime;
}

uint16 CPartFile::GetPartMetNumber() const
{
	long nr;
	return m_partmetfilename.RemoveAllExt().GetRaw().ToLong(&nr) ? nr : 0;
}

uint8 CPartFile::GetStatus(bool ignorepause) const
{
	if (	(!m_paused && !m_insufficient) ||
		status == PS_ERROR ||
		status == PS_COMPLETING ||
		status == PS_COMPLETE ||
		ignorepause) {
		return status;
	} else if ( m_insufficient ) {
		return PS_INSUFFICIENT;
	} else {
		return PS_PAUSED;
	}
}
* client
)
3922 m_deadSources
.AddDeadSource( client
);
3926 bool CPartFile::IsDeadSource(const CUpDownClient
* client
)
3928 return m_deadSources
.IsDeadSource( client
);

void CPartFile::SetFileName(const CPath& fileName)
{
	CKnownFile* pFile = theApp->sharedfiles->GetFileByID(GetFileHash());

	bool is_shared = (pFile && pFile == this);

	if (is_shared) {
		// The file is shared, we must clear the search keywords so we don't
		// publish the old name anymore.
		theApp->sharedfiles->RemoveKeywords(this);
	}

	CKnownFile::SetFileName(fileName);

	if (is_shared) {
		// And of course, we must advertise the new name if the file is shared.
		theApp->sharedfiles->AddKeywords(this);
	}

	UpdateDisplayedInfo(true);
}

uint16 CPartFile::GetMaxSources() const
{
	// This is just like this while we don't import the private max-sources-per-file setting
	return thePrefs::GetMaxSourcePerFile();
}


uint16 CPartFile::GetMaxSourcePerFileSoft() const
{
	unsigned int temp = ((unsigned int)GetMaxSources() * 9L) / 10;
	if (temp > MAX_SOURCES_FILE_SOFT) {
		return MAX_SOURCES_FILE_SOFT;
	}
	return temp;
}


uint16 CPartFile::GetMaxSourcePerFileUDP() const
{
	unsigned int temp = ((unsigned int)GetMaxSources() * 3L) / 4;
	if (temp > MAX_SOURCES_FILE_UDP) {
		return MAX_SOURCES_FILE_UDP;
	}
	return temp;
}

#define DROP_FACTOR 2

CUpDownClient* CPartFile::GetSlowerDownloadingClient(uint32 speed, CUpDownClient* caller) {
//	printf("Start slower source calculation\n");
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = it++->GetClient();
		if ((cur_src->GetDownloadState() == DS_DOWNLOADING) && (cur_src != caller)) {
			uint32 factored_bytes_per_second = static_cast<uint32>(
				(cur_src->GetKBpsDown() * 1024) * DROP_FACTOR);
			if ( factored_bytes_per_second < speed ) {
//				printf("Selecting source %p to drop: %d < %d\n", cur_src, factored_bytes_per_second, speed);
//				printf("End slower source calculation\n");
				return cur_src;
			}
//			printf("Not selecting source %p to drop: %d > %d\n", cur_src, factored_bytes_per_second, speed);
		}
	}
//	printf("End slower source calculation\n");
	return NULL;
}
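
// Reading of the check above: a downloading source is considered "slower" and
// eligible to be dropped only if twice its current rate (DROP_FACTOR) is still
// below the rate offered by the caller, e.g. a 30 kB/s source is dropped for a
// caller doing 70 kB/s (60 < 70) but kept for one doing 50 kB/s.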

void CPartFile::AllocationFinished()
{
	// see if it can be opened
	if (!m_hpartfile.Open(m_PartPath, CFile::read_write)) {
		AddLogLineN(CFormat(_("ERROR: Failed to open partfile '%s'")) % GetFullName());
		SetStatus(PS_ERROR);
	}
	// then close the handle again
	m_hpartfile.Release(true);
}

// File_checked_for_headers