// This file is part of the aMule Project.
//
// Copyright (c) 2003-2008 aMule Team ( admin@amule.org / http://www.amule.org )
// Copyright (c) 2002-2008 Merkur ( devs@emule-project.net / http://www.emule-project.net )
//
// Any parts of this program derived from the xMule, lMule or eMule project,
// or contributed by third-party developers are copyrighted by their
// respective authors.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
#include "PartFile.h"            // Interface declarations.

#include "config.h"              // Needed for VERSION

#include <protocol/kad/Constants.h>
#include <protocol/ed2k/Client2Client/TCP.h>
#include <protocol/Protocols.h>
#include <common/DataFileVersion.h>
#include <common/Constants.h>
#include <tags/FileTags.h>

#include <wx/tokenzr.h>          // Needed for wxStringTokenizer

#include "KnownFileList.h"       // Needed for CKnownFileList
#include "CanceledFileList.h"
#include "UploadQueue.h"         // Needed for CFileHash
#include "IPFilter.h"            // Needed for CIPFilter
#include "Server.h"              // Needed for CServer
#include "ServerConnect.h"       // Needed for CServerConnect
#include "updownclient.h"        // Needed for CUpDownClient
#include "MemFile.h"             // Needed for CMemFile
#include "Preferences.h"         // Needed for CPreferences
#include "DownloadQueue.h"       // Needed for CDownloadQueue
#include "amule.h"               // Needed for theApp
#include "ED2KLink.h"            // Needed for CED2KLink
#include "Packet.h"              // Needed for CTag
#include "SearchList.h"          // Needed for CSearchFile
#include "ClientList.h"          // Needed for clientlist
#include "Statistics.h"          // Needed for theStats

#include <common/Format.h>       // Needed for CFormat
#include <common/FileFunctions.h>	// Needed for GetLastModificationTime
#include "ThreadTasks.h"         // Needed for CHashingTask/CCompletionTask/CAllocateFileTask
#include "GuiEvents.h"           // Needed for Notify_*
#include "DataToText.h"          // Needed for OriginToText()
#include "PlatformSpecific.h"    // Needed for CreateSparseFile()
#include "FileArea.h"            // Needed for CFileArea
#include "ScopedPtr.h"           // Needed for CScopedArray
#include "CorruptionBlackBox.h"

#include "kademlia/kademlia/Kademlia.h"
#include "kademlia/kademlia/Search.h"
SFileRating::SFileRating(const wxString &u, const wxString &f, sint16 r, const wxString &c)

SFileRating::SFileRating(const SFileRating &fr)
    UserName(fr.UserName),
    FileName(fr.FileName),

SFileRating::SFileRating(const CUpDownClient &client)
    UserName(client.GetUserName()),
    FileName(client.GetClientFilename()),
    Rating(client.GetFileRating()),
    Comment(client.GetFileComment())

SFileRating::~SFileRating()

class PartFileBufferedData
    CFileArea area;                    // File area to be written
    uint64 start;                      // This is the start offset of the data
    uint64 end;                        // This is the end offset of the data
    Requested_Block_Struct *block;     // This is the requested block that this data relates to

    PartFileBufferedData(CFileAutoClose& file, byte* data, uint64 _start, uint64 _end, Requested_Block_Struct *_block)
        : start(_start), end(_end), block(_block)
        area.StartWriteAt(file, start, end-start+1);
        memcpy(area.GetBuffer(), data, end-start+1);

typedef std::list<Chunk> ChunkList;
CPartFile::CPartFile()

CPartFile::CPartFile(CSearchFile* searchresult)
    m_abyFileHash = searchresult->GetFileHash();
    SetFileName(searchresult->GetFileName());
    SetFileSize(searchresult->GetFileSize());

    for (unsigned int i = 0; i < searchresult->m_taglist.size(); ++i){
        const CTag& pTag = searchresult->m_taglist[i];

        bool bTagAdded = false;
        if (pTag.GetNameID() == 0 && !pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
            static const struct {
                { wxT(FT_ED2K_MEDIA_ARTIST),  2 },
                { wxT(FT_ED2K_MEDIA_ALBUM),   2 },
                { wxT(FT_ED2K_MEDIA_TITLE),   2 },
                { wxT(FT_ED2K_MEDIA_LENGTH),  2 },
                { wxT(FT_ED2K_MEDIA_BITRATE), 3 },
                { wxT(FT_ED2K_MEDIA_CODEC),   2 }

            for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
                if ( pTag.GetType() == _aMetaTags[t].nType &&
                     (pTag.GetName() == _aMetaTags[t].pszName)) {
                    // skip string tags with empty string values
                    if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

                    // skip "length" tags with "0: 0" values
                    if (pTag.GetName() == wxT(FT_ED2K_MEDIA_LENGTH)) {
                        if (pTag.GetStr().IsSameAs(wxT("0: 0")) ||
                            pTag.GetStr().IsSameAs(wxT("0:0"))) {

                    // skip "bitrate" tags with '0' values
                    if ((pTag.GetName() == wxT(FT_ED2K_MEDIA_BITRATE)) && !pTag.GetInt()) {

                    AddDebugLogLineN( logPartFile,
                        wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
                        pTag.GetFullInfo() );
                    m_taglist.push_back(pTag);

        } else if (pTag.GetNameID() != 0 && pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
            static const struct {

            for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
                if (pTag.GetType() == _aMetaTags[t].nType && pTag.GetNameID() == _aMetaTags[t].nID) {
                    // skip string tags with empty string values
                    if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

                    AddDebugLogLineN( logPartFile,
                        wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
                        pTag.GetFullInfo() );
                    m_taglist.push_back(pTag);

            AddDebugLogLineN( logPartFile,
                wxT("CPartFile::CPartFile(CSearchFile*): ignored tag ") +
                pTag.GetFullInfo() );
CPartFile::CPartFile(const CED2KFileLink* fileLink)
    SetFileName(CPath(fileLink->GetName()));
    SetFileSize(fileLink->GetSize());
    m_abyFileHash = fileLink->GetHashKey();

    if (fileLink->m_hashset) {
        if (!LoadHashsetFromFile(fileLink->m_hashset, true)) {
            AddDebugLogLineC(logPartFile, wxT("eD2K link contained invalid hashset: ") + fileLink->GetLink());

CPartFile::~CPartFile()
    // if it's not opened, it was completed or deleted
    if (m_hpartfile.IsOpened()) {

    // Update met file (with current directory entry)

    DeleteContents(m_BufferedData_list);
    delete m_CorruptionBlackBox;

    wxASSERT(m_SrcList.empty());
    wxASSERT(m_A4AFsrclist.empty());
void CPartFile::CreatePartFile()
    // use lowest free partfilenumber for free file (InterCeptor)
        m_partmetfilename = CPath(CFormat(wxT("%03i.part.met")) % i);
        m_fullname = thePrefs::GetTempDir().JoinPaths(m_partmetfilename);
    } while (m_fullname.FileExists());

    m_CorruptionBlackBox->SetPartFileInfo(GetFileName().GetPrintable(), m_partmetfilename.RemoveAllExt().GetPrintable());

    wxString strPartName = m_partmetfilename.RemoveExt().GetRaw();
    m_taglist.push_back(CTagString(FT_PARTFILENAME, strPartName));

    m_gaplist.Init(GetFileSize(), true);   // Init empty

    m_PartPath = m_fullname.RemoveExt();
    if (thePrefs::GetAllocFullFile()) {
        fileCreated = m_hpartfile.Create(m_PartPath, true);
        fileCreated = PlatformSpecific::CreateSparseFile(m_PartPath, GetFileSize());
        AddLogLineN(_("ERROR: Failed to create partfile"));
    SetFilePath(thePrefs::GetTempDir());

    if (thePrefs::GetAllocFullFile()) {
        SetStatus(PS_ALLOCATING);
        CThreadScheduler::AddTask(new CAllocateFileTask(this, thePrefs::AddNewFilesPaused()));
        AllocationFinished();

    m_hashsetneeded = (GetED2KPartHashCount() > 0);

    SetActive(theApp->IsConnected());
uint8 CPartFile::LoadPartFile(const CPath& in_directory, const CPath& filename, bool from_backup, bool getsizeonly)
    bool isnewstyle = false;
    uint8 version, partmettype = PMT_UNKNOWN;

    std::map<uint16, Gap_Struct*> gap_map; // Slugfiller

    m_partmetfilename = filename;
    m_CorruptionBlackBox->SetPartFileInfo(GetFileName().GetPrintable(), m_partmetfilename.RemoveAllExt().GetPrintable());
    m_filePath = in_directory;
    m_fullname = m_filePath.JoinPaths(m_partmetfilename);
    m_PartPath = m_fullname.RemoveExt();
    // read file data from the part.met file
    CPath curMetFilename = m_fullname;
        curMetFilename = curMetFilename.AppendExt(PARTMET_BAK_EXT);
        AddLogLineN(CFormat( _("Trying to load backup of met-file from %s") )

        CFile metFile(curMetFilename, CFile::read);
        if (!metFile.IsOpened()) {
            AddLogLineN(CFormat( _("ERROR: Failed to open part.met file: %s ==> %s") )
        } else if (metFile.GetLength() == 0) {
            AddLogLineN(CFormat( _("ERROR: part.met file is 0 size: %s ==> %s") )

        version = metFile.ReadUInt8();
        if (version != PARTFILE_VERSION && version != PARTFILE_SPLITTEDVERSION && version != PARTFILE_VERSION_LARGEFILE){
            //if (version == 83) return ImportShareazaTempFile(...)
            AddLogLineN(CFormat( _("ERROR: Invalid part.met file version: %s ==> %s") )

        isnewstyle = (version == PARTFILE_SPLITTEDVERSION);
        partmettype = isnewstyle ? PMT_SPLITTED : PMT_DEFAULTOLD;

        if (version == PARTFILE_VERSION) {// Do we still need this check ?
            uint8 test[4];   // It will fail for certain files.
            metFile.Seek(24, wxFromStart);
            metFile.Read(test,4);

            metFile.Seek(1, wxFromStart);
            if (test[0]==0 && test[1]==0 && test[2]==2 && test[3]==1) {
                isnewstyle=true;   // edonkeys so called "old part style"
                partmettype=PMT_NEWOLD;

            uint32 temp = metFile.ReadUInt32();
            if (temp==0) {   // 0.48 partmets - different again
                LoadHashsetFromFile(&metFile, false);
                metFile.Seek(2, wxFromStart);
                LoadDateFromFile(&metFile);
                m_abyFileHash = metFile.ReadHash();

            LoadDateFromFile(&metFile);
            LoadHashsetFromFile(&metFile, false);

        uint32 tagcount = metFile.ReadUInt32();

        for (uint32 j = 0; j < tagcount; ++j) {
            CTag newtag(metFile,true);
                    (newtag.GetNameID() == FT_FILESIZE ||
                     newtag.GetNameID() == FT_FILENAME))) {
                switch(newtag.GetNameID()) {
                        if (!GetFileName().IsOk()) {
                            // If it's not empty, we already loaded the unicoded one
                            SetFileName(CPath(newtag.GetStr()));
                    case FT_LASTSEENCOMPLETE: {
                        lastseencomplete = newtag.GetInt();
                        SetFileSize(newtag.GetInt());
                    case FT_TRANSFERRED: {
                        transferred = newtag.GetInt();
                        //#warning needs setfiletype string
                        //SetFileType(newtag.GetStr());
                        m_category = newtag.GetInt();
                        if (m_category > theApp->glob_prefs->GetCatCount() - 1 ) {
                    case FT_OLDDLPRIORITY:
                    case FT_DLPRIORITY: {
                            m_iDownPriority = newtag.GetInt();
                            if( m_iDownPriority == PR_AUTO ){
                                m_iDownPriority = PR_HIGH;
                                SetAutoDownPriority(true);
                                if ( m_iDownPriority != PR_LOW &&
                                     m_iDownPriority != PR_NORMAL &&
                                     m_iDownPriority != PR_HIGH)
                                    m_iDownPriority = PR_NORMAL;
                                SetAutoDownPriority(false);
                        m_paused = (newtag.GetInt() == 1);
                        m_stopped = m_paused;
                    case FT_OLDULPRIORITY:
                    case FT_ULPRIORITY: {
                            SetUpPriority(newtag.GetInt(), false);
                            if( GetUpPriority() == PR_AUTO ){
                                SetUpPriority(PR_HIGH, false);
                                SetAutoUpPriority(true);
                                SetAutoUpPriority(false);
                    case FT_KADLASTPUBLISHSRC:{
                        SetLastPublishTimeKadSrc(newtag.GetInt(), 0);
                        if(GetLastPublishTimeKadSrc() > (uint32)time(NULL)+KADEMLIAREPUBLISHTIMES) {
                            // There may be a possibility of an older client that saved a random number here. This will check for that.
                            SetLastPublishTimeKadSrc(0,0);
                    case FT_KADLASTPUBLISHNOTES:{
                        SetLastPublishTimeKadNotes(newtag.GetInt());
                    // old tags: as long as they are not needed, take the chance to purge them
                    case FT_KADLASTPUBLISHKEY:
                    case FT_DL_ACTIVE_TIME:
                        if (newtag.IsInt()) {
                            m_nDlActiveTime = newtag.GetInt();
                    case FT_CORRUPTEDPARTS: {
                        wxASSERT(m_corrupted_list.empty());
                        wxString strCorruptedParts(newtag.GetStr());
                        wxStringTokenizer tokenizer(strCorruptedParts, wxT(","));
                        while ( tokenizer.HasMoreTokens() ) {
                            wxString token = tokenizer.GetNextToken();
                            if (token.ToULong(&uPart)) {
                                if (uPart < GetPartCount() && !IsCorruptedPart(uPart)) {
                                    m_corrupted_list.push_back(uPart);
                            hash.DecodeBase32(newtag.GetStr()) == CAICHHash::GetHashSize();
                        wxASSERT(hashSizeOk);
                            m_pAICHHashSet->SetMasterHash(hash, AICH_VERIFIED);
                    case FT_ATTRANSFERRED:{
                        statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (uint64)newtag.GetInt());
                    case FT_ATTRANSFERREDHI:{
                        statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (((uint64)newtag.GetInt()) << 32));
                    case FT_ATREQUESTED:{
                        statistic.SetAllTimeRequests(newtag.GetInt());
                        statistic.SetAllTimeAccepts(newtag.GetInt());
                        // Start Changes by Slugfiller for better exception handling

                        wxCharBuffer tag_ansi_name = newtag.GetName().ToAscii();
                        char gap_mark = tag_ansi_name ? tag_ansi_name[0u] : 0;
                        if ( newtag.IsInt() && (newtag.GetName().Length() > 1) &&
                            ((gap_mark == FT_GAPSTART) ||
                             (gap_mark == FT_GAPEND))) {
                            Gap_Struct *gap = NULL;
                            unsigned long int gapkey;
                            if (newtag.GetName().Mid(1).ToULong(&gapkey)) {
                                if ( gap_map.find( gapkey ) == gap_map.end() ) {
                                    gap = new Gap_Struct;
                                    gap_map[gapkey] = gap;
                                    gap->start = (uint64)-1;
                                    gap->end = (uint64)-1;
                                    gap = gap_map[ gapkey ];
                                if (gap_mark == FT_GAPSTART) {
                                    gap->start = newtag.GetInt();
                                if (gap_mark == FT_GAPEND) {
                                    gap->end = newtag.GetInt()-1;
                                AddDebugLogLineN(logPartFile, wxT("Wrong gap map key while reading met file!"));
                        // End Changes by Slugfiller for better exception handling
                            m_taglist.push_back(newtag);

        // Nothing. Else, nothing.

        // load the hashsets from the hybridstylepartmet
        if (isnewstyle && !getsizeonly && (metFile.GetPosition()<metFile.GetLength()) ) {
            metFile.Seek(1, wxFromCurrent);

            uint16 parts=GetPartCount();   // assuming we will get all hashsets

            for (uint16 i = 0; i < parts && (metFile.GetPosition()+16<metFile.GetLength()); ++i){
                CMD4Hash cur_hash = metFile.ReadHash();
                m_hashlist.push_back(cur_hash);

            if (!m_hashlist.empty()) {
                CreateHashFromHashlist(m_hashlist, &checkhash);
                if (m_abyFileHash == checkhash) {

    } catch (const CInvalidPacket& e) {
        AddLogLineC(CFormat(wxT("Error: %s (%s) is corrupt (bad tags: %s), unable to load file."))
    } catch (const CIOFailureException& e) {
        AddDebugLogLineC(logPartFile, CFormat( wxT("IO failure while loading '%s': %s") )
    } catch (const CEOFException& WXUNUSED(e)) {
        AddLogLineC(CFormat( _("ERROR: %s (%s) is corrupt (wrong tagcount), unable to load file.") )
        AddLogLineC(_("Trying to recover file info..."));

        // Safe file is that who have
        // We have filesize, try other needed info

        // Do we need to check gaps? I think not,
        // because they are checked below. Worst
        // scenario will only mark file as 0 bytes downloaded.

        if (!GetFileName().IsOk()) {
            // Not critical, let's put a random filename.
                "Recovering no-named file - will try to recover it as RecoveredFile.dat"));
            SetFileName(CPath(wxT("RecoveredFile.dat")));

            AddLogLineC(_("Recovered all available file info :D - Trying to use it..."));
            AddLogLineC(_("Unable to recover file info :("));

    m_gaplist.Init(GetFileSize(), false);   // Init full, then add gaps
    // Now to flush the map into the list (Slugfiller)
    std::map<uint16, Gap_Struct*>::iterator it = gap_map.begin();
    for ( ; it != gap_map.end(); ++it ) {
        Gap_Struct* gap = it->second;
        // SLUGFILLER: SafeHash - revised code, and extra safety
        if ( (gap->start != (uint64)-1) &&
             (gap->end != (uint64)-1) &&
             gap->start <= gap->end &&
             gap->start < GetFileSize()) {
            if (gap->end >= GetFileSize()) {
                gap->end = GetFileSize()-1; // Clipping
            m_gaplist.AddGap(gap->start, gap->end); // All tags accounted for, use safe adding
        // SLUGFILLER: SafeHash

    //check if this is a backup
    if ( m_fullname.GetExt().MakeLower() == wxT("backup" )) {
        m_fullname = m_fullname.RemoveExt();

    // open permanent handle
    if ( !m_hpartfile.Open(m_PartPath, CFile::read_write)) {
        AddLogLineN(CFormat( _("Failed to open %s (%s)") )

        // SLUGFILLER: SafeHash - final safety, make sure any missing part of the file is gap
        if (m_hpartfile.GetLength() < GetFileSize())
            AddGap(m_hpartfile.GetLength(), GetFileSize()-1);
        // Goes both ways - Partfile should never be too large
        if (m_hpartfile.GetLength() > GetFileSize()) {
            AddDebugLogLineC(logPartFile, CFormat( wxT("Partfile \"%s\" is too large! Truncating %llu bytes.") ) % GetFileName() % (m_hpartfile.GetLength() - GetFileSize()));
            m_hpartfile.SetLength(GetFileSize());
        // SLUGFILLER: SafeHash
    } catch (const CIOFailureException& e) {
        AddDebugLogLineC(logPartFile, CFormat( wxT("Error while accessing partfile \"%s\": %s") ) % GetFileName() % e.what());

    // now close the file again until needed
    m_hpartfile.Release(true);

    // check hashcount, file status etc
    if (GetHashCount() != GetED2KPartHashCount()){
        m_hashsetneeded = true;

        m_hashsetneeded = false;
        for (size_t i = 0; i < m_hashlist.size(); ++i) {

    if (m_gaplist.IsComplete()) { // is this file complete already?

    if (!isnewstyle) { // not for importing
        const time_t file_date = CPath::GetModificationTime(m_PartPath);
        if (m_lastDateChanged != file_date) {
            // It's pointless to rehash an empty file, since the case
            // where a user has zero'd a file is handled above ...
            if (m_hpartfile.GetLength()) {
                AddLogLineN(CFormat( _("WARNING: %s might be corrupted (%i)") )
                    % (m_lastDateChanged - file_date) );

                SetStatus(PS_WAITINGFORHASH);

                CPath partFileName = m_partmetfilename.RemoveExt();
                CThreadScheduler::AddTask(new CHashingTask(m_filePath, partFileName, this));

    UpdateCompletedInfos();
    if (completedsize > transferred) {
        m_iGainDueToCompression = completedsize - transferred;
    } else if (completedsize != transferred) {
        m_iLostDueToCorruption = transferred - completedsize;
bool CPartFile::SavePartFile(bool Initial)
        case PS_WAITINGFORHASH:

    /* Don't write anything to disk if less than 100 KB of free space is left. */
    sint64 free = CPath::GetFreeSpaceAt(GetFilePath());
    if ((free != wxInvalidOffset) && (free < (100 * 1024))) {

        if (!m_PartPath.FileExists()) {
            throw wxString(wxT(".part file not found"));

        uint32 lsc = lastseencomplete;

        CPath::BackupFile(m_fullname, wxT(".backup"));
        CPath::RemoveFile(m_fullname);

        file.Open(m_fullname, CFile::write);
        if (!file.IsOpened()) {
            throw wxString(wxT("Failed to open part.met file"));

        file.WriteUInt8(IsLargeFile() ? PARTFILE_VERSION_LARGEFILE : PARTFILE_VERSION);

        file.WriteUInt32(CPath::GetModificationTime(m_PartPath));

        file.WriteHash(m_abyFileHash);
        uint16 parts = m_hashlist.size();
        file.WriteUInt16(parts);
        for (int x = 0; x < parts; ++x) {
            file.WriteHash(m_hashlist[x]);

        #define FIXED_TAGS 15
        uint32 tagcount = m_taglist.size() + FIXED_TAGS + (m_gaplist.size()*2);
        if (!m_corrupted_list.empty()) {

        if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){

        if (GetLastPublishTimeKadSrc()){

        if (GetLastPublishTimeKadNotes()){

        if (GetDlActiveTime()){

        file.WriteUInt32(tagcount);
        //#warning Kry - Where are lost by corruption and gained by compression?
        // 0 (unicoded part file name)
        // We write it with BOM to keep eMule compatibility. Note that the 'printable' filename is saved,
        // as presently the filename does not represent an actual file.
        CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file, utf8strOptBOM );
        CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file );   // 1

        CTagIntSized( FT_FILESIZE, GetFileSize(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );    // 2
        CTagIntSized( FT_TRANSFERRED, transferred, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );   // 3
        CTagInt32( FT_STATUS, (m_paused?1:0)).WriteTagToFile( &file );                                 // 4

        if ( IsAutoDownPriority() ) {
            CTagInt32( FT_DLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );        // 5
            CTagInt32( FT_OLDDLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );     // 6
            CTagInt32( FT_DLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );       // 5
            CTagInt32( FT_OLDDLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );    // 6

        CTagInt32( FT_LASTSEENCOMPLETE, lsc ).WriteTagToFile( &file );                 // 7

        if ( IsAutoUpPriority() ) {
            CTagInt32( FT_ULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );        // 8
            CTagInt32( FT_OLDULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );     // 9
            CTagInt32( FT_ULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );       // 8
            CTagInt32( FT_OLDULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );    // 9

        CTagInt32(FT_CATEGORY, m_category).WriteTagToFile( &file );                                           // 10
        CTagInt32(FT_ATTRANSFERRED, statistic.GetAllTimeTransferred() & 0xFFFFFFFF).WriteTagToFile( &file );  // 11
        CTagInt32(FT_ATTRANSFERREDHI, statistic.GetAllTimeTransferred() >>32).WriteTagToFile( &file );        // 12
        CTagInt32(FT_ATREQUESTED, statistic.GetAllTimeRequests()).WriteTagToFile( &file );                    // 13
        CTagInt32(FT_ATACCEPTED, statistic.GetAllTimeAccepts()).WriteTagToFile( &file );                      // 14
        // corrupt part infos
        if (!m_corrupted_list.empty()) {
            wxString strCorruptedParts;
            std::list<uint16>::iterator it = m_corrupted_list.begin();
            for (; it != m_corrupted_list.end(); ++it) {
                uint16 uCorruptedPart = *it;
                if (!strCorruptedParts.IsEmpty()) {
                    strCorruptedParts += wxT(",");
                strCorruptedParts += CFormat(wxT("%u")) % uCorruptedPart;
            wxASSERT( !strCorruptedParts.IsEmpty() );

            CTagString( FT_CORRUPTEDPARTS, strCorruptedParts ).WriteTagToFile( &file ); // 11?

        if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
            CTagString aichtag(FT_AICH_HASH, m_pAICHHashSet->GetMasterHash().GetString() );
            aichtag.WriteTagToFile(&file); // 12?

        if (GetLastPublishTimeKadSrc()){
            CTagInt32(FT_KADLASTPUBLISHSRC, GetLastPublishTimeKadSrc()).WriteTagToFile(&file); // 15?

        if (GetLastPublishTimeKadNotes()){
            CTagInt32(FT_KADLASTPUBLISHNOTES, GetLastPublishTimeKadNotes()).WriteTagToFile(&file); // 16?

        if (GetDlActiveTime()){
            CTagInt32(FT_DL_ACTIVE_TIME, GetDlActiveTime()).WriteTagToFile(&file); // 17

        for (uint32 j = 0; j < (uint32)m_taglist.size();++j) {
            m_taglist[j].WriteTagToFile(&file);

        for (CGapList::const_iterator it = m_gaplist.begin(); it != m_gaplist.end(); ++it) {
            wxString tagName = CFormat(wxT(" %u")) % i_pos;
            // gap start = first missing byte but gap ends = first non-missing byte
            // in edonkey, but I think it's easier to use the real limits
            tagName[0] = FT_GAPSTART;
            CTagIntSized(tagName, it.start(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

            tagName[0] = FT_GAPEND;
            CTagIntSized(tagName, it.end() + 1, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );
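            // Note on the on-disk gap format, as read from the code above: every gap is
            // stored as a pair of integer tags whose name is the gap index in decimal,
            // prefixed with the FT_GAPSTART or FT_GAPEND marker character. On loading,
            // the pairs are matched up again by that index (see the gap_map handling in
            // CPartFile::LoadPartFile). The end tag stores "last missing byte + 1", which
            // is why 1 is added here and subtracted again when reading.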
    } catch (const wxString& error) {
        AddLogLineNS(CFormat( _("ERROR while saving partfile: %s (%s ==> %s)") )
    } catch (const CIOFailureException& e) {
        AddLogLineCS(_("IO failure while saving partfile: ") + e.what());

        CPath::RemoveFile(m_fullname.AppendExt(wxT(".backup")));

    sint64 metLength = m_fullname.GetFileSize();
    if (metLength == wxInvalidOffset) {
        theApp->ShowAlert( CFormat( _("Could not retrieve length of '%s' - using %s file.") )
        CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
    } else if (metLength == 0) {
        // Don't backup if it's 0 size but raise a warning!!!
        theApp->ShowAlert( CFormat( _("'%s' is 0 size somehow - using %s file.") )
        CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
        // no error, just backup
        CPath::BackupFile(m_fullname, PARTMET_BAK_EXT);
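    // In short: the freshly written part.met only replaces the PARTMET_BAK_EXT backup
    // when its size can be read back and is non-zero; otherwise the previous backup is
    // cloned back over the apparently broken part.met and the user is alerted.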
void CPartFile::SaveSourceSeeds()
    #define MAX_SAVED_SOURCES 10
    // Kry - Sources seeds
    // Based on a feature request, this saves the last MAX_SAVED_SOURCES
    // sources of the file, giving a 'seed' for the next run.
    // We save the last sources because:
    // 1 - They could be the hardest to get
    // 2 - They are the most likely to still be available
    // However, if we have downloading sources, they take preference because
    // we probably have more credits on them.
    // Anyway, source exchange will get us the rest of the sources.
    // This feature is currently used only on rare files (< 20 sources)
    if (GetSourceCount()>20) {

    CClientPtrList source_seeds;

    CClientPtrList::iterator it = m_downloadingSourcesList.begin();
    for( ; it != m_downloadingSourcesList.end() && n_sources < MAX_SAVED_SOURCES; ++it) {
        CUpDownClient *cur_src = *it;
        if (!cur_src->HasLowID()) {
            source_seeds.push_back(cur_src);

    if (n_sources < MAX_SAVED_SOURCES) {
        // Not enough downloading sources to fill the list, going to sources list
        if (GetSourceCount() > 0) {
            SourceSet::reverse_iterator rit = m_SrcList.rbegin();
            for ( ; ((rit != m_SrcList.rend()) && (n_sources<MAX_SAVED_SOURCES)); ++rit) {
                CUpDownClient* cur_src = *rit;
                if (!cur_src->HasLowID()) {
                    source_seeds.push_back(cur_src);

    const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));

    file.Create(seedsPath, true);
    if (!file.IsOpened()) {
        AddLogLineN(CFormat( _("Failed to save part.met.seeds file for %s") )

        file.WriteUInt8(0); // v3, to avoid v2 clients choking on it.
        file.WriteUInt8(source_seeds.size());

        CClientPtrList::iterator it2 = source_seeds.begin();
        for (; it2 != source_seeds.end(); ++it2) {
            CUpDownClient* cur_src = *it2;
            file.WriteUInt32(cur_src->GetUserIDHybrid());
            file.WriteUInt16(cur_src->GetUserPort());
            file.WriteHash(cur_src->GetUserHash());
            // CryptSettings - See SourceExchange V4
            const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
            const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
            const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
            const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
            file.WriteUInt8(byCryptOptions);
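            // Bit layout of byCryptOptions as assembled above: bit 0 = source supports
            // the obfuscated (crypt) layer, bit 1 = source requests it, bit 2 = source
            // requires it. The remaining bits are left zero here.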
        /* v2: Added to keep track of too old seeds */
        file.WriteUInt32(wxDateTime::Now().GetTicks());

        AddLogLineN(CFormat( wxPLURAL("Saved %i source seed for partfile: %s (%s)", "Saved %i source seeds for partfile: %s (%s)", n_sources) )
    } catch (const CIOFailureException& e) {
        AddDebugLogLineC( logPartFile, CFormat( wxT("Error saving partfile's seeds file (%s - %s): %s") )

        CPath::RemoveFile(seedsPath);
void CPartFile::LoadSourceSeeds()
    CMemFile sources_data;

    bool valid_sources = false;

    const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));
    if (!seedsPath.FileExists()) {

    CFile file(seedsPath, CFile::read);
    if (!file.IsOpened()) {
        AddLogLineN(CFormat( _("Partfile %s (%s) has no seeds file") )

        if (file.GetLength() <= 1) {
            AddLogLineN(CFormat( _("Partfile %s (%s) has a void seeds file") )

        uint8 src_count = file.ReadUInt8();

        bool bUseSX2Format = (src_count == 0);

        if (bUseSX2Format) {
            src_count = file.ReadUInt8();

        sources_data.WriteUInt16(src_count);

        for (int i = 0; i < src_count; ++i) {
            uint32 dwID = file.ReadUInt32();
            uint16 nPort = file.ReadUInt16();

            sources_data.WriteUInt32(bUseSX2Format ? dwID : wxUINT32_SWAP_ALWAYS(dwID));
            sources_data.WriteUInt16(nPort);
            sources_data.WriteUInt32(0);
            sources_data.WriteUInt16(0);

            if (bUseSX2Format) {
                sources_data.WriteHash(file.ReadHash());
                sources_data.WriteUInt8(file.ReadUInt8());
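            // The seeds are replayed into sources_data in the layout that AddClientSources()
            // expects from a source-exchange answer, so the code below can hand the buffer
            // over as if it had just arrived from the network; bUseSX2Format only selects
            // which of the two layouts (old v1 or SX2) is rebuilt.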
        // v2: Added to keep track of too old seeds
        time_t time = (time_t)file.ReadUInt32();

        // Time frame is 2 hours. More than enough to compile
        // your new aMule version!
        if ((time + MIN2S(120)) >= wxDateTime::Now().GetTicks()) {
            valid_sources = true;

            // v1 has no time data. We can safely use
            // the sources, next time will be saved.
            valid_sources = true;

        if (valid_sources) {
            sources_data.Seek(0);
            AddClientSources(&sources_data, SF_SOURCE_SEEDS, bUseSX2Format ? 4 : 1, bUseSX2Format);

    } catch (const CSafeIOException& e) {
        AddLogLineN(CFormat( _("Error reading partfile's seeds file (%s - %s): %s") )
void CPartFile::PartFileHashFinished(CKnownFile* result)
    m_lastDateChanged = result->m_lastDateChanged;
    bool errorfound = false;
    if (GetED2KPartHashCount() == 0){
        if (IsComplete(0, GetFileSize()-1)){
            if (result->GetFileHash() != GetFileHash()){
                AddLogLineN(CFormat(wxPLURAL(
                    "Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
                    "Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
                    % result->GetFileHash().Encode()
                    % GetFileHash().Encode() );
                AddGap(0, GetFileSize()-1);

        for (size_t i = 0; i < m_hashlist.size(); ++i){
            // Kry - trel_ar's completed parts check on rehashing.
            // Very nice feature: if a file is completed but .part.met doesn't believe it,
            uint64 partStart = i * PARTSIZE;
            uint64 partEnd   = partStart + GetPartSize(i) - 1;
            if (!( i < result->GetHashCount() && (result->GetPartHash(i) == GetPartHash(i)))){
                if (IsComplete(i)) {
                    if ( i < result->GetHashCount() )
                        wronghash = result->GetPartHash(i);

                    AddLogLineN(CFormat(wxPLURAL(
                        "Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
                        "Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
                        GetED2KPartHashCount())
                        % GetED2KPartHashCount()
                        % wronghash.Encode()
                        % GetPartHash(i).Encode() );

                if (!IsComplete(i)){
                    AddLogLineN(CFormat( _("Found completed part (%i) in %s") )

                    RemoveBlockFromList(partStart, partEnd);

        result->GetAICHHashset()->GetStatus() == AICH_HASHSETCOMPLETE &&
        status == PS_COMPLETING) {
        delete m_pAICHHashSet;
        m_pAICHHashSet = result->GetAICHHashset();
        result->SetAICHHashset(NULL);
        m_pAICHHashSet->SetOwner(this);
    else if (status == PS_COMPLETING) {
        AddDebugLogLineN(logPartFile,
            CFormat(wxT("Failed to store new AICH Hashset for completed file: %s"))

    if (status == PS_COMPLETING){

    AddLogLineN(CFormat( _("Finished rehashing %s") ) % GetFileName());

        SetStatus(PS_READY);

    SetStatus(PS_READY);
    theApp->sharedfiles->SafeAddKFile(this);
void CPartFile::AddGap(uint64 start, uint64 end)
    m_gaplist.AddGap(start, end);
    UpdateDisplayedInfo();

void CPartFile::AddGap(uint16 part)
    m_gaplist.AddGap(part);
    UpdateDisplayedInfo();

bool CPartFile::IsAlreadyRequested(uint64 start, uint64 end)
    std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
    for (; it != m_requestedblocks_list.end(); ++it) {
        Requested_Block_Struct* cur_block = *it;

        if ((start <= cur_block->EndOffset) && (end >= cur_block->StartOffset)) {

bool CPartFile::GetNextEmptyBlockInPart(uint16 partNumber, Requested_Block_Struct *result)
    // Find start of this part
    uint64 partStart = (PARTSIZE * partNumber);
    uint64 start = partStart;

    // What is the end limit of this block, i.e. can't go outside part (or filesize)
    uint64 partEnd = partStart + GetPartSize(partNumber) - 1;
    // Loop until a suitable gap is found (return true) or there are no more gaps (return false)
    CGapList::const_iterator it = m_gaplist.begin();

        uint64 gapStart, end;

        // Find the first gap from the start position
        for (; it != m_gaplist.end(); ++it) {
            gapStart = it.start();

            // Want gaps that overlap start<->partEnd
            if (gapStart <= partEnd && end >= start) {
            } else if (gapStart > partEnd) {

        // If no gaps after start, exit

        // Update start position if gap starts after current pos
        if (start < gapStart) {

        // Find end, keeping within the max block size and the part limit
        uint64 blockLimit = partStart + (BLOCKSIZE * (((start - partStart) / BLOCKSIZE) + 1)) - 1;
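        // blockLimit is the last byte of the BLOCKSIZE-aligned block (inside this part)
        // that contains 'start'. Example, assuming BLOCKSIZE is about 180 KB: with start
        // 10 KB into the part, ((start - partStart) / BLOCKSIZE) is 0, so blockLimit is
        // partStart + BLOCKSIZE - 1, i.e. the end of the part's first block.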
        if (end > blockLimit) {
        if (end > partEnd) {

        // If this gap has not already been requested, we have found a valid entry
        if (!IsAlreadyRequested(start, end)) {
            // Was this block to be returned
            if (result != NULL) {
                result->StartOffset = start;
                result->EndOffset = end;
                md4cpy(result->FileID, GetFileHash().GetHash());
                result->transferred = 0;

            // Reposition to end of that gap

        // If tried all gaps then break out of the loop
        if (end == partEnd) {

    // No suitable gap found

void CPartFile::FillGap(uint64 start, uint64 end)
    m_gaplist.FillGap(start, end);
    UpdateCompletedInfos();
    UpdateDisplayedInfo();

void CPartFile::FillGap(uint16 part)
    m_gaplist.FillGap(part);
    UpdateCompletedInfos();
    UpdateDisplayedInfo();

void CPartFile::UpdateCompletedInfos()
    uint64 allgaps = m_gaplist.GetGapSize();

    percentcompleted = (1.0 - (double)allgaps/GetFileSize()) * 100.0;
    completedsize = GetFileSize() - allgaps;

void CPartFile::WritePartStatus(CMemFile* file)
    uint16 parts = GetED2KPartCount();
    file->WriteUInt16(parts);
    while (done != parts){
        for (uint32 i = 0;i != 8;++i) {
            if (IsComplete(done)) {
            if (done == parts) {
        file->WriteUInt8(towrite);
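    // The loop above walks the eD2K parts eight at a time and emits one status byte per
    // group of parts, so e.g. a 16-part file produces two status bytes after the leading
    // 16-bit part count.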
void CPartFile::WriteCompleteSourcesCount(CMemFile* file)
    file->WriteUInt16(m_nCompleteSourcesCount);
uint32 CPartFile::Process(uint32 reducedownload /*in percent*/, uint8 m_icounter)
    uint32 dwCurTick = ::GetTickCount();

    // If buffer size exceeds limit, or if not written within time limit, flush data
    if ( (m_nTotalBufferData > thePrefs::GetFileBufferSize()) ||
         (dwCurTick > (m_nLastBufferFlushTime + BUFFER_TIME_LIMIT))) {
        // Avoid flushing while copying preview file
        if (!m_bPreviewing) {

    // check if we want new sources from server --> MOVED for 16.40 version
    old_trans=transferingsrc;

    if (m_icounter < 10) {
        // Update only downloading sources.
        CClientPtrList::iterator it = m_downloadingSourcesList.begin();
        for( ; it != m_downloadingSourcesList.end(); ) {
            CUpDownClient *cur_src = *it++;
            if(cur_src->GetDownloadState() == DS_DOWNLOADING) {
                kBpsDown += cur_src->SetDownloadLimit(reducedownload);

        // Update all sources (including downloading sources)
        for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
            CUpDownClient* cur_src = *it++;
            switch (cur_src->GetDownloadState()) {
                case DS_DOWNLOADING: {
                    kBpsDown += cur_src->SetDownloadLimit(reducedownload);

                case DS_LOWTOLOWIP: {
                    if (cur_src->HasLowID() && !theApp->CanDoCallback(cur_src)) {
                        // If we are almost maxed on sources,
                        // slowly remove these clients to see
                        // if we can find a better source.
                        if (((dwCurTick - lastpurgetime) > 30000) &&
                            (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8))) {
                            RemoveSource(cur_src);
                            lastpurgetime = dwCurTick;

                        cur_src->SetDownloadState(DS_ONQUEUE);

                case DS_NONEEDEDPARTS: {
                    // we try to purge no-needed sources, even without reaching the limit
                    if((dwCurTick - lastpurgetime) > 40000) {
                        if(!cur_src->SwapToAnotherFile(false , false, false , NULL)) {
                            //however we only delete them if reaching the limit
                            if (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) {
                                RemoveSource(cur_src);
                                lastpurgetime = dwCurTick;
                                break; //Johnny-B - nothing more to do here (good eye!)
                            lastpurgetime = dwCurTick;

                    // doubled reasktime for no needed parts - save connections and traffic
                    if ( !((!cur_src->GetLastAskedTime()) ||
                           (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME*2)) {

                    // Recheck this client to see if still NNP..
                    // Set to DS_NONE so that we force a TCP reask next time..
                    cur_src->SetDownloadState(DS_NONE);

                    if( cur_src->IsRemoteQueueFull()) {
                        if( ((dwCurTick - lastpurgetime) > 60000) &&
                            (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) ) {
                            RemoveSource( cur_src );
                            lastpurgetime = dwCurTick;
                            break; //Johnny-B - nothing more to do here (good eye!)

                    // Give up to 1 min for UDP to respond..
                    // If we are within one min on TCP, do not try..
                    if ( theApp->IsConnected() &&
                         ( (!cur_src->GetLastAskedTime()) ||
                           (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME-20000)) {
                        cur_src->UDPReaskForDownload();

                    // No break here, since the next case takes care of asking for downloads.
                case DS_TOOMANYCONNS:
                case DS_WAITCALLBACK:
                case DS_WAITCALLBACKKAD: {
                    if ( theApp->IsConnected() &&
                         ( (!cur_src->GetLastAskedTime()) ||
                           (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME)) {
                        if (!cur_src->AskForDownload()) {
                            // I left this break here just as a reminder
                            // just in case we rearrange things..
    /* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
    if (IsA4AFAuto() && ((!m_LastNoNeededCheck) || (dwCurTick - m_LastNoNeededCheck > 900000))) {
        m_LastNoNeededCheck = dwCurTick;
        for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
            CUpDownClient *cur_source = *it++;
            uint8 download_state=cur_source->GetDownloadState();
            if( download_state != DS_DOWNLOADING
                && cur_source->GetRequestFile()
                && ((!cur_source->GetRequestFile()->IsA4AFAuto()) || download_state == DS_NONEEDEDPARTS))
                cur_source->SwapToAnotherFile(false, false, false, this);
    /* eMule 0.30c implementation, i give it a try (Creteil) END ... */

    // swap No needed partfiles if possible

    if (((old_trans==0) && (transferingsrc>0)) || ((old_trans>0) && (transferingsrc==0))) {

    // Kad source search
    if( GetMaxSourcePerFileUDP() > GetSourceCount()){
        //Once we can handle lowID users in Kad, we remove the second IsConnected
        if (theApp->downloadqueue->DoKademliaFileRequest() && (Kademlia::CKademlia::GetTotalFile() < KADEMLIATOTALFILE) && (dwCurTick > m_LastSearchTimeKad) && Kademlia::CKademlia::IsConnected() && theApp->IsConnected() && !IsStopped()){
            theApp->downloadqueue->SetLastKademliaFileRequest();

            if (GetKadFileSearchID()) {
                /* This will never happen anyway. We're talking a
                   1h timespan and searches are at max 45secs */
                Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);

            Kademlia::CUInt128 kadFileID(GetFileHash().GetHash());
            Kademlia::CSearch* pSearch = Kademlia::CSearchManager::PrepareLookup(Kademlia::CSearch::FILE, true, kadFileID);
            AddDebugLogLineN(logKadSearch, CFormat(wxT("Preparing a Kad Search for '%s'")) % GetFileName());
                AddDebugLogLineN(logKadSearch, CFormat(wxT("Kad lookup started for '%s'")) % GetFileName());
                if(m_TotalSearchesKad < 7) {
                    m_TotalSearchesKad++;
                m_LastSearchTimeKad = dwCurTick + (KADEMLIAREASKTIME*m_TotalSearchesKad);
                SetKadFileSearchID(pSearch->GetSearchID());

        if(GetKadFileSearchID()) {
            Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);

    // check if we want new sources from server
    if ( !m_localSrcReqQueued &&
         ( (!m_lastsearchtime) ||
           (dwCurTick - m_lastsearchtime) > SERVERREASKTIME) &&
         theApp->IsConnectedED2K() &&
         thePrefs::GetMaxSourcePerFileSoft() > GetSourceCount() &&
        m_localSrcReqQueued = true;
        theApp->downloadqueue->SendLocalSrcRequest(this);

    // calculate datarate, set limit etc.

    // Kry - does the 3 / 30 difference produce too much flickering or CPU?
    if (m_count >= 30) {
        UpdateAutoDownPriority();
        UpdateDisplayedInfo();
        if(m_bPercentUpdated == false) {
            UpdateCompletedInfos();
        m_bPercentUpdated = false;
        if (thePrefs::ShowCatTabInfos()) {
            Notify_ShowUpdateCatTabTitles();

    // release file handle if unused for some time
    m_hpartfile.Release();

    return (uint32)(kBpsDown*1024.0);
bool CPartFile::CanAddSource(uint32 userid, uint16 port, uint32 serverip, uint16 serverport, uint8* pdebug_lowiddropped, bool ed2kID)
    //The incoming ID could have the userid in the Hybrid format..
    uint32 hybridID = 0;
    if (IsLowID(userid)) {
        hybridID = wxUINT32_SWAP_ALWAYS(userid);
        if (!IsLowID(userid)) {
            userid = wxUINT32_SWAP_ALWAYS(userid);
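    // Rough sketch of the idea behind the ID juggling above: eD2K "LowIDs" are small
    // server-assigned numbers, while "HighIDs" encode the client's IPv4 address; the
    // eD2K and Kademlia ("hybrid") representations store that address with opposite
    // byte order, which is what the wxUINT32_SWAP_ALWAYS calls convert between.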
    // MOD Note: Do not change this part - Merkur
    if (theApp->IsConnectedED2K()) {
        if(::IsLowID(theApp->GetED2KID())) {
            if(theApp->GetED2KID() == userid && theApp->serverconnect->GetCurrentServer()->GetIP() == serverip && theApp->serverconnect->GetCurrentServer()->GetPort() == serverport) {
            if(theApp->GetPublicIP() == userid) {
            if(theApp->GetED2KID() == userid && thePrefs::GetPort() == port) {

    if (Kademlia::CKademlia::IsConnected()) {
        if(!Kademlia::CKademlia::IsFirewalled()) {
            if(Kademlia::CKademlia::GetIPAddress() == hybridID && thePrefs::GetPort() == port) {

    //This allows *.*.*.0 clients to not be removed if Ed2kID == false
    if ( IsLowID(hybridID) && theApp->IsFirewalled()) {
        if (pdebug_lowiddropped) {
            (*pdebug_lowiddropped)++;
void CPartFile::AddSources(CMemFile& sources, uint32 serverip, uint16 serverport, unsigned origin, bool bWithObfuscationAndHash)
    uint8 count = sources.ReadUInt8();
    uint8 debug_lowiddropped = 0;
    uint8 debug_possiblesources = 0;
    CMD4Hash achUserHash;
        // since we may receive multiple search source UDP results we have to "consume" all data of that packet
        AddDebugLogLineN(logPartFile, wxT("Trying to add sources for a stopped file"));
        sources.Seek(count*(4+2), wxFromCurrent);

    for (int i = 0;i != count;++i) {
        uint32 userid = sources.ReadUInt32();
        uint16 port   = sources.ReadUInt16();

        uint8 byCryptOptions = 0;
        if (bWithObfuscationAndHash){
            byCryptOptions = sources.ReadUInt8();
            if ((byCryptOptions & 0x80) > 0) {
                achUserHash = sources.ReadHash();

            if ((thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x01/*supported*/) > 0 && (byCryptOptions & 0x80) == 0)
                || (thePrefs::IsClientCryptLayerSupported() && (byCryptOptions & 0x02/*requested*/) > 0 && (byCryptOptions & 0x80) == 0)) {
                AddDebugLogLineN(logPartFile, CFormat(wxT("Server didn't provide UserHash for source %u, even if it was expected to (or local obfuscationsettings changed during serverconnect")) % userid);
            } else if (!thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x02/*requested*/) == 0 && (byCryptOptions & 0x80) != 0) {
                AddDebugLogLineN(logPartFile, CFormat(wxT("Server provided UserHash for source %u, even if it wasn't expected to (or local obfuscationsettings changed during serverconnect")) % userid);
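            // byCryptOptions bits as used here: 0x01 = source supports obfuscation,
            // 0x02 = source requests it, 0x80 = a user hash follows in the packet.
            // The two log lines above flag servers whose answer does not match the
            // local obfuscation preferences.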
        // "Filter LAN IPs" and "IPfilter" the received sources IP addresses
        if (!IsLowID(userid)) {
            // check for 0-IP, localhost and optionally for LAN addresses
            if ( !IsGoodIP(userid, thePrefs::FilterLanIPs()) ) {
            if (theApp->ipfilter->IsFiltered(userid)) {

        if (!CanAddSource(userid, port, serverip, serverport, &debug_lowiddropped)) {

        if(thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
            ++debug_possiblesources;
            CUpDownClient* newsource = new CUpDownClient(port,userid,serverip,serverport,this, true, true);

            newsource->SetSourceFrom((ESourceFrom)origin);
            newsource->SetConnectOptions(byCryptOptions, true, false);

            if ((byCryptOptions & 0x80) != 0) {
                newsource->SetUserHash(achUserHash);

            theApp->downloadqueue->CheckAndAddSource(this,newsource);
            AddDebugLogLineN(logPartFile, wxT("Consuming a packet because of max sources reached"));
            // Since we may receive multiple search source UDP results we have to "consume" all data of that packet
            // This '+1' is added because 'i' counts from 0.
            sources.Seek((count-(i+1))*(4+2), wxFromCurrent);
            if (GetKadFileSearchID()) {
                Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
void CPartFile::UpdatePartsInfo()
    if( !IsPartFile() ) {
        CKnownFile::UpdatePartsInfo();

    uint16 partcount = GetPartCount();
    bool flag = (time(NULL) - m_nCompleteSourcesTime > 0);

    // Ensure the frequency-list is ready
    if ( m_SrcpartFrequency.size() != GetPartCount() ) {
        m_SrcpartFrequency.clear();
        m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

    // Find number of available parts
    uint16 availablecounter = 0;
    for ( uint16 i = 0; i < partcount; ++i ) {
        if ( m_SrcpartFrequency[i] )

    if ( ( availablecounter == partcount ) && ( m_availablePartsCount < partcount ) ) {
        lastseencomplete = time(NULL);

    m_availablePartsCount = availablecounter;

        ArrayOfUInts16 count;
        count.reserve(GetSourceCount());

        for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it ) {
            if ( !(*it)->GetUpPartStatus().empty() && (*it)->GetUpPartCount() == partcount ) {
                count.push_back((*it)->GetUpCompleteSourcesCount());

        m_nCompleteSourcesCount = m_nCompleteSourcesCountLo = m_nCompleteSourcesCountHi = 0;

        for (uint16 i = 0; i < partcount; ++i) {
                m_nCompleteSourcesCount = m_SrcpartFrequency[i];
            else if( m_nCompleteSourcesCount > m_SrcpartFrequency[i]) {
                m_nCompleteSourcesCount = m_SrcpartFrequency[i];
        count.push_back(m_nCompleteSourcesCount);

        int32 n = count.size();
            std::sort(count.begin(), count.end(), std::less<uint16>());

            int32 i = n >> 1;        // (n / 2)
            int32 j = (n * 3) >> 2;  // (n * 3) / 4
            int32 k = (n * 7) >> 3;  // (n * 7) / 8
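            // i, j and k index the sorted 'count' vector at roughly its 50th, 75th and
            // 87.5th percentile. Worked example: with n = 16 observations, i = 8, j = 12
            // and k = 14, so the low/normal/high guesses below blend increasingly
            // optimistic percentiles of what the sources report.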
            //When still a part file, adjust your guesses by 20% to what you see..

                //Not many sources, so just use what you see..
                // welcome to 'plain stupid code'
                // m_nCompleteSourcesCount;
                m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
                m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
            } else if (n < 20) {
                // For the low guess and the normal guess:
                // If we see more sources than the guessed low and normal, use what we see.
                // If we see fewer sources than the guessed low, let the network account for 80%,
                // we account for 20% with what we see and make sure we are still above the normal.
                // Adjust 80% network and 20% what we see.
                if ( count[i] < m_nCompleteSourcesCount ) {
                    m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
                    m_nCompleteSourcesCountLo =
                        (uint16)((float)(count[i]*.8) +
                                 (float)(m_nCompleteSourcesCount*.2));
                m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
                m_nCompleteSourcesCountHi =
                    (uint16)((float)(count[j]*.8) +
                             (float)(m_nCompleteSourcesCount*.2));
                if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
                    m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

                // Adjust network accounts for 80%, we account for 20% with what
                // we see and make sure we are still above the low.
                // Adjust network accounts for 80%, we account for 20% with what
                // we see and make sure we are still above the normal.
                m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
                m_nCompleteSourcesCount = (uint16)((float)(count[j]*.8)+(float)(m_nCompleteSourcesCount*.2));
                if( m_nCompleteSourcesCount < m_nCompleteSourcesCountLo ) {
                    m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
                m_nCompleteSourcesCountHi = (uint16)((float)(count[k]*.8)+(float)(m_nCompleteSourcesCount*.2));
                if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
                    m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

    m_nCompleteSourcesTime = time(NULL) + (60);

    UpdateDisplayedInfo();
// [Maella -Enhanced Chunk Selection- (based on jicxicmic)]
bool CPartFile::GetNextRequestedBlock(CUpDownClient* sender,
            std::vector<Requested_Block_Struct*>& toadd, uint16& count)
    // The purpose of this function is to return a list of blocks (~180KB) to
    // download. To avoid a premature stop of the downloading, all blocks that
    // are requested from the same source must be located within the same
    // chunk (=> part ~9MB).
    //
    // The selection of the chunk to download is one of the CRITICAL parts of the
    // edonkey network. The selection algorithm must ensure the best spreading
    //
    // The selection is based on 4 criteria:
    //  1. Frequency of the chunk (availability), very rare chunks must be downloaded
    //     as quickly as possible to become a new available source.
    //  2. Parts used for preview (first + last chunk), preview or check a
    //     file (e.g. movie, mp3)
    //  3. Request state (downloading in process), try to ask each source for another
    //     chunk. Spread the requests between all sources.
    //  4. Completion (shortest-to-complete), partially retrieved chunks should be
    //     completed before starting to download another one.
    //
    // The frequency criterion defines three zones: very rare (<10%), rare (<50%)
    // and common (>30%). Inside each zone, the criteria have a specific weight, used
    // to calculate the priority of chunks. The chunk(s) with the highest
    // priority (highest=0, lowest=0xffff) is/are selected first.
    //
    //          very rare (preview)        rare                       common
    //    0% <---- +0 pt ----> 10% <----- +10000 pt -----> 50% <---- +20000 pt ----> 100%
    // 1.  <------- frequency: +25*frequency pt ----------->
    // 2.  <- preview: +1 pt --><-------------- preview: set to 10000 pt ------------->
    // 3.        <------ request: download in progress +20000 pt ------>
    // 4a. <- completion: 0% +100, 25% +75 .. 100% +0 pt --><-- !req => completion --->
    // 4b.                                                  <--- req => !completion -->
    //
    // Unrolled, the priority scale is:
    //
    //   0..xxxx      unrequested and requested very rare chunks
    //   10000..1xxxx unrequested rare chunks + unrequested preview chunks
    //   20000..2xxxx unrequested common chunks (priority to the most complete)
    //   30000..3xxxx requested rare chunks + requested preview chunks
    //   40000..4xxxx requested common chunks (priority to the least complete)
    //
    // This algorithm usually selects first the rarest chunk(s). However, partially
    // complete chunk(s) that is/are close to completion may overtake the priority
    // (priority inversion).
    // For the common chunks, the algorithm tries to spread the download between
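    // Worked example of the ranking computed below: an unrequested, non-preview chunk
    // seen at 2 sources in the "very rare" zone and 40% complete gets
    // rank = 25*2 + 1 + (100 - 40) = 111, while the same chunk in the "common" zone
    // would get rank = 20000 + 60 = 20060, so the rare copy always wins the comparison
    // (lower rank = higher priority).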
    // Check input parameters
    if ( sender->GetPartStatus().empty() ) {

    // Define and create the list of the chunks to download
    const uint16 partCount = GetPartCount();
    ChunkList chunksList;

    uint16 newBlockCount = 0;
    while(newBlockCount != count) {
        // Create a request block structure if a chunk has been previously selected
        if(sender->GetLastPartAsked() != 0xffff) {
            Requested_Block_Struct* pBlock = new Requested_Block_Struct;
            if(GetNextEmptyBlockInPart(sender->GetLastPartAsked(), pBlock) == true) {
                // Keep track of all pending requested blocks
                m_requestedblocks_list.push_back(pBlock);
                // Update list of blocks to return
                toadd.push_back(pBlock);
                // Skip end of loop (=> CPU load)

                // All blocks for this chunk have been already requested
                // => Try to select another chunk
                sender->SetLastPartAsked(0xffff);

        // Check if a new chunk must be selected (e.g. download starting, previous chunk complete)
        if(sender->GetLastPartAsked() == 0xffff) {
            // Quantify all chunks (create list of chunks to download)
            // This is done only one time and only if it is necessary (=> CPU load)
            if(chunksList.empty()) {
                // Identify the locally missing part(s) that this source has
                for(uint16 i=0; i < partCount; ++i) {
                    if(sender->IsPartAvailable(i) == true && GetNextEmptyBlockInPart(i, NULL) == true) {
                        // Create a new entry for this chunk and add it to the list
                        newEntry.frequency = m_SrcpartFrequency[i];
                        chunksList.push_back(newEntry);
                // Check if any block(s) could be downloaded
                if(chunksList.empty()) {
                    break; // Exit main loop while()

                // Define the bounds of the three zones (very rare, rare)
                // more depending on available sources
                if (GetSourceCount()>800) {
                } else if (GetSourceCount()>200) {
                uint16 limit = modif*GetSourceCount()/ 100;

                const uint16 veryRareBound = limit;
                const uint16 rareBound = 2*limit;

                // Cache Preview state (Criterion 2)
                FileType type = GetFiletype(GetFileName());
                const bool isPreviewEnable =
                    thePrefs::GetPreviewPrio() &&
                    (type == ftArchive || type == ftVideo);

                // Collect and calculate criteria for all chunks
                for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
                    Chunk& cur_chunk = *it;

                    const uint64 uStart = cur_chunk.part * PARTSIZE;
                    const uint64 uEnd   = uStart + GetPartSize(cur_chunk.part) - 1;
                    // Criterion 2. Parts used for preview
                    // Remark: - We need to download the first part and the last part(s).
                    //         - When the last part is very small, it's necessary to
                    //           download the two last parts.
                    bool critPreview = false;
                    if(isPreviewEnable == true) {
                        if(cur_chunk.part == 0) {
                            critPreview = true; // First chunk
                        } else if(cur_chunk.part == partCount-1) {
                            critPreview = true; // Last chunk
                        } else if(cur_chunk.part == partCount-2) {
                            // Last chunk - 1 (only if last chunk is too small)
                            const uint32 sizeOfLastChunk = GetFileSize() - uEnd;
                            if(sizeOfLastChunk < PARTSIZE/3) {
                                critPreview = true; // Last chunk - 1

                    // Criterion 3. Request state (downloading in process from other source(s))
                    const bool critRequested =
                        cur_chunk.frequency > veryRareBound &&
                        IsAlreadyRequested(uStart, uEnd);

                    // Criterion 4. Completion
                    // PARTSIZE instead of GetPartSize() favours the last chunk - but that may be intentional
                    uint32 partSize = PARTSIZE - m_gaplist.GetGapSize(cur_chunk.part);
                    const uint16 critCompletion = (uint16)(partSize/(PARTSIZE/100)); // in [%]

                    // Calculate priority with all criteria
                    if(cur_chunk.frequency <= veryRareBound) {
                        // 0..xxxx unrequested + requested very rare chunks
                        cur_chunk.rank = (25 * cur_chunk.frequency) +           // Criterion 1
                            ((critPreview == true) ? 0 : 1) +                   // Criterion 2
                            (100 - critCompletion);                             // Criterion 4
                    } else if(critPreview == true) {
                        // 10000..10100 unrequested preview chunks
                        // 30000..30100 requested preview chunks
                        cur_chunk.rank = ((critRequested == false) ? 10000 : 30000) + // Criterion 3
                            (100 - critCompletion);                             // Criterion 4
                    } else if(cur_chunk.frequency <= rareBound) {
                        // 10101..1xxxx unrequested rare chunks
                        // 30101..3xxxx requested rare chunks
                        cur_chunk.rank = (25 * cur_chunk.frequency) +           // Criterion 1
                            ((critRequested == false) ? 10101 : 30101) +        // Criterion 3
                            (100 - critCompletion);                             // Criterion 4
                        if(critRequested == false) {                            // Criterion 3
                            // 20000..2xxxx unrequested common chunks
                            cur_chunk.rank = 20000 +                            // Criterion 3
                                (100 - critCompletion);                         // Criterion 4
                            // 40000..4xxxx requested common chunks
                            // Remark: The weight of the completion criterion is inverted
                            // to spread the requests over the completing chunks.
                            // Without this, the chunk closest to completion would
                            // receive every new source.
                            cur_chunk.rank = 40000 +                            // Criterion 3
                                (critCompletion);                               // Criterion 4

            // Select the next chunk to download
            if(!chunksList.empty()) {
2024 // Find and count the chunck(s) with the highest priority
2025 uint16 chunkCount
= 0; // Number of found chunks with same priority
2026 uint16 rank
= 0xffff; // Highest priority found
2028 // Collect and calculate criteria for all chunks
2029 for (ChunkList::iterator it
= chunksList
.begin(); it
!= chunksList
.end(); ++it
) {
2030 const Chunk
& cur_chunk
= *it
;
2031 if(cur_chunk
.rank
< rank
) {
2033 rank
= cur_chunk
.rank
;
2034 } else if(cur_chunk
.rank
== rank
) {
2039 // Use a random access to avoid that everybody tries to download the
2040 // same chunks at the same time (=> spread the selected chunk among clients)
2041 uint16 randomness
= 1 + (int) (((float)(chunkCount
-1))*rand()/(RAND_MAX
+1.0));
2043 for (ChunkList::iterator it
= chunksList
.begin(); it
!= chunksList
.end(); ++it
) {
2044 const Chunk
& cur_chunk
= *it
;
2045 if(cur_chunk
.rank
== rank
) {
2047 if(randomness
== 0) {
2048 // Selection process is over
2049 sender
->SetLastPartAsked(cur_chunk
.part
);
2050 // Remark: this list might be reused up to *count times
2051 chunksList
.erase(it
);
2052 break; // exit loop for()
2057 // There is no remaining chunk to download
2058 break; // Exit main loop while()
2062 // Return the number of the blocks
2063 count
= newBlockCount
;
2065 return (newBlockCount
> 0);
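// A quick worked example of the ranking bands computed above (illustrative
// numbers only; veryRareBound = 3 and rareBound = 6 are assumed):
//   - very rare chunk, frequency 2, 40% complete, not a preview part:
//         rank = 25*2 + 1 + (100 - 40) = 111        (0..xxxx band)
//   - unrequested preview chunk, 40% complete:
//         rank = 10000 + (100 - 40) = 10060         (10000..10100 band)
//   - requested common chunk, 90% complete:
//         rank = 40000 + 90 = 40090                 (40000..4xxxx band)
// The smallest rank wins, so very rare chunks are picked first, preview parts
// next, and already-requested common chunks last.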
void CPartFile::RemoveBlockFromList(uint64 start, uint64 end)
{
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	while (it != m_requestedblocks_list.end()) {
		std::list<Requested_Block_Struct*>::iterator it2 = it++;
		if ((*it2)->StartOffset <= start && (*it2)->EndOffset >= end) {
			m_requestedblocks_list.erase(it2);
		}
	}
}


void CPartFile::RemoveAllRequestedBlocks(void)
{
	m_requestedblocks_list.clear();
}
void CPartFile::CompleteFile(bool bIsHashingDone)
{
	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
	}

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	AddDebugLogLineN( logPartFile, wxString( wxT("CPartFile::CompleteFile: Hash ") ) + ( bIsHashingDone ? wxT("done") : wxT("not done") ) );

	if (!bIsHashingDone) {
		SetStatus(PS_COMPLETING);

		CPath partFile = m_partmetfilename.RemoveExt();
		CThreadScheduler::AddTask(new CHashingTask(GetFilePath(), partFile, this));
		return;
	}

	m_is_A4AF_auto = false;
	SetStatus(PS_COMPLETING);
	// guess I was wrong about not needing to spawn a thread ...
	// It is needed if the temp and incoming dirs are on different
	// partitions/drives and the file is large... [oz]
	PerformFileComplete();

	if (thePrefs::ShowCatTabInfos()) {
		Notify_ShowUpdateCatTabTitles();
	}

	UpdateDisplayedInfo(true);
}
void CPartFile::CompleteFileEnded(bool errorOccured, const CPath& newname)
{
	if (errorOccured) {
		SetStatus(PS_ERROR);
		AddLogLineC(CFormat( _("Unexpected error while completing %s. File paused") ) % GetFileName() );
	} else {
		m_fullname = newname;

		SetFilePath(m_fullname.GetPath());
		SetFileName(m_fullname.GetFullName());
		m_lastDateChanged = CPath::GetModificationTime(m_fullname);

		SetStatus(PS_COMPLETE);

		// Remove from list of canceled files in case it was canceled once upon a time
		if (theApp->canceledfiles->Remove(GetFileHash())) {
			theApp->canceledfiles->Save();
		}

		// Mark as known (checks if it's already known),
		// also updates search files
		theApp->knownfiles->SafeAddKFile(this);

		// remove the file from the suspended uploads list
		theApp->uploadqueue->ResumeUpload(GetFileHash());
		theApp->downloadqueue->RemoveFile(this);
		theApp->sharedfiles->SafeAddKFile(this);
		UpdateDisplayedInfo(true);

		// republish that file to the ed2k-server to update the 'FT_COMPLETE_SOURCES' counter on the server.
		theApp->sharedfiles->RepublishFile(this);

		// Ensure that completed shows the correct value
		completedsize = GetFileSize();

		// clear the blackbox to free up memory
		m_CorruptionBlackBox->Free();

		AddLogLineC(CFormat( _("Finished downloading: %s") ) % GetFileName() );
	}

	theApp->downloadqueue->StartNextFile(this);
}
void CPartFile::PerformFileComplete()
{
	// add this file to the suspended uploads list
	theApp->uploadqueue->SuspendUpload(GetFileHash(), false);

	// close permanent handle
	if (m_hpartfile.IsOpened()) {
		m_hpartfile.Close();
	}

	// Schedule task for completion of the file
	CThreadScheduler::AddTask(new CCompletionTask(this));
}
void CPartFile::RemoveAllSources(bool bTryToSwap)
{
	for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = *it++;
		if (bTryToSwap) {
			if (!cur_src->SwapToAnotherFile(true, true, true, NULL)) {
				RemoveSource(cur_src, true, false);
				// If it was not swapped, it's not on any file anymore, and should die
			}
		} else {
			RemoveSource(cur_src, true, false);
		}
	}

	/* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
	// remove all A4AF links in sources to this file
	if (!m_A4AFsrclist.empty()) {
		for (SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
			CUpDownClient* cur_src = *it++;
			if (cur_src->DeleteFileRequest(this)) {
				Notify_SourceCtrlRemoveSource(cur_src, this);
			}
		}
		m_A4AFsrclist.clear();
	}
	/* eMule 0.30c implementation, i give it a try (Creteil) END ... */
	UpdateFileRatingCommentAvail();
}
void CPartFile::Delete()
{
	AddLogLineN(CFormat(_("Deleting file: %s")) % GetFileName());
	// Barry - Need to tell any connected clients to stop sending the file
	StopFile(true);
	AddDebugLogLineN(logPartFile, wxT("\tStopped"));

	uint16 removed = theApp->uploadqueue->SuspendUpload(GetFileHash(), true);
	AddDebugLogLineN(logPartFile, CFormat(wxT("\tSuspended upload to %d clients")) % removed);
	theApp->sharedfiles->RemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from shared"));
	theApp->downloadqueue->RemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from download queue"));
	Notify_DownloadCtrlRemoveFile(this);
	AddDebugLogLineN(logPartFile, wxT("\tRemoved from transferwnd"));
	if (theApp->canceledfiles->Add(GetFileHash())) {
		theApp->canceledfiles->Save();
	}
	AddDebugLogLineN(logPartFile, wxT("\tAdded to canceled file list"));
	theApp->searchlist->UpdateSearchFileByHash(GetFileHash());	// Update file in the search dialog if it's still open

	if (m_hpartfile.IsOpened()) {
		m_hpartfile.Close();
	}
	AddDebugLogLineN(logPartFile, wxT("\tClosed"));

	if (!CPath::RemoveFile(m_fullname)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("\tFailed to delete '%s'")) % m_fullname);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .part.met"));
	}

	if (!CPath::RemoveFile(m_PartPath)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % m_PartPath);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .part"));
	}

	CPath BAKName = m_fullname.AppendExt(PARTMET_BAK_EXT);
	if (!CPath::RemoveFile(BAKName)) {
		AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % BAKName);
	} else {
		AddDebugLogLineN(logPartFile, wxT("\tRemoved .BAK"));
	}

	CPath SEEDSName = m_fullname.AppendExt(wxT(".seeds"));
	if (SEEDSName.FileExists()) {
		if (CPath::RemoveFile(SEEDSName)) {
			AddDebugLogLineN(logPartFile, wxT("\tRemoved .seeds"));
		} else {
			AddDebugLogLineC(logPartFile, CFormat(wxT("Failed to delete '%s'")) % SEEDSName);
		}
	}

	AddDebugLogLineN(logPartFile, wxT("Done"));
}
bool CPartFile::HashSinglePart(uint16 partnumber)
{
	if ((GetHashCount() <= partnumber) && (GetPartCount() > 1)) {
		AddLogLineC(CFormat( _("WARNING: Unable to hash downloaded part - hashset incomplete for '%s'") )
			% GetFileName() );
		m_hashsetneeded = true;
		return true;
	} else if ((GetHashCount() <= partnumber) && GetPartCount() != 1) {
		AddLogLineC(CFormat( _("ERROR: Unable to hash downloaded part - hashset incomplete (%s). This should never happen")) % GetFileName() );
		m_hashsetneeded = true;
		return true;
	}

	CMD4Hash hashresult;
	uint64 offset = PARTSIZE * partnumber;
	uint32 length = GetPartSize(partnumber);
	try {
		CreateHashFromFile(m_hpartfile, offset, length, &hashresult, NULL);
	} catch (const CIOFailureException& e) {
		AddLogLineC(CFormat( wxT("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
			% partnumber % length % (offset+length) % GetFileName() % GetFileSize() % e.what());
		SetStatus(PS_ERROR);
		return false;
	} catch (const CEOFException& e) {
		AddLogLineC(CFormat( wxT("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
			% partnumber % length % (offset+length) % GetFileName() % GetFileSize() % e.what());
		return false;
	}

	if (GetPartCount() > 1) {
		if (hashresult != GetPartHash(partnumber)) {
			AddDebugLogLineN(logPartFile, CFormat( wxT("%s: Expected hash of part %d: %s")) % GetFileName() % partnumber % GetPartHash(partnumber).Encode() );
			AddDebugLogLineN(logPartFile, CFormat( wxT("%s: Actual hash of part %d: %s")) % GetFileName() % partnumber % hashresult.Encode() );
			return false;
		}
		return true;
	}

	if (hashresult != m_abyFileHash) {
		return false;
	}
	return true;
}
bool CPartFile::IsCorruptedPart(uint16 partnumber)
{
	return std::find(m_corrupted_list.begin(), m_corrupted_list.end(), partnumber)
		!= m_corrupted_list.end();
}
void CPartFile::SetDownPriority(uint8 np, bool bSave, bool bRefresh)
{
	if (m_iDownPriority != np) {
		m_iDownPriority = np;
		if (bRefresh) {
			UpdateDisplayedInfo(true);
		}
		if (bSave) {
			SavePartFile();
		}
	}
}
void CPartFile::StopFile(bool bCancel)
{
	// Kry - Need to set it here to get into SetStatus(status) correctly
	m_stopped = true;

	// Barry - Need to tell any connected clients to stop sending the file
	PauseFile();

	m_LastSearchTimeKad = 0;
	m_TotalSearchesKad = 0;

	RemoveAllSources(true);

	if (!bCancel) {
		FlushBuffer();
	}

	UpdateDisplayedInfo(true);
}
void CPartFile::StopPausedFile()
{
	// Once an hour, remove any sources for files which are no longer active downloads
	switch (GetStatus()) {
		case PS_INSUFFICIENT:
			if (time(NULL) - m_iLastPausePurge > (60*60)) {
				m_iLastPausePurge = time(NULL);
				StopFile();
			}
			break;
	}

	// release file handle if unused for some time
	m_hpartfile.Release();
}
void CPartFile::PauseFile(bool bInsufficient)
{
	if ( status == PS_COMPLETE || status == PS_COMPLETING ) {
		return;
	}

	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);
		// If we were in the middle of searching, reset timer so they can resume searching.
		m_LastSearchTimeKad = 0;
	}

	m_iLastPausePurge = time(NULL);

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	CPacket packet( OP_CANCELTRANSFER, 0, OP_EDONKEYPROT );
	for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = *it++;
		if (cur_src->GetDownloadState() == DS_DOWNLOADING) {
			if (!cur_src->GetSentCancelTransfer()) {
				theStats::AddUpOverheadOther( packet.GetPacketSize() );
				AddDebugLogLineN( logLocalClient, wxT("Local Client: OP_CANCELTRANSFER to ") + cur_src->GetFullIP() );
				cur_src->SendPacket( &packet, false, true );
				cur_src->SetSentCancelTransfer( true );
			}
			cur_src->SetDownloadState(DS_ONQUEUE);
			// Allow immediate reconnect on resume
			cur_src->ResetLastAskedTime();
		}
	}

	m_insufficient = bInsufficient;
	if (!bInsufficient) {
		m_paused = true;
	}

	SetStatus(status);
}
void CPartFile::ResumeFile()
{
	if ( status == PS_COMPLETE || status == PS_COMPLETING ) {
		return;
	}

	if ( m_insufficient && !CheckFreeDiskSpace() ) {
		// Still not enough free disk space
		return;
	}

	m_paused = false;
	m_stopped = false;
	m_insufficient = false;

	m_lastsearchtime = 0;

	SetActive(theApp->IsConnected());

	if (m_gaplist.IsComplete() && (GetStatus() == PS_ERROR)) {
		// The file has already been hashed at this point
		CompleteFile(true);
	}

	UpdateDisplayedInfo(true);
}
bool CPartFile::CheckFreeDiskSpace( uint64 neededSpace )
{
	uint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if (free == static_cast<uint64>(wxInvalidOffset)) {
		// If GetFreeSpaceAt() fails, then the path probably does not exist.
		return false;
	}

	// The very least acceptable disk space is a single PART
	if ( free < PARTSIZE ) {
		// Always fail in this case, since we risk losing data if we try to
		// write on a full partition.
		return false;
	}

	// All other checks are only made if the user has enabled them
	if ( thePrefs::IsCheckDiskspaceEnabled() ) {
		neededSpace += thePrefs::GetMinFreeDiskSpace();

		// Due to the existence of sparse files, we cannot assume that
		// writes within the file don't cause new blocks to be allocated.
		// Therefore, we have to simply stop writing the moment the limit
		// has been reached.
		return free >= neededSpace;
	}

	return true;
}
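// Illustrative numbers for the check above: with a 2 MB buffer to flush
// (neededSpace = 2*1024*1024) and an assumed minimum-free-space preference of
// 50 MB, the call only succeeds while the partition still has at least ~52 MB
// free; below a single PARTSIZE (~9.28 MB) it always fails, even when the
// disk-space check preference is disabled.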
void CPartFile::SetLastAnsweredTime()
{
	m_ClientSrcAnswered = ::GetTickCount();
}


void CPartFile::SetLastAnsweredTimeTimeout()
{
	m_ClientSrcAnswered = 2 * CONNECTION_LATENCY + ::GetTickCount() - SOURCECLIENTREASKS;
}
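// Note on the arithmetic above: storing
//     ::GetTickCount() - (SOURCECLIENTREASKS - 2 * CONNECTION_LATENCY)
// backdates the "last answered" tick, so a test of the form
//     ::GetTickCount() - m_ClientSrcAnswered > SOURCECLIENTREASKS
// becomes true after roughly twice the connection latency instead of a full
// reask period (the test itself lives in the source-reask code, not here).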
CPacket* CPartFile::CreateSrcInfoPacket(const CUpDownClient* forClient, uint8 byRequestedVersion, uint16 nRequestedOptions)
{
	if ( m_SrcList.empty() ) {
		return NULL;
	}

	if (!IsPartFile()) {
		return CKnownFile::CreateSrcInfoPacket(forClient, byRequestedVersion, nRequestedOptions);
	}

	if (((forClient->GetRequestFile() != this)
		&& (forClient->GetUploadFile() != this)) || forClient->GetUploadFileID() != GetFileHash()) {
		wxString file1 = _("Unknown");
		if (forClient->GetRequestFile() && forClient->GetRequestFile()->GetFileName().IsOk()) {
			file1 = forClient->GetRequestFile()->GetFileName().GetPrintable();
		} else if (forClient->GetUploadFile() && forClient->GetUploadFile()->GetFileName().IsOk()) {
			file1 = forClient->GetUploadFile()->GetFileName().GetPrintable();
		}
		wxString file2 = _("Unknown");
		if (GetFileName().IsOk()) {
			file2 = GetFileName().GetPrintable();
		}
		AddDebugLogLineN(logPartFile, wxT("File mismatch on source packet (P) Sending: ") + file1 + wxT("  From: ") + file2);
		return NULL;
	}

	if ( !(GetStatus() == PS_READY || GetStatus() == PS_EMPTY)) {
		return NULL;
	}

	const BitVector& reqstatus = forClient->GetPartStatus();
	bool KnowNeededParts = !reqstatus.empty();
	//wxASSERT(rcvstatus.size() == GetPartCount()); // Obviously!
	if (KnowNeededParts && (reqstatus.size() != GetPartCount())) {
		// Yuck. Same file but different part count? Seriously fucked up.
		// This happens rather often with reqstatus.size() == 0. Don't log then.
		if (reqstatus.size()) {
			AddDebugLogLineN(logKnownFiles, CFormat(wxT("Impossible situation: different part counts: %i (client) and %i (file) for %s")) % reqstatus.size() % GetPartCount() % GetFileName());
		}
		return NULL;
	}

	CMemFile data(1024);

	uint8 byUsedVersion;
	bool bIsSX2Packet;
	if (forClient->SupportsSourceExchange2() && byRequestedVersion > 0){
		// the client uses SourceExchange2 and requested the highest version he knows
		// and we send the highest version we know, but of course not higher than his request
		byUsedVersion = std::min(byRequestedVersion, (uint8)SOURCEEXCHANGE2_VERSION);
		bIsSX2Packet = true;
		data.WriteUInt8(byUsedVersion);

		// we don't support any special SX2 options yet, reserved for later use
		if (nRequestedOptions != 0) {
			AddDebugLogLineN(logKnownFiles, CFormat(wxT("Client requested unknown options for SourceExchange2: %u")) % nRequestedOptions);
		}
	} else {
		byUsedVersion = forClient->GetSourceExchange1Version();
		bIsSX2Packet = false;
		if (forClient->SupportsSourceExchange2()) {
			AddDebugLogLineN(logKnownFiles, wxT("Client which announced to support SX2 sent SX1 packet instead"));
		}
	}

	uint16 nCount = 0;
	data.WriteHash(m_abyFileHash);
	data.WriteUInt16(nCount);

	for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		bool bNeeded = false;
		CUpDownClient* cur_src = *it;

		int state = cur_src->GetDownloadState();
		int valid = ( state == DS_DOWNLOADING ) || ( state == DS_ONQUEUE && !cur_src->IsRemoteQueueFull() );

		if ( cur_src->HasLowID() || !valid ) {
			continue;
		}

		// only send sources which have needed parts for this client if possible
		const BitVector& srcstatus = cur_src->GetPartStatus();
		if ( !srcstatus.empty() ) {
			//wxASSERT(srcstatus.size() == GetPartCount()); // Obviously!
			if (srcstatus.size() != GetPartCount()) {
				continue;
			}
			if ( KnowNeededParts ) {
				// only send sources which have needed parts for this client
				for (int x = 0; x < GetPartCount(); ++x) {
					if (srcstatus.get(x) && !reqstatus.get(x)) {
						bNeeded = true;
						break;
					}
				}
			} else {
				// if we don't know the needed parts for this client,
				// return any source: currently a client sends its
				// file status only after it has at least one complete part
				if (srcstatus.size() != GetPartCount()) {
					continue;
				}
				for (int x = 0; x < GetPartCount(); ++x){
					if (srcstatus.get(x)) {
						bNeeded = true;
						break;
					}
				}
			}
		}

		if (bNeeded) {
			++nCount;
			uint32 dwID;
			if(forClient->GetSourceExchange1Version() > 2) {
				dwID = cur_src->GetUserIDHybrid();
			} else {
				dwID = wxUINT32_SWAP_ALWAYS(cur_src->GetUserIDHybrid());
			}
			data.WriteUInt32(dwID);
			data.WriteUInt16(cur_src->GetUserPort());
			data.WriteUInt32(cur_src->GetServerIP());
			data.WriteUInt16(cur_src->GetServerPort());

			if (byUsedVersion >= 2) {
				data.WriteHash(cur_src->GetUserHash());
			}

			if (byUsedVersion >= 4){
				// CryptSettings - SourceExchange V4
				// 1 CryptLayer Required
				// 1 CryptLayer Requested
				// 1 CryptLayer Supported
				const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
				const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
				const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
				const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
				data.WriteUInt8(byCryptOptions);
			}

			if (nCount == 500) {
				break;
			}
		}
	}

	if (!nCount) {
		return NULL;
	}

	data.Seek(bIsSX2Packet ? 17 : 16, wxFromStart);
	data.WriteUInt16(nCount);

	CPacket* result = new CPacket(data, OP_EMULEPROT, bIsSX2Packet ? OP_ANSWERSOURCES2 : OP_ANSWERSOURCES);

	// 16+2+501*(4+2+4+2+16) = 14046 bytes max.
	if (result->GetPacketSize() > 354) {
		result->PackPacket();
	}

	return result;
}
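// Layout of one source record in the OP_ANSWERSOURCES payload written above
// (sizes in bytes; these match the uDataSize checks in AddClientSources() below):
//   SX1 v1          : 4 (ID) + 2 (port) + 4 (server IP) + 2 (server port) = 12
//   SX1 v2/v3, SX2  : 12 + 16 (user hash)                                 = 28
//   v4 and later    : 28 + 1 (byCryptOptions: bit0 = supported,
//                             bit1 = requested, bit2 = required)          = 29
// The header is the 16-byte file hash plus a 2-byte source count; SX2 packets
// carry one extra leading version byte, hence Seek(17) instead of Seek(16).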
void CPartFile::AddClientSources(CMemFile* sources, unsigned nSourceFrom, uint8 uClientSXVersion, bool bSourceExchange2, const CUpDownClient* /*pClient*/)
{
	uint16 nCount = 0;
	uint8 uPacketSXVersion = 0;
	if (!bSourceExchange2) {
		nCount = sources->ReadUInt16();

		// Check if the data size matches the 'nCount' for v1 or v2 and eventually correct the source
		// exchange version while reading the packet data. Otherwise we could experience a higher
		// chance in dealing with wrong source data, user hashes and finally duplicate sources.
		uint32 uDataSize = sources->GetLength() - sources->GetPosition();

		if ((uint32)(nCount*(4+2+4+2)) == uDataSize) { // Checks if a version 1 packet is the correct size
			if (uClientSXVersion != 1) {
				return;
			}
			uPacketSXVersion = 1;
		} else if ((uint32)(nCount*(4+2+4+2+16)) == uDataSize) { // Checks if a version 2/3 packet is the correct size
			if (uClientSXVersion == 2) {
				uPacketSXVersion = 2;
			} else if (uClientSXVersion > 2) {
				uPacketSXVersion = 3;
			} else {
				return;
			}
		} else if (nCount*(4+2+4+2+16+1) == uDataSize) {
			if (uClientSXVersion != 4 ) {
				return;
			}
			uPacketSXVersion = 4;
		} else {
			// If v5 inserts additional data (like v2), the above code will correctly filter those packets.
			// If v5 appends additional data after <count>(<Sources>)[count], we are in trouble with the
			// above code. Though a client which does not understand v5+ should never receive such a packet.
			AddDebugLogLineN(logClient, CFormat(wxT("Received invalid source exchange packet (v%u) of data size %u for %s")) % uClientSXVersion % uDataSize % GetFileName());
			return;
		}
	} else {
		// We only check if the version is known by us and do a quick sanitize check on known versions.
		// Other than with SX1, the packet will be ignored if any error appears, since it can't be a "misunderstanding" anymore.
		if (uClientSXVersion > SOURCEEXCHANGE2_VERSION || uClientSXVersion == 0 ){
			AddDebugLogLineN(logPartFile, CFormat(wxT("Invalid source exchange type version: %i")) % uClientSXVersion);
			return;
		}

		// all known versions use the first 2 bytes as count and unknown versions are already filtered above
		nCount = sources->ReadUInt16();
		uint32 uDataSize = (uint32)(sources->GetLength() - sources->GetPosition());
		bool bError = false;
		switch (uClientSXVersion){
			case 1:
				bError = nCount*(4+2+4+2) != uDataSize;
				break;
			case 2:
			case 3:
				bError = nCount*(4+2+4+2+16) != uDataSize;
				break;
			case 4:
				bError = nCount*(4+2+4+2+16+1) != uDataSize;
				break;
		}

		if (bError) {
			AddDebugLogLineN(logPartFile, wxT("Invalid source exchange data size."));
			return;
		}
		uPacketSXVersion = uClientSXVersion;
	}

	for (uint16 i = 0; i != nCount; ++i) {
		uint32 dwID = sources->ReadUInt32();
		uint16 nPort = sources->ReadUInt16();
		uint32 dwServerIP = sources->ReadUInt32();
		uint16 nServerPort = sources->ReadUInt16();

		CMD4Hash userHash;
		if (uPacketSXVersion > 1) {
			userHash = sources->ReadHash();
		}

		uint8 byCryptOptions = 0;
		if (uPacketSXVersion >= 4) {
			byCryptOptions = sources->ReadUInt8();
		}

		// Clients send IDs in the Hybrid format, so highID clients with *.*.*.0 won't be falsely switched to a lowID.
		uint32 dwIDED2K;
		if (uPacketSXVersion >= 3) {
			dwIDED2K = wxUINT32_SWAP_ALWAYS(dwID);
		} else {
			dwIDED2K = dwID;
		}

		// check the HighID(IP) - "Filter LAN IPs" and "IPfilter" the received sources IP addresses
		if (!IsLowID(dwID)) {
			if (!IsGoodIP(dwIDED2K, thePrefs::FilterLanIPs())) {
				// check for 0-IP, localhost and optionally for LAN addresses
				AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - bad IP")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
				continue;
			}
			if (theApp->ipfilter->IsFiltered(dwIDED2K)) {
				AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - IPFilter")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
				continue;
			}
			if (theApp->clientlist->IsBannedClient(dwIDED2K)){
				continue;
			}
		}

		// additionally check for LowID and own IP
		if (!CanAddSource(dwID, nPort, dwServerIP, nServerPort, NULL, false)) {
			AddDebugLogLineN(logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via source exchange")) % Uint32toStringIP(dwIDED2K));
			continue;
		}

		if (thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
			CUpDownClient* newsource = new CUpDownClient(nPort, dwID, dwServerIP, nServerPort, this, (uPacketSXVersion < 3), true);
			if (uPacketSXVersion > 1) {
				newsource->SetUserHash(userHash);
			}

			if (uPacketSXVersion >= 4) {
				newsource->SetConnectOptions(byCryptOptions, true, false);
			}

			newsource->SetSourceFrom((ESourceFrom)nSourceFrom);
			theApp->downloadqueue->CheckAndAddSource(this, newsource);
		}
	}
}
void CPartFile::UpdateAutoDownPriority()
{
	if (!IsAutoDownPriority()) {
		return;
	}

	if (GetSourceCount() <= theApp->downloadqueue->GetRareFileThreshold()) {
		if ( GetDownPriority() != PR_HIGH )
			SetDownPriority(PR_HIGH, false, false);
	} else if (GetSourceCount() < theApp->downloadqueue->GetCommonFileThreshold()) {
		if ( GetDownPriority() != PR_NORMAL )
			SetDownPriority(PR_NORMAL, false, false);
	} else {
		if ( GetDownPriority() != PR_LOW )
			SetDownPriority(PR_LOW, false, false);
	}
}
// making this function return a higher value when more sources have the extended
// protocol will force you to ask a larger variety of people for sources
int CPartFile::GetCommonFilePenalty()
{
	//TODO: implement, but never return less than MINCOMMONPENALTY!
	return MINCOMMONPENALTY;
}
/* Barry - Replaces BlockReceived()

	Originally this only wrote to disk when a full 180k block
	had been received from a client, and only asked for data in
	180k blocks.

	This meant that on average 90k was lost for every connection
	to a client data source. That is a lot of wasted data.

	To reduce the lost data, packets are now written to a buffer
	and flushed to disk regularly regardless of size downloaded.
	This includes compressed packets.

	Data is also requested only where gaps are, not in 180k blocks.
	The requests will still not exceed 180k, but may be smaller to
	fill a gap.
*/

// Kry - transize is 32 bits, no packet can be more than that (this is the
// compressed size). Even 32 bits is too much imho. As for the return size,
// look at lenData below.
uint32 CPartFile::WriteToBuffer(uint32 transize, byte* data, uint64 start, uint64 end, Requested_Block_Struct* block, const CUpDownClient* client)
{
	// Increment transferred bytes counter for this file
	transferred += transize;

	// This is needed a few times
	// Kry - should not need a uint64 here - no block is larger than
	// 2GB even after being uncompressed.
	uint32 lenData = (uint32) (end - start + 1);

	if (lenData > transize) {
		m_iGainDueToCompression += lenData - transize;
	}

	// Occasionally packets are duplicated, no point writing them twice
	if (IsComplete(start, end)) {
		AddDebugLogLineN(logPartFile,
			CFormat(wxT("File '%s' has already been written from %u to %u"))
				% GetFileName() % start % end);
		return 0;
	}

	// security sanitize check to make sure we do not write anything into an already hashed complete chunk
	const uint64 nStartChunk = start / PARTSIZE;
	const uint64 nEndChunk = end / PARTSIZE;
	if (IsComplete(nStartChunk)) {
		AddDebugLogLineN(logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (start): %u-%u; File=%s")) % start % end % GetFileName());
		return 0;
	} else if (nStartChunk != nEndChunk) {
		if (IsComplete(nEndChunk)) {
			AddDebugLogLineN(logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (end): %u-%u; File=%s")) % start % end % GetFileName());
			return 0;
		} else {
			AddDebugLogLineN(logPartFile, CFormat(wxT("Received data crosses chunk boundaries: %u-%u; File=%s")) % start % end % GetFileName());
		}
	}

	// log transfer information in our "blackbox"
	m_CorruptionBlackBox->TransferredData(start, end, client->GetIP());

	// Create a new buffered queue entry
	PartFileBufferedData *item = new PartFileBufferedData(m_hpartfile, data, start, end, block);

	// Add to the queue in the correct position (most likely the end)
	bool added = false;

	std::list<PartFileBufferedData*>::iterator it = m_BufferedData_list.begin();
	for (; it != m_BufferedData_list.end(); ++it) {
		PartFileBufferedData* queueItem = *it;

		if (item->end <= queueItem->end) {
			if (it != m_BufferedData_list.begin()) {
				added = true;
				m_BufferedData_list.insert(--it, item);
			}
			break;
		}
	}

	if (!added) {
		m_BufferedData_list.push_front(item);
	}

	// Increment buffer size marker
	m_nTotalBufferData += lenData;

	// Mark this small section of the file as filled
	FillGap(item->start, item->end);

	// Update the flushed mark on the requested block
	// The loop here is unfortunate but necessary to detect deleted blocks.
	std::list<Requested_Block_Struct*>::iterator it2 = m_requestedblocks_list.begin();
	for (; it2 != m_requestedblocks_list.end(); ++it2) {
		if (*it2 == item->block) {
			item->block->transferred += lenData;
			break;
		}
	}

	if (m_gaplist.IsComplete()) {
		FlushBuffer(true);
	}

	// Return the length of data written to the buffer
	return lenData;
}
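// Buffered-write flow in short (a summary of the code above, not additional
// behaviour): WriteToBuffer() only queues the received range and marks the gap
// as filled; the actual disk I/O happens later in FlushBuffer(), which drains
// m_BufferedData_list, writes each CFileArea at its offset and then re-hashes
// every part that was touched. A part that hashes correctly becomes shareable,
// while a failing part is put on m_corrupted_list and AICH recovery is requested.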
void CPartFile::FlushBuffer(bool fromAICHRecoveryDataAvailable)
{
	m_nLastBufferFlushTime = GetTickCount();

	if (m_BufferedData_list.empty()) {
		return;
	}

	uint32 partCount = GetPartCount();
	// Remember which parts need to be checked at the end of the flush
	std::vector<bool> changedPart(partCount, false);

	// Ensure the file is big enough to write data to (the last item will be the furthest from the start)
	if (!CheckFreeDiskSpace(m_nTotalBufferData)) {
		// Not enough free space to write the last item, bail
		AddLogLineC(CFormat( _("WARNING: Not enough free disk-space! Pausing file: %s") ) % GetFileName());

		PauseFile(true);
		return;
	}

	// Loop through queue
	while ( !m_BufferedData_list.empty() ) {
		// Get top item and remove it from the queue
		CScopedPtr<PartFileBufferedData> item(m_BufferedData_list.front());
		m_BufferedData_list.pop_front();

		// This is needed a few times
		wxASSERT((item->end - item->start) < 0xFFFFFFFF);
		uint32 lenData = (uint32)(item->end - item->start + 1);

		// SLUGFILLER: SafeHash - could be more than one part
		for (uint32 curpart = (item->start/PARTSIZE); curpart <= (item->end/PARTSIZE); ++curpart) {
			wxASSERT(curpart < partCount);
			changedPart[curpart] = true;
		}
		// SLUGFILLER: SafeHash

		// Go to the correct position in file and write block of data
		try {
			item->area.FlushAt(m_hpartfile, item->start, lenData);
			// Decrease buffer size
			m_nTotalBufferData -= lenData;
		} catch (const CIOFailureException& e) {
			AddDebugLogLineC(logPartFile, wxT("Error while saving part-file: ") + e.what());
			SetStatus(PS_ERROR);
			// No need to bang your head against it again and again if it has already failed.
			DeleteContents(m_BufferedData_list);
			m_nTotalBufferData = 0;
			return;
		}
	}

	// Update last-changed date
	m_lastDateChanged = wxDateTime::GetTimeNow();

	try {
		// Partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			// it's a "last chance" correction. the real bugfix has to be applied 'somewhere' else
			m_hpartfile.SetLength(GetFileSize());
		}
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile,
			CFormat(wxT("Error while truncating part-file (%s): %s"))
				% m_PartPath % e.what());
		SetStatus(PS_ERROR);
		return;
	}

	// Check each part of the file
	for (uint16 partNumber = 0; partNumber < partCount; ++partNumber) {
		if (changedPart[partNumber] == false) {
			continue;
		}

		uint32 partRange = GetPartSize(partNumber) - 1;

		// Is this 9MB part complete
		if (IsComplete(partNumber)) {
			if (!HashSinglePart(partNumber)) {
				AddLogLineC(CFormat(
					_("Downloaded part %i is corrupt in file: %s") ) % partNumber % GetFileName() );
				// add part to corrupted list, if not already there
				if (!IsCorruptedPart(partNumber)) {
					m_corrupted_list.push_back(partNumber);
				}
				// request AICH recovery data.
				// Don't if called from the AICHRecovery. It's already there and would lead to an infinite recursion.
				if (!fromAICHRecoveryDataAvailable) {
					RequestAICHRecovery(partNumber);
				}
				// Reduce transferred amount by corrupt amount
				m_iLostDueToCorruption += (partRange + 1);
			} else {
				if (!m_hashsetneeded) {
					AddDebugLogLineN(logPartFile, CFormat(
						wxT("Finished part %u of '%s'")) % partNumber % GetFileName());
				}

				// tell the blackbox about the verified data
				m_CorruptionBlackBox->VerifiedData(true, partNumber, 0, partRange);

				// if this part was successfully completed (although ICH is active), remove it from the corrupted list
				EraseFirstValue(m_corrupted_list, partNumber);

				if (status == PS_EMPTY) {
					if (theApp->IsRunning()) { // may be called during shutdown!
						if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
							// Successfully completed part, make it available for sharing
							SetStatus(PS_READY);
							theApp->sharedfiles->SafeAddKFile(this);
						}
					}
				}
			}
		} else if ( IsCorruptedPart(partNumber) &&	// corrupted part:
				(thePrefs::IsICHEnabled()	// old ICH: rehash whenever we have new data hoping it will be good now
				 || fromAICHRecoveryDataAvailable)) {	// new AICH: one rehash right before performing it (maybe it's already good)
			// Try to recover with minimal loss
			if (HashSinglePart(partNumber)) {
				++m_iTotalPacketsSavedDueToICH;

				uint64 uMissingInPart = m_gaplist.GetGapSize(partNumber);
				FillGap(partNumber);
				RemoveBlockFromList(PARTSIZE*partNumber, (PARTSIZE*partNumber + partRange));

				// tell the blackbox about the verified data
				m_CorruptionBlackBox->VerifiedData(true, partNumber, 0, partRange);

				// remove from corrupted list
				EraseFirstValue(m_corrupted_list, partNumber);

				AddLogLineC(CFormat( _("ICH: Recovered corrupted part %i for %s -> Saved bytes: %s") )
					% partNumber
					% GetFileName()
					% CastItoXBytes(uMissingInPart));

				if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
					if (status == PS_EMPTY) {
						// Successfully recovered part, make it available for sharing
						SetStatus(PS_READY);
						if (theApp->IsRunning()) // may be called during shutdown!
							theApp->sharedfiles->SafeAddKFile(this);
					}
				}
			}
		}
	}

	if (theApp->IsRunning()) { // may be called during shutdown!
		// Is this file finished?
		if (m_gaplist.IsComplete()) {
			CompleteFile(false);
		}
	}
}
// read data for upload, return false on error
bool CPartFile::ReadData(CFileArea& area, uint64 offset, uint32 toread)
{
	// Sanity check
	if (offset + toread > GetFileSize()) {
		AddDebugLogLineN(logPartFile, CFormat(wxT("tried to read %d bytes past eof of %s"))
			% (offset + toread - GetFileSize()) % GetFileName());
		return false;
	}

	area.ReadAt(m_hpartfile, offset, toread);
	// if it fails it throws (which the caller should catch)
	return true;
}


void CPartFile::UpdateFileRatingCommentAvail()
{
	bool prevComment = m_hasComment;
	int prevRating = m_iUserRating;

	m_hasComment = false;
	m_iUserRating = 0;
	int ratingCount = 0;

	SourceSet::iterator it = m_SrcList.begin();
	for (; it != m_SrcList.end(); ++it) {
		CUpDownClient* cur_src = *it;

		if (!cur_src->GetFileComment().IsEmpty()) {
			if (thePrefs::IsCommentFiltered(cur_src->GetFileComment())) {
				continue;
			}
			m_hasComment = true;
		}

		uint8 rating = cur_src->GetFileRating();
		if (rating) {
			wxASSERT(rating <= 5);

			++ratingCount;
			m_iUserRating += rating;
		}
	}

	if (ratingCount) {
		m_iUserRating /= ratingCount;
		wxASSERT(m_iUserRating > 0 && m_iUserRating <= 5);
	}

	if ((prevComment != m_hasComment) || (prevRating != m_iUserRating)) {
		UpdateDisplayedInfo();
	}
}
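// Example of the averaging above (illustrative numbers): three sources rating
// the file 4, 5 and 3 give ratingCount == 3 and m_iUserRating == 4 + 5 + 3 == 12
// before the division, so the displayed user rating becomes 12 / 3 == 4.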
void CPartFile::SetCategory(uint8 cat)
{
	wxASSERT( cat < theApp->glob_prefs->GetCatCount() );

	m_category = cat;
}
bool CPartFile::RemoveSource(CUpDownClient* toremove, bool updatewindow, bool bDoStatsUpdate)
{
	wxASSERT( toremove );

	bool result = theApp->downloadqueue->RemoveSource( toremove, updatewindow, bDoStatsUpdate );

	// Check if the client should be deleted, but not if the client is already dying
	if ( !toremove->GetSocket() && !toremove->HasBeenDeleted() ) {
		if ( toremove->Disconnected(wxT("RemoveSource - purged")) ) {
			toremove->Safe_Delete();
		}
	}

	return result;
}
void CPartFile::AddDownloadingSource(CUpDownClient* client)
{
	CClientPtrList::iterator it =
		std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), client);
	if (it == m_downloadingSourcesList.end()) {
		m_downloadingSourcesList.push_back(client);
	}
}


void CPartFile::RemoveDownloadingSource(CUpDownClient* client)
{
	CClientPtrList::iterator it =
		std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), client);
	if (it != m_downloadingSourcesList.end()) {
		m_downloadingSourcesList.erase(it);
	}
}
uint64 CPartFile::GetNeededSpace()
{
	try {
		uint64 length = m_hpartfile.GetLength();

		if (length > GetFileSize()) {
			return 0;	// Shouldn't happen, but just in case
		}

		return GetFileSize() - length;
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logPartFile,
			CFormat(wxT("Error while retrieving file-length (%s): %s"))
				% m_PartPath % e.what());
		SetStatus(PS_ERROR);
		return 0;
	}
}
void CPartFile::SetStatus(uint8 in)
{
	// PAUSED and INSUFFICIENT have extra flag variables m_paused and m_insufficient
	// - they are never to be stored in status
	wxASSERT( in != PS_PAUSED && in != PS_INSUFFICIENT );

	status = in;

	if (theApp->IsRunning()) {
		UpdateDisplayedInfo( true );

		if ( thePrefs::ShowCatTabInfos() ) {
			Notify_ShowUpdateCatTabTitles();
		}

		Notify_DownloadCtrlSort();
	}
}
void CPartFile::RequestAICHRecovery(uint16 nPart)
{
	if ( !m_pAICHHashSet->HasValidMasterHash() ||
		(m_pAICHHashSet->GetStatus() != AICH_TRUSTED && m_pAICHHashSet->GetStatus() != AICH_VERIFIED)){
		AddDebugLogLineN( logAICHRecovery, wxT("Unable to request AICH Recoverydata because we have no trusted Masterhash") );
		return;
	}

	if (GetPartSize(nPart) <= EMBLOCKSIZE) {
		return;
	}

	if (CAICHHashSet::IsClientRequestPending(this, nPart)){
		AddDebugLogLineN( logAICHRecovery, wxT("RequestAICHRecovery: Already a request for this part pending"));
		return;
	}

	// first check if we already have the recovery data, no need to rerequest it then
	if (m_pAICHHashSet->IsPartDataAvailable(nPart*PARTSIZE)){
		AddDebugLogLineN( logAICHRecovery, wxT("Found PartRecoveryData in memory"));
		AICHRecoveryDataAvailable(nPart);
		return;
	}

	wxASSERT( nPart < GetPartCount() );
	// find some random client which supports AICH to ask for the blocks
	// first let's see how many we have at all; we prefer high IDs very much
	uint32 cAICHClients = 0;
	uint32 cAICHLowIDClients = 0;
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		CUpDownClient* pCurClient = *(it);
		if ( pCurClient->IsSupportingAICH() &&
			pCurClient->GetReqFileAICHHash() != NULL &&
			!pCurClient->IsAICHReqPending()
			&& (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
		{
			if (pCurClient->HasLowID()) {
				++cAICHLowIDClients;
			} else {
				++cAICHClients;
			}
		}
	}
	if ((cAICHClients | cAICHLowIDClients) == 0){
		AddDebugLogLineN( logAICHRecovery, wxT("Unable to request AICH Recoverydata because found no client who supports it and has the same hash as the trusted one"));
		return;
	}

	uint32 nSeclectedClient;
	if (cAICHClients > 0) {
		nSeclectedClient = (rand() % cAICHClients) + 1;
	} else {
		nSeclectedClient = (rand() % cAICHLowIDClients) + 1;
	}
	CUpDownClient* pClient = NULL;
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		CUpDownClient* pCurClient = *(it);
		if (pCurClient->IsSupportingAICH() && pCurClient->GetReqFileAICHHash() != NULL && !pCurClient->IsAICHReqPending()
			&& (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
		{
			if (cAICHClients > 0){
				if (!pCurClient->HasLowID())
					nSeclectedClient--;
			} else {
				wxASSERT( pCurClient->HasLowID());
				nSeclectedClient--;
			}
			if (nSeclectedClient == 0){
				pClient = pCurClient;
				break;
			}
		}
	}
	if (pClient == NULL){
		return;
	}

	AddDebugLogLineN( logAICHRecovery, CFormat( wxT("Requesting AICH Hash (%s) from client %s") ) % ( cAICHClients ? wxT("HighId") : wxT("LowID") ) % pClient->GetClientFullInfo() );
	pClient->SendAICHRequest(this, nPart);
}
void CPartFile::AICHRecoveryDataAvailable(uint16 nPart)
{
	if (GetPartCount() < nPart){
		return;
	}

	FlushBuffer(true);

	uint32 length = GetPartSize(nPart);
	// if the part was already ok, it would now be complete
	if (IsComplete(nPart)) {
		AddDebugLogLineN(logAICHRecovery, CFormat(wxT("Processing AICH Recovery data: The part (%u) is already complete, canceling")) % nPart);
		return;
	}

	CAICHHashTree* pVerifiedHash = m_pAICHHashSet->m_pHashTree.FindHash(nPart*PARTSIZE, length);
	if (pVerifiedHash == NULL || !pVerifiedHash->GetHashValid()){
		AddDebugLogLineC( logAICHRecovery, wxT("Processing AICH Recovery data: Unable to get verified hash from hashset (should never happen)") );
		return;
	}

	CAICHHashTree htOurHash(pVerifiedHash->GetNDataSize(), pVerifiedHash->GetIsLeftBranch(), pVerifiedHash->GetNBaseSize());
	try {
		CreateHashFromFile(m_hpartfile, PARTSIZE * nPart, length, NULL, &htOurHash);
	} catch (const CIOFailureException& e) {
		AddDebugLogLineC(logAICHRecovery,
			CFormat(wxT("IO failure while hashing part-file '%s': %s"))
				% m_hpartfile.GetFilePath() % e.what());
		SetStatus(PS_ERROR);
		return;
	}

	if (!htOurHash.GetHashValid()){
		AddDebugLogLineN( logAICHRecovery, wxT("Processing AICH Recovery data: Failed to retrieve AICH Hashset of corrupt part") );
		return;
	}

	// now compare the hash we just did to the verified hash and re-add all blocks which are ok
	uint32 nRecovered = 0;
	for (uint32 pos = 0; pos < length; pos += EMBLOCKSIZE){
		const uint32 nBlockSize = min<uint32>(EMBLOCKSIZE, length - pos);
		CAICHHashTree* pVerifiedBlock = pVerifiedHash->FindHash(pos, nBlockSize);
		CAICHHashTree* pOurBlock = htOurHash.FindHash(pos, nBlockSize);
		if ( pVerifiedBlock == NULL || pOurBlock == NULL || !pVerifiedBlock->GetHashValid() || !pOurBlock->GetHashValid()){
			continue;
		}
		if (pOurBlock->GetHash() == pVerifiedBlock->GetHash()){
			FillGap(PARTSIZE*nPart+pos, PARTSIZE*nPart + pos + (nBlockSize-1));
			RemoveBlockFromList(PARTSIZE*nPart, PARTSIZE*nPart + (nBlockSize-1));
			nRecovered += nBlockSize;
			// tell the blackbox about the verified data
			m_CorruptionBlackBox->VerifiedData(true, nPart, pos, pos + nBlockSize - 1);
		} else {
			// inform our "blackbox" about the corrupted block which may ban clients who sent it
			m_CorruptionBlackBox->VerifiedData(false, nPart, pos, pos + nBlockSize - 1);
		}
	}
	m_CorruptionBlackBox->EvaluateData();

	// ok now some sanity checks
	if (IsComplete(nPart)) {
		// this is bad, but it could probably happen under some rare circumstances
		// make sure that MD4 agrees to this fact too
		if (!HashSinglePart(nPart)) {
			AddDebugLogLineN(logAICHRecovery,
				CFormat(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering - but MD4 says it corrupt! Setting hashset to error state, deleting part")) % nPart);
			// now we are fu... unhappy
			m_pAICHHashSet->SetStatus(AICH_ERROR);
			return;
		}

		AddDebugLogLineN(logAICHRecovery,
			CFormat(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering and MD4 agrees")) % nPart);
		if (status == PS_EMPTY && theApp->IsRunning()) {
			if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
				// Successfully recovered part, make it available for sharing
				SetStatus(PS_READY);
				theApp->sharedfiles->SafeAddKFile(this);
			}
		}

		if (theApp->IsRunning()) {
			// Is this file finished?
			if (m_gaplist.IsComplete()) {
				CompleteFile(false);
			}
		}
	} // end sanity check

	// We did the best we could. If it's still incomplete, then no need to keep
	// bashing it with ICH. So remove it from the list of corrupted parts.
	EraseFirstValue(m_corrupted_list, nPart);

	// make sure the user appreciates our great recovering work :P
	AddDebugLogLineC( logAICHRecovery, CFormat(
		wxT("AICH successfully recovered %s of %s from part %u for %s") )
		% CastItoXBytes(nRecovered)
		% CastItoXBytes(length)
		% nPart
		% GetFileName() );
}
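// Recovery granularity, as implemented by the loop above: a corrupt PARTSIZE
// part (~9.28 MB) is verified in EMBLOCKSIZE (~180 kB) blocks, so only the
// blocks whose AICH hashes disagree with the trusted master hash have to be
// re-downloaded; every matching block is kept and counted in nRecovered.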
void CPartFile::ClientStateChanged( int oldState, int newState )
{
	if ( oldState == newState )
		return;

	// If the state is -1, then it's an entirely new item
	if ( oldState != -1 ) {
		// Was the old state a valid state?
		if ( oldState == DS_ONQUEUE || oldState == DS_DOWNLOADING ) {
			m_validSources--;
		} else {
			if ( oldState == DS_CONNECTED /* || oldState == DS_REMOTEQUEUEFULL */ ) {
				m_validSources--;
			}

			m_notCurrentSources--;
		}
	}

	// If the state is -1, then the source is being removed
	if ( newState != -1 ) {
		// Was the new state a valid state?
		if ( newState == DS_ONQUEUE || newState == DS_DOWNLOADING ) {
			++m_validSources;
		} else {
			if ( newState == DS_CONNECTED /* || newState == DS_REMOTEQUEUEFULL */ ) {
				++m_validSources;
			}

			++m_notCurrentSources;
		}
	}
}
bool CPartFile::AddSource( CUpDownClient* client )
{
	if (m_SrcList.insert( client ).second) {
		theStats::AddFoundSource();
		theStats::AddSourceOrigin(client->GetSourceFrom());
		return true;
	}
	return false;
}


bool CPartFile::DelSource( CUpDownClient* client )
{
	if (m_SrcList.erase( client )) {
		theStats::RemoveSourceOrigin(client->GetSourceFrom());
		theStats::RemoveFoundSource();
		return true;
	}
	return false;
}
void CPartFile::UpdatePartsFrequency( CUpDownClient* client, bool increment )
{
	const BitVector& freq = client->GetPartStatus();

	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

		if ( !increment ) {
			return;
		}
	}

	unsigned int size = freq.size();
	if ( size != m_SrcpartFrequency.size() ) {
		return;
	}

	if ( increment ) {
		for ( unsigned int i = 0; i < size; i++ ) {
			if ( freq.get(i) ) {
				m_SrcpartFrequency[i]++;
			}
		}
	} else {
		for ( unsigned int i = 0; i < size; i++ ) {
			if ( freq.get(i) ) {
				m_SrcpartFrequency[i]--;
			}
		}
	}
}
void CPartFile::GetRatingAndComments(FileRatingList& list) const
{
	list.clear();

	// This can be pre-processed, but is it worth the CPU?
	CPartFile::SourceSet::const_iterator it = m_SrcList.begin();
	for ( ; it != m_SrcList.end(); ++it ) {
		CUpDownClient* cur_src = *it;
		if (cur_src->GetFileComment().Length() > 0 || cur_src->GetFileRating() > 0) {
			// AddDebugLogLineN(logPartFile, wxString(wxT("found a comment for ")) << GetFileName());
			list.push_back(SFileRating(*cur_src));
		}
	}
}
#else	// CLIENT_GUI

CPartFile::CPartFile(CEC_PartFile_Tag* tag) : CKnownFile(tag)
{
	SetFileName(CPath(tag->FileName()));
	m_abyFileHash = tag->FileHash();
	SetFileSize(tag->SizeFull());
	m_gaplist.Init(GetFileSize(), true);	// Init empty
	m_partmetfilename = CPath(tag->PartMetName());
	m_fullname = m_partmetfilename;		// We have only the met number, so show it without path in the detail dialog.

	m_SrcpartFrequency.insert(m_SrcpartFrequency.end(), GetPartCount(), 0);

	// these are only in CLIENT_GUI and not covered by Init()
	m_iDownPriorityEC = 0;
	m_a4af_source_count = 0;
}

/*
 * Remote gui specific code
 */
CPartFile::~CPartFile()
{
}


void CPartFile::GetRatingAndComments(FileRatingList& list) const
{
	list = m_FileRatingList;
}


void CPartFile::SetCategory(uint8 cat)
{
	m_category = cat;
}


bool CPartFile::AddSource(CUpDownClient* client)
{
	return m_SrcList.insert(client).second != 0;
}


bool CPartFile::DelSource(CUpDownClient* client)
{
	return m_SrcList.erase(client) != 0;
}

#endif // !CLIENT_GUI
void CPartFile::UpdateDisplayedInfo(bool force)
{
	uint32 curTick = ::GetTickCount();

	// Wait 1.5s between each redraw
	if (force || curTick - m_lastRefreshedDLDisplay > MINWAIT_BEFORE_DLDISPLAY_WINDOWUPDATE) {
		Notify_DownloadCtrlUpdateItem(this);
		m_lastRefreshedDLDisplay = curTick;
	}
}
void CPartFile::Init()
{
	m_lastsearchtime = 0;
	lastpurgetime = ::GetTickCount();
	m_insufficient = false;

	m_iLastPausePurge = time(NULL);

	if (thePrefs::GetNewAutoDown()) {
		m_iDownPriority = PR_HIGH;
		m_bAutoDownPriority = true;
	} else {
		m_iDownPriority = PR_NORMAL;
		m_bAutoDownPriority = false;
	}

	transferingsrc = 0; // new

	m_hashsetneeded = true;
	percentcompleted = 0;
	m_bPreviewing = false;
	lastseencomplete = 0;
	m_availablePartsCount = 0;
	m_ClientSrcAnswered = 0;
	m_LastNoNeededCheck = 0;
	m_nTotalBufferData = 0;
	m_nLastBufferFlushTime = 0;
	m_bPercentUpdated = false;
	m_bRecoveringArchive = false;
	m_iGainDueToCompression = 0;
	m_iLostDueToCorruption = 0;
	m_iTotalPacketsSavedDueToICH = 0;
	m_lastRefreshedDLDisplay = 0;
	m_nDlActiveTime = 0;
	m_is_A4AF_auto = false;
	m_localSrcReqQueued = false;
	m_nCompleteSourcesTime = time(NULL);
	m_nCompleteSourcesCount = 0;
	m_nCompleteSourcesCountLo = 0;
	m_nCompleteSourcesCountHi = 0;
	m_notCurrentSources = 0;
	m_LastSearchTimeKad = 0;
	m_TotalSearchesKad = 0;
	m_CorruptionBlackBox = new CCorruptionBlackBox();
}
wxString CPartFile::getPartfileStatus() const
{
	wxString mybuffer;

	if ((status == PS_HASHING) || (status == PS_WAITINGFORHASH)) {
		mybuffer = _("Hashing");
	} else if (status == PS_ALLOCATING) {
		mybuffer = _("Allocating");
	} else {
		switch (GetStatus()) {
			case PS_COMPLETING:
				mybuffer = _("Completing");
				break;
			case PS_COMPLETE:
				mybuffer = _("Complete");
				break;
			case PS_PAUSED:
				mybuffer = _("Paused");
				break;
			case PS_ERROR:
				mybuffer = _("Erroneous");
				break;
			case PS_INSUFFICIENT:
				mybuffer = _("Insufficient disk space");
				break;
			default:
				if (GetTransferingSrcCount() > 0) {
					mybuffer = _("Downloading");
				} else {
					mybuffer = _("Waiting");
				}
				break;
		}
	}

	if (m_stopped && (GetStatus() != PS_COMPLETE)) {
		mybuffer = _("Stopped");
	}

	return mybuffer;
}
int CPartFile::getPartfileStatusRang() const
{
	int tempstatus = 0;
	if (GetTransferingSrcCount() == 0) tempstatus = 1;
	switch (GetStatus()) {
		case PS_WAITINGFORHASH:
			// ...
			break;
	}
	return tempstatus;
}
wxString CPartFile::GetFeedback() const
{
	wxString retval = CKnownFile::GetFeedback();
	if (GetStatus() != PS_COMPLETE) {
		retval += CFormat(wxT("%s: %s (%.2f%%)\n%s: %u\n"))
			% _("Downloaded") % CastItoXBytes(GetCompletedSize()) % GetPercentCompleted() % _("Sources") % GetSourceCount();
	}
	return retval + _("Status") + wxT(": ") + getPartfileStatus() + wxT("\n");
}
sint32 CPartFile::getTimeRemaining() const
{
	if (GetKBpsDown() < 0.001) {
		return -1;
	}
	return ((GetFileSize() - GetCompletedSize()) / ((int)(GetKBpsDown() * 1024.0)));
}
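// Worked example (illustrative numbers): a 700 MB file with 100 MB already
// completed, downloading at 250 kB/s, yields
//     (734003200 - 104857600) / (250 * 1024) = 629145600 / 256000 ≈ 2458 s,
// i.e. roughly 41 minutes remaining; a rate below 0.001 kB/s is treated as unknown.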
bool CPartFile::PreviewAvailable()
{
	const uint64 minSizeForPreview = 256 * 1024;
	FileType type = GetFiletype(GetFileName());

	return (type == ftVideo || type == ftAudio) &&
		GetFileSize() >= minSizeForPreview &&
		IsComplete(0, minSizeForPreview);
}
bool CPartFile::CheckShowItemInGivenCat(int inCategory)
{
	// first check if the item belongs in this cat in principle
	if (inCategory > 0 && inCategory != GetCategory()) {
		return false;
	}

	// if yes apply the filter
	bool show = true;

	switch (thePrefs::GetAllcatFilter()) {
		case acfAllOthers:
			show = GetCategory() == 0 || inCategory > 0;
			break;
		case acfIncomplete:
			show = IsPartFile();
			break;
		case acfCompleted:
			show = !IsPartFile();
			break;
		case acfWaiting:
			show =
				(GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
				GetTransferingSrcCount() == 0;
			break;
		case acfDownloading:
			show =
				(GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
				GetTransferingSrcCount() > 0;
			break;
		case acfErroneous:
			show = GetStatus() == PS_ERROR;
			break;
		case acfPaused:
			show = GetStatus() == PS_PAUSED && !IsStopped();
			break;
		case acfStopped:
			show = IsStopped();
			break;
		case acfVideo:
			show = GetFiletype(GetFileName()) == ftVideo;
			break;
		case acfAudio:
			show = GetFiletype(GetFileName()) == ftAudio;
			break;
		case acfArchive:
			show = GetFiletype(GetFileName()) == ftArchive;
			break;
		case acfCDImages:
			show = GetFiletype(GetFileName()) == ftCDImage;
			break;
		case acfPictures:
			show = GetFiletype(GetFileName()) == ftPicture;
			break;
		case acfText:
			show = GetFiletype(GetFileName()) == ftText;
			break;
		case acfActive:
			show = !IsStopped() && GetStatus() != PS_PAUSED;
			break;
		default:
			show = true;
			break;
	}

	return show;
}
void CPartFile::SetActive(bool bActive)
{
	time_t tNow = time(NULL);
	if (bActive) {
		if (theApp->IsConnected()) {
			if (m_tActivated == 0) {
				m_tActivated = tNow;
			}
		}
	} else {
		if (m_tActivated != 0) {
			m_nDlActiveTime += tNow - m_tActivated;
			m_tActivated = 0;
		}
	}
}


uint32 CPartFile::GetDlActiveTime() const
{
	uint32 nDlActiveTime = m_nDlActiveTime;
	if (m_tActivated != 0) {
		nDlActiveTime += time(NULL) - m_tActivated;
	}
	return nDlActiveTime;
}
uint16 CPartFile::GetPartMetNumber() const
{
	long nr;
	return m_partmetfilename.RemoveAllExt().GetRaw().ToLong(&nr) ? nr : 0;
}
uint8 CPartFile::GetStatus(bool ignorepause) const
{
	if ( (!m_paused && !m_insufficient) ||
		status == PS_ERROR ||
		status == PS_COMPLETING ||
		status == PS_COMPLETE ||
		ignorepause ) {
		return status;
	} else if ( m_insufficient ) {
		return PS_INSUFFICIENT;
	} else {
		return PS_PAUSED;
	}
}
void CPartFile::AddDeadSource(const CUpDownClient* client)
{
	m_deadSources.AddDeadSource( client );
}


bool CPartFile::IsDeadSource(const CUpDownClient* client)
{
	return m_deadSources.IsDeadSource( client );
}
void CPartFile::SetFileName(const CPath& fileName)
{
	CKnownFile* pFile = theApp->sharedfiles->GetFileByID(GetFileHash());

	bool is_shared = (pFile && pFile == this);

	if (is_shared) {
		// The file is shared, we must clear the search keywords so we don't
		// publish the old name anymore.
		theApp->sharedfiles->RemoveKeywords(this);
	}

	CKnownFile::SetFileName(fileName);

	if (is_shared) {
		// And of course, we must advertise the new name if the file is shared.
		theApp->sharedfiles->AddKeywords(this);
	}

	UpdateDisplayedInfo(true);
}
uint16 CPartFile::GetMaxSources() const
{
	// This is just like this, while we don't import the private max sources per file
	return thePrefs::GetMaxSourcePerFile();
}


uint16 CPartFile::GetMaxSourcePerFileSoft() const
{
	unsigned int temp = ((unsigned int)GetMaxSources() * 9L) / 10;
	if (temp > MAX_SOURCES_FILE_SOFT) {
		return MAX_SOURCES_FILE_SOFT;
	}
	return temp;
}


uint16 CPartFile::GetMaxSourcePerFileUDP() const
{
	unsigned int temp = ((unsigned int)GetMaxSources() * 3L) / 4;
	if (temp > MAX_SOURCES_FILE_UDP) {
		return MAX_SOURCES_FILE_UDP;
	}
	return temp;
}


#define DROP_FACTOR 2

CUpDownClient* CPartFile::GetSlowerDownloadingClient(uint32 speed, CUpDownClient* caller)
{
//	printf("Start slower source calculation\n");
	for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = *it++;
		if ((cur_src->GetDownloadState() == DS_DOWNLOADING) && (cur_src != caller)) {
			uint32 factored_bytes_per_second = static_cast<uint32>(
				(cur_src->GetKBpsDown() * 1024) * DROP_FACTOR);
			if ( factored_bytes_per_second < speed ) {
//				printf("Selecting source %p to drop: %d < %d\n", cur_src, factored_bytes_per_second, speed);
//				printf("End slower source calculation\n");
				return cur_src;
			} else {
//				printf("Not selecting source %p to drop: %d > %d\n", cur_src, factored_bytes_per_second, speed);
			}
		}
	}
//	printf("End slower source calculation\n");
	return NULL;
}
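// Two quick arithmetic notes on the helpers above (illustrative values):
//  - with an assumed per-file limit of 300 sources, GetMaxSourcePerFileSoft()
//    yields 300 * 9 / 10 = 270 and GetMaxSourcePerFileUDP() 300 * 3 / 4 = 225,
//    both additionally capped by MAX_SOURCES_FILE_SOFT / MAX_SOURCES_FILE_UDP;
//  - in GetSlowerDownloadingClient(), a source downloading at 30 kB/s gives
//    30 * 1024 * DROP_FACTOR = 61440 factored bytes/s, so it is only returned
//    as a drop candidate when the speed passed in exceeds that value, i.e.
//    when the source runs at less than half the reference speed.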
void CPartFile::AllocationFinished()
{
	// see if it can be opened
	if (!m_hpartfile.Open(m_PartPath, CFile::read_write)) {
		AddLogLineN(CFormat(_("ERROR: Failed to open partfile '%s'")) % GetFullName());
		SetStatus(PS_ERROR);
	} else {
		// then close the handle again
		m_hpartfile.Release(true);
	}
}
// File_checked_for_headers