// This file is part of the aMule Project.
//
// Copyright (c) 2003-2008 aMule Team ( admin@amule.org / http://www.amule.org )
// Copyright (c) 2002-2008 Merkur ( devs@emule-project.net / http://www.emule-project.net )
//
// Any parts of this program derived from the xMule, lMule or eMule project,
// or contributed by third-party developers are copyrighted by their
// respective authors.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
28 #include "PartFile.h" // Interface declarations.
31 #include "config.h" // Needed for VERSION
34 #include <protocol/kad/Constants.h>
35 #include <protocol/ed2k/Client2Client/TCP.h>
36 #include <protocol/Protocols.h>
37 #include <common/DataFileVersion.h>
38 #include <common/Constants.h>
39 #include <tags/FileTags.h>
42 #include <wx/tokenzr.h> // Needed for wxStringTokenizer
44 #include "KnownFileList.h" // Needed for CKnownFileList
45 #include "UploadQueue.h" // Needed for CFileHash
46 #include "IPFilter.h" // Needed for CIPFilter
47 #include "Server.h" // Needed for CServer
48 #include "ServerConnect.h" // Needed for CServerConnect
49 #include "updownclient.h" // Needed for CUpDownClient
50 #include "MemFile.h" // Needed for CMemFile
51 #include "Preferences.h" // Needed for CPreferences
52 #include "DownloadQueue.h" // Needed for CDownloadQueue
53 #include "amule.h" // Needed for theApp
54 #include "ED2KLink.h" // Needed for CED2KLink
55 #include "Packet.h" // Needed for CTag
56 #include "SearchList.h" // Needed for CSearchFile
57 #include "ClientList.h" // Needed for clientlist
58 #include "Statistics.h" // Needed for theStats
60 #include <common/Format.h> // Needed for CFormat
61 #include <common/FileFunctions.h> // Needed for GetLastModificationTime
62 #include "ThreadTasks.h" // Needed for CHashingTask/CCompletionTask/CAllocateFileTask
63 #include "GuiEvents.h" // Needed for Notify_*
64 #include "DataToText.h" // Needed for OriginToText()
65 #include "PlatformSpecific.h" // Needed for CreateSparseFile()
66 #include "FileArea.h" // Needed for CFileArea
67 #include "ScopedPtr.h" // Needed for CScopedArray
69 #include "kademlia/kademlia/Kademlia.h"
70 #include "kademlia/kademlia/Search.h"
SFileRating::SFileRating(const wxString &u, const wxString &f, sint16 r, const wxString &c)
:
	UserName(u),
	FileName(f),
	Rating(r),
	Comment(c)
{
}


SFileRating::SFileRating(const SFileRating &fr)
:
	UserName(fr.UserName),
	FileName(fr.FileName),
	Rating(fr.Rating),
	Comment(fr.Comment)
{
}


SFileRating::SFileRating(const CUpDownClient &client)
:
	UserName(client.GetUserName()),
	FileName(client.GetClientFilename()),
	Rating(client.GetFileRating()),
	Comment(client.GetFileComment())
{
}


SFileRating::~SFileRating()
{
}
class PartFileBufferedData
{
public:
	CScopedArray<byte> data;		// This is the data to be written
	uint64 start;				// This is the start offset of the data
	uint64 end;				// This is the end offset of the data
	Requested_Block_Struct *block;		// This is the requested block that this data relates to

	PartFileBufferedData(byte *_data, uint64 _start, uint64 _end, Requested_Block_Struct *_block)
		: data(_data), start(_start), end(_end), block(_block)
	{
	}
};


typedef std::list<Chunk> ChunkList;
CPartFile::CPartFile()
{
	Init();
}


CPartFile::CPartFile(CSearchFile* searchresult)
{
	Init();

	m_abyFileHash = searchresult->GetFileHash();
	SetFileName(searchresult->GetFileName());
	SetFileSize(searchresult->GetFileSize());

	for (unsigned int i = 0; i < searchresult->m_taglist.size(); ++i) {
		const CTag& pTag = searchresult->m_taglist[i];

		bool bTagAdded = false;
		if (pTag.GetNameID() == 0 && !pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {
				wxChar*	pszName;
				uint8	nType;
			} _aMetaTags[] = {
				{ wxT(FT_ED2K_MEDIA_ARTIST),  2 },
				{ wxT(FT_ED2K_MEDIA_ALBUM),   2 },
				{ wxT(FT_ED2K_MEDIA_TITLE),   2 },
				{ wxT(FT_ED2K_MEDIA_LENGTH),  2 },
				{ wxT(FT_ED2K_MEDIA_BITRATE), 3 },
				{ wxT(FT_ED2K_MEDIA_CODEC),   2 }
			};

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if (pTag.GetType() == _aMetaTags[t].nType &&
				    (pTag.GetName() == _aMetaTags[t].pszName)) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {
						break;
					}

					// skip "length" tags with "0: 0" values
					if (pTag.GetName() == wxT(FT_ED2K_MEDIA_LENGTH)) {
						if (pTag.GetStr().IsSameAs(wxT("0: 0")) ||
						    pTag.GetStr().IsSameAs(wxT("0:0"))) {
							break;
						}
					}

					// skip "bitrate" tags with '0' values
					if ((pTag.GetName() == wxT(FT_ED2K_MEDIA_BITRATE)) && !pTag.GetInt()) {
						break;
					}

					AddDebugLogLineM( false, logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);
					bTagAdded = true;
					break;
				}
			}
		} else if (pTag.GetNameID() != 0 && pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {
				uint8	nID;
				uint8	nType;
			} _aMetaTags[] = {
				// ...
			};

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if (pTag.GetType() == _aMetaTags[t].nType && pTag.GetNameID() == _aMetaTags[t].nID) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {
						break;
					}

					AddDebugLogLineM( false, logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);
					bTagAdded = true;
					break;
				}
			}
		}

		if (!bTagAdded) {
			AddDebugLogLineM( false, logPartFile,
				wxT("CPartFile::CPartFile(CSearchFile*): ignored tag ") +
				pTag.GetFullInfo() );
		}
	}
}
CPartFile::CPartFile(const CED2KFileLink* fileLink)
{
	Init();

	SetFileName(CPath(fileLink->GetName()));
	SetFileSize(fileLink->GetSize());
	m_abyFileHash = fileLink->GetHashKey();

	if (fileLink->m_hashset) {
		if (!LoadHashsetFromFile(fileLink->m_hashset, true)) {
			AddDebugLogLineM(true, logPartFile, wxT("eD2K link contained invalid hashset: ") + fileLink->GetLink());
		}
	}
}
CPartFile::~CPartFile()
{
	// Barry - Ensure all buffered data is written

	// eMule had the same problem with an lseek error ... and overrode it with a simple
	// check for INVALID_HANDLE_VALUE (which, btw, does not exist on Linux).
	// So we just assume < 0 means error and > 2 means ok (0 stdin, 1 stdout, 2 stderr).
	// But where does this wrong handle come from?

	if (m_hpartfile.IsOpened() && (m_hpartfile.fd() > 2)) {
		FlushBuffer();
	}

	if (m_hpartfile.IsOpened() && (m_hpartfile.fd() > 2)) {
		m_hpartfile.Close();
		// Update met file (with current directory entry)
		SavePartFile();
	}

	DeleteContents(m_BufferedData_list);

	wxASSERT(m_SrcList.empty());
	wxASSERT(m_A4AFsrclist.empty());
}
void CPartFile::CreatePartFile()
{
	// Use the lowest free part-file number for the new file (InterCeptor)
	int i = 0;
	do {
		++i;
		m_partmetfilename = CPath(wxString::Format(wxT("%03i.part.met"), i));
		m_fullname = thePrefs::GetTempDir().JoinPaths(m_partmetfilename);
	} while (m_fullname.FileExists());

	wxString strPartName = m_partmetfilename.RemoveExt().GetRaw();
	m_taglist.push_back(CTagString(FT_PARTFILENAME, strPartName));

	m_gaplist.Init(GetFileSize(), true);	// Init empty

	m_PartPath = m_fullname.RemoveExt();
	bool fileCreated;
	if (thePrefs::GetAllocFullFile()) {
		fileCreated = m_hpartfile.Create(m_PartPath.GetRaw(), true);
	} else {
		fileCreated = PlatformSpecific::CreateSparseFile(m_PartPath, GetFileSize());
	}
	if (!fileCreated) {
		AddLogLineM(false, _("ERROR: Failed to create partfile"));
		SetPartFileStatus(PS_ERROR);
	}

	SetFilePath(thePrefs::GetTempDir());

	if (thePrefs::GetAllocFullFile()) {
		SetPartFileStatus(PS_ALLOCATING);
		CThreadScheduler::AddTask(new CAllocateFileTask(this, thePrefs::AddNewFilesPaused()));
	} else {
		AllocationFinished();
	}

	m_hashsetneeded = (GetED2KPartHashCount() > 0);

	SetActive(theApp->IsConnected());
}
uint8 CPartFile::LoadPartFile(const CPath& in_directory, const CPath& filename, bool from_backup, bool getsizeonly)
{
	bool isnewstyle = false;
	uint8 version, partmettype = PMT_UNKNOWN;

	std::map<uint16, Gap_Struct*> gap_map; // Slugfiller

	m_partmetfilename = filename;
	m_filePath = in_directory;
	m_fullname = m_filePath.JoinPaths(m_partmetfilename);
	m_PartPath = m_fullname.RemoveExt();

	// Read the data from the part.met file
	CPath curMetFilename = m_fullname;
	if (from_backup) {
		curMetFilename = curMetFilename.AppendExt(PARTMET_BAK_EXT);
		AddLogLineM(false, CFormat( _("Trying to load backup of met-file from %s") )
			% curMetFilename);
	}
	try {
		CFile metFile(curMetFilename, CFile::read);
		if (!metFile.IsOpened()) {
			AddLogLineM(false, CFormat( _("ERROR: Failed to open part.met file: %s ==> %s") )
				% curMetFilename % GetFileName());
			return false;
		} else if (metFile.GetLength() == 0) {
			AddLogLineM(false, CFormat( _("ERROR: part.met file is 0 size: %s ==> %s") )
				% m_partmetfilename % GetFileName());
			return false;
		}

		version = metFile.ReadUInt8();
		if (version != PARTFILE_VERSION && version != PARTFILE_SPLITTEDVERSION && version != PARTFILE_VERSION_LARGEFILE){
			//if (version == 83) return ImportShareazaTempFile(...)
			AddLogLineM(false, CFormat( _("ERROR: Invalid part.met file version: %s ==> %s") )
				% m_partmetfilename % GetFileName());
			return false;
		}

		isnewstyle = (version == PARTFILE_SPLITTEDVERSION);
		partmettype = isnewstyle ? PMT_SPLITTED : PMT_DEFAULTOLD;
		if (version == PARTFILE_VERSION) {// Do we still need this check ?
			uint8 test[4];	// It will fail for certain files.
			metFile.Seek(24, wxFromStart);
			metFile.Read(test, 4);

			metFile.Seek(1, wxFromStart);
			if (test[0]==0 && test[1]==0 && test[2]==2 && test[3]==1) {
				isnewstyle=true;	// edonkey's so-called "old part style"
				partmettype=PMT_NEWOLD;
			}
		}

		if (isnewstyle) {
			uint32 temp = metFile.ReadUInt32();

			if (temp==0) {	// 0.48 partmets - different again
				LoadHashsetFromFile(&metFile, false);
			} else {
				metFile.Seek(2, wxFromStart);
				LoadDateFromFile(&metFile);
				m_abyFileHash = metFile.ReadHash();
			}
		} else {
			LoadDateFromFile(&metFile);
			LoadHashsetFromFile(&metFile, false);
		}
		uint32 tagcount = metFile.ReadUInt32();

		for (uint32 j = 0; j < tagcount; ++j) {
			CTag newtag(metFile, true);
			if (	!getsizeonly ||
				(getsizeonly &&
					(newtag.GetNameID() == FT_FILESIZE ||
					 newtag.GetNameID() == FT_FILENAME))) {
				switch(newtag.GetNameID()) {
					case FT_FILENAME: {
						if (!GetFileName().IsOk()) {
							// If it's not empty, we already loaded the unicoded one
							SetFileName(CPath(newtag.GetStr()));
						}
						break;
					}
					case FT_LASTSEENCOMPLETE: {
						lastseencomplete = newtag.GetInt();
						break;
					}
					case FT_FILESIZE: {
						SetFileSize(newtag.GetInt());
						break;
					}
					case FT_TRANSFERRED: {
						transferred = newtag.GetInt();
						break;
					}
					case FT_FILETYPE: {
						//#warning needs setfiletype string
						//SetFileType(newtag.GetStr());
						break;
					}
					case FT_CATEGORY: {
						m_category = newtag.GetInt();
						if (m_category > theApp->glob_prefs->GetCatCount() - 1 ) {
							m_category = 0;
						}
						break;
					}
					case FT_OLDDLPRIORITY:
					case FT_DLPRIORITY: {
						if (!isnewstyle){
							m_iDownPriority = newtag.GetInt();
							if( m_iDownPriority == PR_AUTO ){
								m_iDownPriority = PR_HIGH;
								SetAutoDownPriority(true);
							} else {
								if (	m_iDownPriority != PR_LOW &&
									m_iDownPriority != PR_NORMAL &&
									m_iDownPriority != PR_HIGH )
									m_iDownPriority = PR_NORMAL;
								SetAutoDownPriority(false);
							}
						}
						break;
					}
					case FT_STATUS: {
						m_paused = (newtag.GetInt() == 1);
						m_stopped = m_paused;
						break;
					}
					case FT_OLDULPRIORITY:
					case FT_ULPRIORITY: {
						if (!isnewstyle){
							SetUpPriority(newtag.GetInt(), false);
							if( GetUpPriority() == PR_AUTO ){
								SetUpPriority(PR_HIGH, false);
								SetAutoUpPriority(true);
							} else {
								SetAutoUpPriority(false);
							}
						}
						break;
					}
					case FT_KADLASTPUBLISHSRC:{
						SetLastPublishTimeKadSrc(newtag.GetInt(), 0);
						if(GetLastPublishTimeKadSrc() > (uint32)time(NULL)+KADEMLIAREPUBLISHTIMES) {
							//There may be a possibility of an older client that saved a random number here.. This will check for that..
							SetLastPublishTimeKadSrc(0,0);
						}
						break;
					}
					case FT_KADLASTPUBLISHNOTES:{
						SetLastPublishTimeKadNotes(newtag.GetInt());
						break;
					}
					// old tags: as long as they are not needed, take the chance to purge them
					case FT_KADLASTPUBLISHKEY:
						break;
					case FT_DL_ACTIVE_TIME:
						if (newtag.IsInt()) {
							m_nDlActiveTime = newtag.GetInt();
						}
						break;
					case FT_CORRUPTEDPARTS: {
						wxASSERT(m_corrupted_list.empty());
						wxString strCorruptedParts(newtag.GetStr());
						wxStringTokenizer tokenizer(strCorruptedParts, wxT(","));
						while ( tokenizer.HasMoreTokens() ) {
							wxString token = tokenizer.GetNextToken();
							unsigned long uPart;
							if (token.ToULong(&uPart)) {
								if (uPart < GetPartCount() && !IsCorruptedPart(uPart)) {
									m_corrupted_list.push_back(uPart);
								}
							}
						}
						break;
					}
					case FT_AICH_HASH: {
						CAICHHash hash;
						bool hashSizeOk =
							hash.DecodeBase32(newtag.GetStr()) == CAICHHash::GetHashSize();
						wxASSERT(hashSizeOk);
						if (hashSizeOk) {
							m_pAICHHashSet->SetMasterHash(hash, AICH_VERIFIED);
						}
						break;
					}
					case FT_ATTRANSFERRED: {
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (uint64)newtag.GetInt());
						break;
					}
					case FT_ATTRANSFERREDHI: {
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (((uint64)newtag.GetInt()) << 32));
						break;
					}
					case FT_ATREQUESTED: {
						statistic.SetAllTimeRequests(newtag.GetInt());
						break;
					}
					case FT_ATACCEPTED: {
						statistic.SetAllTimeAccepts(newtag.GetInt());
						break;
					}
					default: {
						// Start Changes by Slugfiller for better exception handling

						wxCharBuffer tag_ansi_name = newtag.GetName().ToAscii();
						char gap_mark = tag_ansi_name ? tag_ansi_name[0u] : 0;
						if (	newtag.IsInt() && (newtag.GetName().Length() > 1) &&
							((gap_mark == FT_GAPSTART) ||
							 (gap_mark == FT_GAPEND))) {
							Gap_Struct *gap = NULL;
							unsigned long int gapkey;
							if (newtag.GetName().Mid(1).ToULong(&gapkey)) {
								if ( gap_map.find( gapkey ) == gap_map.end() ) {
									gap = new Gap_Struct;
									gap_map[gapkey] = gap;
									gap->start = (uint64)-1;
									gap->end = (uint64)-1;
								} else {
									gap = gap_map[ gapkey ];
								}
								if (gap_mark == FT_GAPSTART) {
									gap->start = newtag.GetInt();
								}
								if (gap_mark == FT_GAPEND) {
									gap->end = newtag.GetInt()-1;
								}
							} else {
								AddDebugLogLineN(logPartFile, wxT("Wrong gap map key while reading met file!"));
							}
							// End Changes by Slugfiller for better exception handling
						} else {
							m_taglist.push_back(newtag);
						}
					}
				}
			} else {
				// Nothing. Else, nothing.
			}
		}
		// load the hashsets from the hybridstylepartmet
		if (isnewstyle && !getsizeonly && (metFile.GetPosition()<metFile.GetLength()) ) {
			metFile.Seek(1, wxFromCurrent);

			uint16 parts=GetPartCount();	// assuming we will get all hashsets

			for (uint16 i = 0; i < parts && (metFile.GetPosition()+16<metFile.GetLength()); ++i){
				CMD4Hash cur_hash = metFile.ReadHash();
				m_hashlist.push_back(cur_hash);
			}

			CMD4Hash checkhash;
			if (!m_hashlist.empty()) {
				CreateHashFromHashlist(m_hashlist, &checkhash);
			}
			if (m_abyFileHash == checkhash) {
				// hashset matches the file hash, keep it
			} else {
				m_hashlist.clear();
			}
		}
	} catch (const CInvalidPacket& e) {
		AddLogLineM(true, CFormat(wxT("Error: %s (%s) is corrupt (bad tags: %s), unable to load file."))
			% m_partmetfilename % GetFileName() % e.what());
		return false;
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logPartFile, CFormat( wxT("IO failure while loading '%s': %s") )
			% m_partmetfilename % e.what());
		return false;
	} catch (const CEOFException& WXUNUSED(e)) {
		AddLogLineM(true, CFormat( _("ERROR: %s (%s) is corrupt (wrong tagcount), unable to load file.") )
			% m_partmetfilename % GetFileName());
		AddLogLineM(true, _("Trying to recover file info..."));

		// A file is only recoverable ("safe") if we have at least its size.
		if (GetFileSize()) {
			// We have filesize, try other needed info

			// Do we need to check gaps? I think not,
			// because they are checked below. Worst
			// scenario will only mark file as 0 bytes downloaded.

			if (!GetFileName().IsOk()) {
				// Not critical, let's put a random filename.
				AddLogLineM(true, _(
					"Recovering no-named file - will try to recover it as RecoveredFile.dat"));
				SetFileName(CPath(wxT("RecoveredFile.dat")));
			}

			AddLogLineM(true,
				_("Recovered all available file info :D - Trying to use it..."));
		} else {
			AddLogLineM(true, _("Unable to recover file info :("));
			return false;
		}
	}
	if (getsizeonly) {
		return partmettype;
	}

	m_gaplist.Init(GetFileSize(), false);	// Init full, then add gaps

	// Now to flush the map into the list (Slugfiller)
	std::map<uint16, Gap_Struct*>::iterator it = gap_map.begin();
	for ( ; it != gap_map.end(); ++it ) {
		Gap_Struct* gap = it->second;
		// SLUGFILLER: SafeHash - revised code, and extra safety
		if (	(gap->start != (uint64)-1) &&
			(gap->end != (uint64)-1) &&
			gap->start <= gap->end &&
			gap->start < GetFileSize()) {
			if (gap->end >= GetFileSize()) {
				gap->end = GetFileSize()-1; // Clipping
			}
			m_gaplist.AddGap(gap->start, gap->end); // All tags accounted for, use safe adding
		}
		delete gap;
		// SLUGFILLER: SafeHash
	}
	// check if this is a backup
	if ( m_fullname.GetExt().MakeLower() == wxT("backup" )) {
		m_fullname = m_fullname.RemoveExt();
	}

	// open permanent handle
	if ( !m_hpartfile.Open(m_PartPath, CFile::read_write)) {
		AddLogLineM(false, CFormat( _("Failed to open %s (%s)") )
			% m_fullname % GetFileName());
		return false;
	}

	SetPartFileStatus(PS_EMPTY);

	try {
		// SLUGFILLER: SafeHash - final safety, make sure any missing part of the file is gap
		if (m_hpartfile.GetLength() < GetFileSize())
			AddGap(m_hpartfile.GetLength(), GetFileSize()-1);
		// Goes both ways - Partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			AddDebugLogLineM( true, logPartFile, CFormat( wxT("Partfile \"%s\" is too large! Truncating %llu bytes.") ) % GetFileName() % (m_hpartfile.GetLength() - GetFileSize()));
			m_hpartfile.SetLength(GetFileSize());
		}
		// SLUGFILLER: SafeHash
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM( true, logPartFile, CFormat( wxT("Error while accessing partfile \"%s\": %s") ) % GetFileName() % e.what());
		SetPartFileStatus(PS_ERROR);
	}
	// check hashcount, file status etc
	if (GetHashCount() != GetED2KPartHashCount()){
		m_hashsetneeded = true;
		return true;
	} else {
		m_hashsetneeded = false;
		for (size_t i = 0; i < m_hashlist.size(); ++i) {
			if (IsComplete(i)) {
				SetPartFileStatus(PS_READY);
			}
		}
	}

	if (m_gaplist.IsComplete()) { // is this file complete already?
		CompleteFile(false);
		return true;
	}

	if (!isnewstyle) { // not for importing
		const time_t file_date = CPath::GetModificationTime(m_PartPath);
		if (m_lastDateChanged != file_date) {
			// It's pointless to rehash an empty file, since the case
			// where a user has zero'd a file is handled above ...
			if (m_hpartfile.GetLength()) {
				AddLogLineM(false, CFormat( _("WARNING: %s might be corrupted (%i)") )
					% GetFileName()
					% (m_lastDateChanged - file_date) );
				// rehash
				SetPartFileStatus(PS_WAITINGFORHASH);

				CPath partFileName = m_partmetfilename.RemoveExt();
				CThreadScheduler::AddTask(new CHashingTask(m_filePath, partFileName, this));
			}
		}
	}

	UpdateCompletedInfos();
	if (completedsize > transferred) {
		m_iGainDueToCompression = completedsize - transferred;
	} else if (completedsize != transferred) {
		m_iLostDueToCorruption = transferred - completedsize;
	}

	return true;
}
bool CPartFile::SavePartFile(bool Initial)
{
	switch (status) {
		case PS_WAITINGFORHASH:
		// ...
			return false;
	}

	/* Don't write anything to disk if less than 100 KB of free space is left. */
	sint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if ((free != wxInvalidOffset) && (free < (100 * 1024))) {
		return false;
	}

	CFile file;
	try {
		if (!m_PartPath.FileExists()) {
			throw wxString(wxT(".part file not found"));
		}

		uint32 lsc = lastseencomplete;

		if (!Initial) {
			CPath::BackupFile(m_fullname, wxT(".backup"));
			CPath::RemoveFile(m_fullname);
		}
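		// Note on the save sequence (derived from the code in this function, not a separate
		// spec): the previous part.met is first moved aside to "<name>.backup" and removed;
		// once the new file has been written successfully, that ".backup" copy is deleted
		// again and a rotating PARTMET_BAK_EXT backup of the fresh part.met is refreshed at
		// the end of this function.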
		file.Open(m_fullname, CFile::write);
		if (!file.IsOpened()) {
			throw wxString(wxT("Failed to open part.met file"));
		}

		file.WriteUInt8(IsLargeFile() ? PARTFILE_VERSION_LARGEFILE : PARTFILE_VERSION);

		file.WriteUInt32(CPath::GetModificationTime(m_PartPath));

		file.WriteHash(m_abyFileHash);
		uint16 parts = m_hashlist.size();
		file.WriteUInt16(parts);
		for (int x = 0; x < parts; ++x) {
			file.WriteHash(m_hashlist[x]);
		}
		#define FIXED_TAGS 15
		uint32 tagcount = m_taglist.size() + FIXED_TAGS + (m_gaplist.size()*2);
		if (!m_corrupted_list.empty()) {
			++tagcount;
		}
		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
			++tagcount;
		}
		if (GetLastPublishTimeKadSrc()){
			++tagcount;
		}
		if (GetLastPublishTimeKadNotes()){
			++tagcount;
		}
		if (GetDlActiveTime()){
			++tagcount;
		}

		file.WriteUInt32(tagcount);
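		// Illustrative example (numbers invented): a part file with 4 entries in m_taglist,
		// 3 gaps, a non-empty corrupted-parts list and a verified AICH master hash would
		// announce 4 + 15 fixed tags + 3*2 gap tags + 1 + 1 = 27 tags here; the optional
		// Kad-publish and active-time tags are only counted when their values are non-zero.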
		//#warning Kry - Where are lost by corruption and gained by compression?

		// 0 (unicoded part file name)
		// We write it with BOM to keep eMule compatibility. Note that the 'printable' filename is saved,
		// as presently the filename does not represent an actual file.
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file, utf8strOptBOM );
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file );	// 1

		CTagIntSized( FT_FILESIZE, GetFileSize(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 2
		CTagIntSized( FT_TRANSFERRED, transferred, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 3
		CTagInt32( FT_STATUS, (m_paused ? 1 : 0)).WriteTagToFile( &file );	// 4

		if ( IsAutoDownPriority() ) {
			CTagInt32( FT_DLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 6
		} else {
			CTagInt32( FT_DLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 6
		}

		CTagInt32( FT_LASTSEENCOMPLETE, lsc ).WriteTagToFile( &file );	// 7

		if ( IsAutoUpPriority() ) {
			CTagInt32( FT_ULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 9
		} else {
			CTagInt32( FT_ULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 9
		}

		CTagInt32(FT_CATEGORY, m_category).WriteTagToFile( &file );	// 10
		CTagInt32(FT_ATTRANSFERRED, statistic.GetAllTimeTransferred() & 0xFFFFFFFF).WriteTagToFile( &file );	// 11
		CTagInt32(FT_ATTRANSFERREDHI, statistic.GetAllTimeTransferred() >> 32).WriteTagToFile( &file );	// 12
		CTagInt32(FT_ATREQUESTED, statistic.GetAllTimeRequests()).WriteTagToFile( &file );	// 13
		CTagInt32(FT_ATACCEPTED, statistic.GetAllTimeAccepts()).WriteTagToFile( &file );	// 14
		// corrupted part infos
		if (!m_corrupted_list.empty()) {
			wxString strCorruptedParts;
			std::list<uint16>::iterator it = m_corrupted_list.begin();
			for (; it != m_corrupted_list.end(); ++it) {
				uint16 uCorruptedPart = *it;
				if (!strCorruptedParts.IsEmpty()) {
					strCorruptedParts += wxT(",");
				}
				strCorruptedParts += wxString::Format(wxT("%u"), (unsigned)uCorruptedPart);
			}
			wxASSERT( !strCorruptedParts.IsEmpty() );

			CTagString( FT_CORRUPTEDPARTS, strCorruptedParts ).WriteTagToFile( &file );	// 11?
		}
		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
			CTagString aichtag(FT_AICH_HASH, m_pAICHHashSet->GetMasterHash().GetString() );
			aichtag.WriteTagToFile(&file);	// 12?
		}

		if (GetLastPublishTimeKadSrc()){
			CTagInt32(FT_KADLASTPUBLISHSRC, GetLastPublishTimeKadSrc()).WriteTagToFile(&file);	// 15?
		}

		if (GetLastPublishTimeKadNotes()){
			CTagInt32(FT_KADLASTPUBLISHNOTES, GetLastPublishTimeKadNotes()).WriteTagToFile(&file);	// 16?
		}

		if (GetDlActiveTime()){
			CTagInt32(FT_DL_ACTIVE_TIME, GetDlActiveTime()).WriteTagToFile(&file);	// 17
		}
		for (uint32 j = 0; j < (uint32)m_taglist.size(); ++j) {
			m_taglist[j].WriteTagToFile(&file);
		}
		// gaps
		unsigned i_pos = 0;
		for (CGapList::const_iterator it = m_gaplist.begin(); it != m_gaplist.end(); ++it) {
			wxString tagName = wxString::Format(wxT(" %u"), i_pos);

			// gap start = first missing byte but gap ends = first non-missing byte
			// in edonkey, but I think it's easier to use the real limits
			tagName[0] = FT_GAPSTART;
			CTagIntSized(tagName, it.start(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

			tagName[0] = FT_GAPEND;
			CTagIntSized(tagName, it.end() + 1, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

			++i_pos;
		}
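		// Illustrative example (values invented): gap #2 covering bytes 1000..1999 is stored
		// as two integer tags whose names are the single byte FT_GAPSTART or FT_GAPEND
		// followed by the gap index, i.e. "<FT_GAPSTART>2" = 1000 and "<FT_GAPEND>2" = 2000
		// (end + 1, the first non-missing byte, eMule style); LoadPartFile() reverses this
		// and subtracts 1 from the end value again.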
	} catch (const wxString& error) {
		AddLogLineNS(CFormat( _("ERROR while saving partfile: %s (%s ==> %s)") )
			% error % m_partmetfilename % GetFileName());
		return false;
	} catch (const CIOFailureException& e) {
		AddLogLineCS(_("IO failure while saving partfile: ") + e.what());
		return false;
	}

	CPath::RemoveFile(m_fullname.AppendExt(wxT(".backup")));

	sint64 metLength = m_fullname.GetFileSize();
	if (metLength == wxInvalidOffset) {
		theApp->ShowAlert( CFormat( _("Could not retrieve length of '%s' - using %s file.") )
			% m_fullname % PARTMET_BAK_EXT, _("Message"), wxOK);
		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
	} else if (metLength == 0) {
		// Don't backup if it's 0 size but raise a warning!!!
		theApp->ShowAlert( CFormat( _("'%s' is 0 size somehow - using %s file.") )
			% m_fullname % PARTMET_BAK_EXT, _("Message"), wxOK);
		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
	} else {
		// no error, just backup
		CPath::BackupFile(m_fullname, PARTMET_BAK_EXT);
	}

	return true;
}
void CPartFile::SaveSourceSeeds()
{
	#define MAX_SAVED_SOURCES 10

	// Kry - Sources seeds
	// Based on a feature request, this saves the last MAX_SAVED_SOURCES
	// sources of the file, giving a 'seed' for the next run.
	// We save the last sources because:
	// 1 - They could be the hardest to get
	// 2 - They will more probably be available
	// However, if we have downloading sources, they have preference because
	// we probably have more credits on them.
	// Anyway, source exchange will get us the rest of the sources.
	// This feature is currently used only on rare files (< 20 sources)

	if (GetSourceCount()>20) {
		return;
	}

	CClientPtrList source_seeds;
	uint16 n_sources = 0;

	CClientPtrList::iterator it = m_downloadingSourcesList.begin();
	for( ; it != m_downloadingSourcesList.end() && n_sources < MAX_SAVED_SOURCES; ++it) {
		CUpDownClient *cur_src = *it;
		if (!cur_src->HasLowID()) {
			source_seeds.push_back(cur_src);
			++n_sources;
		}
	}

	if (n_sources < MAX_SAVED_SOURCES) {
		// Not enough downloading sources to fill the list, going to sources list
		if (GetSourceCount() > 0) {
			SourceSet::reverse_iterator rit = m_SrcList.rbegin();
			for ( ; ((rit != m_SrcList.rend()) && (n_sources < MAX_SAVED_SOURCES)); ++rit) {
				CUpDownClient* cur_src = *rit;
				if (!cur_src->HasLowID()) {
					source_seeds.push_back(cur_src);
					++n_sources;
				}
			}
		}
	}

	if (!n_sources) {
		return;
	}

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));

	CFile file;
	file.Create(seedsPath, true);
	if (!file.IsOpened()) {
		AddLogLineM(false, CFormat( _("Failed to save part.met.seeds file for %s") )
			% m_fullname);
		return;
	}

	try {
		file.WriteUInt8(0); // v3, to avoid v2 clients choking on it.
		file.WriteUInt8(source_seeds.size());

		CClientPtrList::iterator it2 = source_seeds.begin();
		for (; it2 != source_seeds.end(); ++it2) {
			CUpDownClient* cur_src = *it2;
			file.WriteUInt32(cur_src->GetUserIDHybrid());
			file.WriteUInt16(cur_src->GetUserPort());
			file.WriteHash(cur_src->GetUserHash());
			// CryptSettings - See SourceExchange V4
			const uint8 uSupportsCryptLayer	= cur_src->SupportsCryptLayer() ? 1 : 0;
			const uint8 uRequestsCryptLayer	= cur_src->RequestsCryptLayer() ? 1 : 0;
			const uint8 uRequiresCryptLayer	= cur_src->RequiresCryptLayer() ? 1 : 0;
			const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
			file.WriteUInt8(byCryptOptions);
		}
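		// Illustrative note: each byCryptOptions byte written above packs the three crypt
		// flags into its low bits - bit 0 = supports, bit 1 = requests, bit 2 = requires
		// obfuscation. A source that supports and requests (but does not require) crypt
		// is stored as (0<<2)|(1<<1)|(1<<0) = 0x03.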
		/* v2: Added to keep track of too old seeds */
		file.WriteUInt32(wxDateTime::Now().GetTicks());
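		// For reference (derived from the writes above, not a separate spec): the resulting
		// .seeds file is one version byte (0 for this layout), one count byte, then per
		// source a 4-byte hybrid user ID, 2-byte port, 16-byte user hash and 1-byte crypt
		// options, followed by a 4-byte save timestamp that LoadSourceSeeds() checks against
		// its 2-hour validity window.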
1021 AddLogLineM(false, CFormat( wxPLURAL("Saved %i source seed for partfile: %s (%s)", "Saved %i source seeds for partfile: %s (%s)", n_sources
) )
1025 } catch (const CIOFailureException
& e
) {
1026 AddDebugLogLineM(true, logPartFile
, CFormat( wxT("Error saving partfile's seeds file (%s - %s): %s") )
1033 CPath::RemoveFile(seedsPath
);
void CPartFile::LoadSourceSeeds()
{
	CMemFile sources_data;

	bool valid_sources = false;

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));
	if (!seedsPath.FileExists()) {
		return;
	}

	CFile file(seedsPath, CFile::read);
	if (!file.IsOpened()) {
		AddLogLineM(false, CFormat( _("Partfile %s (%s) has no seeds file") )
			% m_partmetfilename % GetFileName());
		return;
	}

	try {
		if (file.GetLength() <= 1) {
			AddLogLineM(false, CFormat( _("Partfile %s (%s) has a void seeds file") )
				% m_partmetfilename % GetFileName());
			return;
		}

		uint8 src_count = file.ReadUInt8();

		bool bUseSX2Format = (src_count == 0);

		if (bUseSX2Format) {
			src_count = file.ReadUInt8();
		}

		sources_data.WriteUInt16(src_count);

		for (int i = 0; i < src_count; ++i) {
			uint32 dwID  = file.ReadUInt32();
			uint16 nPort = file.ReadUInt16();

			sources_data.WriteUInt32(bUseSX2Format ? dwID : wxUINT32_SWAP_ALWAYS(dwID));
			sources_data.WriteUInt16(nPort);
			sources_data.WriteUInt32(0);
			sources_data.WriteUInt16(0);

			if (bUseSX2Format) {
				sources_data.WriteHash(file.ReadHash());
				sources_data.WriteUInt8(file.ReadUInt8());
			}
		}

		if (!file.Eof()) {
			// v2: Added to keep track of too old seeds
			time_t time = (time_t)file.ReadUInt32();

			// Time frame is 2 hours. More than enough to compile
			// your new aMule version!
			if ((time + MIN2S(120)) >= wxDateTime::Now().GetTicks()) {
				valid_sources = true;
			}
		} else {
			// v1 has no time data. We can safely use
			// the sources; next time they will be saved.
			valid_sources = true;
		}

		if (valid_sources) {
			sources_data.Seek(0);
			AddClientSources(&sources_data, SF_SOURCE_SEEDS, bUseSX2Format ? 4 : 1, bUseSX2Format);
		}
	} catch (const CSafeIOException& e) {
		AddLogLineM(false, CFormat( _("Error reading partfile's seeds file (%s - %s): %s") )
			% m_partmetfilename % GetFileName() % e.what());
	}
}
void CPartFile::PartFileHashFinished(CKnownFile* result)
{
	m_lastDateChanged = result->m_lastDateChanged;
	bool errorfound = false;
	if (GetED2KPartHashCount() == 0){
		if (IsComplete(0, GetFileSize()-1)){
			if (result->GetFileHash() != GetFileHash()){
				AddLogLineM(false,
					CFormat(wxPLURAL(
						"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
						"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
						0))
					% 1
					% 0
					% GetFileName()
					% result->GetFileHash().Encode()
					% GetFileHash().Encode() );
				AddGap(0, GetFileSize()-1);
				errorfound = true;
			}
		}
	} else {
		for (size_t i = 0; i < m_hashlist.size(); ++i){
			// Kry - trel_ar's completed parts check on rehashing.
			// Very nice feature, if a file is completed but .part.met doesn't believe it,
			// it gets fixed here.

			uint64 partStart = i * PARTSIZE;
			uint64 partEnd   = partStart + GetPartSize(i) - 1;
			if (!( i < result->GetHashCount() && (result->GetPartHash(i) == GetPartHash(i)))){
				if (IsComplete(i)) {
					CMD4Hash wronghash;
					if ( i < result->GetHashCount() )
						wronghash = result->GetPartHash(i);

					AddLogLineM(false,
						CFormat(wxPLURAL(
							"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
							"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
							GetED2KPartHashCount())
						)
						% ( i + 1 )
						% GetED2KPartHashCount()
						% GetFileName()
						% wronghash.Encode()
						% GetPartHash(i).Encode() );

					AddGap(partStart, partEnd);
					errorfound = true;
				}
			} else {
				if (!IsComplete(i)){
					AddLogLineM(false, CFormat( _("Found completed part (%i) in %s") )
						% ( i + 1 ) % GetFileName());

					FillGap(partStart, partEnd);
					RemoveBlockFromList(partStart, partEnd);
				}
			}
		}
	}

	if (	!errorfound &&
		result->GetAICHHashset()->GetStatus() == AICH_HASHSETCOMPLETE &&
		status == PS_COMPLETING) {
		delete m_pAICHHashSet;
		m_pAICHHashSet = result->GetAICHHashset();
		result->SetAICHHashset(NULL);
		m_pAICHHashSet->SetOwner(this);
	}
	else if (status == PS_COMPLETING) {
		AddDebugLogLineM(false, logPartFile,
			CFormat(wxT("Failed to store new AICH Hashset for completed file: %s"))
				% GetFileName());
	}

	delete result;
	if (!errorfound){
		if (status == PS_COMPLETING){
			CompleteFile(true);
			return;
		}
		AddLogLineM(false, CFormat( _("Finished rehashing %s") ) % GetFileName());
	} else {
		SetStatus(PS_READY);
		SavePartFile();
		return;
	}
	SetStatus(PS_READY);
	SavePartFile();
	theApp->sharedfiles->SafeAddKFile(this);
}
void CPartFile::AddGap(uint64 start, uint64 end)
{
	m_gaplist.AddGap(start, end);
	UpdateDisplayedInfo();
}

void CPartFile::AddGap(uint16 part)
{
	m_gaplist.AddGap(part);
	UpdateDisplayedInfo();
}

bool CPartFile::IsAlreadyRequested(uint64 start, uint64 end)
{
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	for (; it != m_requestedblocks_list.end(); ++it) {
		Requested_Block_Struct* cur_block = *it;

		if ((start <= cur_block->EndOffset) && (end >= cur_block->StartOffset)) {
			return true;
		}
	}
	return false;
}
bool CPartFile::GetNextEmptyBlockInPart(uint16 partNumber, Requested_Block_Struct *result)
{
	// Find start of this part
	uint64 partStart = (PARTSIZE * partNumber);
	uint64 start = partStart;

	// What is the end limit of this block, i.e. can't go outside part (or filesize)
	uint64 partEnd = partStart + GetPartSize(partNumber) - 1;
	// Loop until find a suitable gap and return true, or no more gaps and return false
	CGapList::const_iterator it = m_gaplist.begin();
	while (true) {
		bool noGap = true;
		uint64 gapStart, end;

		// Find the first gap from the start position
		for (; it != m_gaplist.end(); ++it) {
			gapStart = it.start();
			end = it.end();

			// Want gaps that overlap start<->partEnd
			if (gapStart <= partEnd && end >= start) {
				noGap = false;
				break;
			} else if (gapStart > partEnd) {
				break;
			}
		}

		// If no gaps after start, exit
		if (noGap) {
			return false;
		}

		// Update start position if gap starts after current pos
		if (start < gapStart) {
			start = gapStart;
		}

		// Find end, keeping within the max block size and the part limit
		uint64 blockLimit = partStart + (BLOCKSIZE * (((start - partStart) / BLOCKSIZE) + 1)) - 1;
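		// Worked example (illustrative values): with partStart = 0 and start = 250000, and
		// assuming the usual eD2K block size of 184320 bytes for BLOCKSIZE, the limit is
		// 0 + 184320 * ((250000 / 184320) + 1) - 1 = 368639, i.e. the end of the second
		// 180 KB block of the part, so a request never crosses a block boundary.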
		if (end > blockLimit) {
			end = blockLimit;
		}
		if (end > partEnd) {
			end = partEnd;
		}

		// If this gap has not already been requested, we have found a valid entry
		if (!IsAlreadyRequested(start, end)) {
			// Was this block to be returned
			if (result != NULL) {
				result->StartOffset = start;
				result->EndOffset = end;
				md4cpy(result->FileID, GetFileHash().GetHash());
				result->transferred = 0;
			}
			return true;
		} else {
			// Reposition to end of that gap
			start = end + 1;
		}

		// If tried all gaps then break out of the loop
		if (end == partEnd) {
			break;
		}
	}

	// No suitable gap found
	return false;
}
void CPartFile::FillGap(uint64 start, uint64 end)
{
	m_gaplist.FillGap(start, end);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();
}

void CPartFile::FillGap(uint16 part)
{
	m_gaplist.FillGap(part);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();
}


void CPartFile::UpdateCompletedInfos()
{
	uint64 allgaps = m_gaplist.GetGapSize();

	percentcompleted = (1.0 - (double)allgaps/GetFileSize()) * 100.0;
	completedsize = GetFileSize() - allgaps;
}
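// Worked example (illustrative numbers): for a 100,000,000-byte file with 25,000,000 bytes
// still listed as gaps, percentcompleted becomes (1.0 - 25000000.0/100000000) * 100.0 = 75.0
// and completedsize becomes 75,000,000 bytes.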
void CPartFile::WritePartStatus(CMemFile* file)
{
	uint16 parts = GetED2KPartCount();
	file->WriteUInt16(parts);
	uint16 done = 0;
	while (done != parts){
		uint8 towrite = 0;
		for (uint32 i = 0; i != 8; ++i) {
			if (IsComplete(i)) {
				towrite |= (1 << i);
			}
			++done;
			if (done == parts) {
				break;
			}
		}
		file->WriteUInt8(towrite);
	}
}
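// Illustrative note (the exact bit order is an assumption, not shown in this excerpt): the
// part status is sent as a part count followed by a packed bitfield, eight parts per byte,
// so a 20-part file produces WriteUInt16(20) followed by three bytes in which each set bit
// marks one completed part.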
void CPartFile::WriteCompleteSourcesCount(CMemFile* file)
{
	file->WriteUInt16(m_nCompleteSourcesCount);
}
uint32 CPartFile::Process(uint32 reducedownload /*in percent*/, uint8 m_icounter)
{
	uint16 old_trans;
	uint32 dwCurTick = ::GetTickCount();

	// If buffer size exceeds limit, or if not written within time limit, flush data
	if (	(m_nTotalBufferData > thePrefs::GetFileBufferSize()) ||
		(dwCurTick > (m_nLastBufferFlushTime + BUFFER_TIME_LIMIT))) {
		// Avoid flushing while copying preview file
		if (!m_bPreviewing) {
			FlushBuffer();
		}
	}

	// check if we want new sources from server --> MOVED for 16.40 version

	old_trans = transferingsrc;
	transferingsrc = 0;
	kBpsDown = 0.0;

	if (m_icounter < 10) {
		// Update only downloading sources.
		CClientPtrList::iterator it = m_downloadingSourcesList.begin();
		for( ; it != m_downloadingSourcesList.end(); ) {
			CUpDownClient *cur_src = *it++;
			if(cur_src->GetDownloadState() == DS_DOWNLOADING) {
				++transferingsrc;
				kBpsDown += cur_src->SetDownloadLimit(reducedownload);
			}
		}
	} else {
		// Update all sources (including downloading sources)
		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
			CUpDownClient* cur_src = *it++;
			switch (cur_src->GetDownloadState()) {
				case DS_DOWNLOADING: {
					++transferingsrc;
					kBpsDown += cur_src->SetDownloadLimit(reducedownload);
					break;
				}
				case DS_LOWTOLOWIP: {
					if ( cur_src->HasLowID() && !theApp->DoCallback( cur_src ) ) {
						// If we are almost maxed on sources,
						// slowly remove these clients to see
						// if we can find a better source.
						if (	((dwCurTick - lastpurgetime) > 30000) &&
							(GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8))) {
							RemoveSource( cur_src );
							lastpurgetime = dwCurTick;
							break;
						}
					} else {
						cur_src->SetDownloadState(DS_ONQUEUE);
					}
					break;
				}
				case DS_NONEEDEDPARTS: {
					// we try to purge noneeded sources, even without reaching the limit
					if((dwCurTick - lastpurgetime) > 40000) {
						if(!cur_src->SwapToAnotherFile(false, false, false, NULL)) {
							// however we only delete them if reaching the limit
							if (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) {
								RemoveSource(cur_src);
								lastpurgetime = dwCurTick;
								break; //Johnny-B - nothing more to do here (good eye!)
							}
						} else {
							lastpurgetime = dwCurTick;
							break;
						}
					}
					// doubled reasktime for no needed parts - save connections and traffic
					if (	!((!cur_src->GetLastAskedTime()) ||
						  (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME*2)) {
						break;
					}
					// Recheck this client to see if still NNP..
					// Set to DS_NONE so that we force a TCP reask next time..
					cur_src->SetDownloadState(DS_NONE);
					break;
				}
				case DS_ONQUEUE: {
					if( cur_src->IsRemoteQueueFull()) {
						if (	((dwCurTick - lastpurgetime) > 60000) &&
							(GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) ) {
							RemoveSource( cur_src );
							lastpurgetime = dwCurTick;
							break; //Johnny-B - nothing more to do here (good eye!)
						}
					}

					// Give up to 1 min for UDP to respond..
					// If we are within one min on TCP, do not try..
					if (	theApp->IsConnected() &&
						( (!cur_src->GetLastAskedTime()) ||
						  (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME-20000)) {
						cur_src->UDPReaskForDownload();
					}
					// No break here, since the next case takes care of asking for downloads.
				}
				case DS_TOOMANYCONNS:
				case DS_WAITCALLBACK:
				case DS_WAITCALLBACKKAD: {
					if (	theApp->IsConnected() &&
						( (!cur_src->GetLastAskedTime()) ||
						  (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME)) {
						if (!cur_src->AskForDownload()) {
							// I left this break here just as a reminder
							// just in case we rearrange things..
							break;
						}
					}
					break;
				}
			}
		}
		/* eMule 0.30c implementation, I give it a try (Creteil) BEGIN ... */
		if (IsA4AFAuto() && ((!m_LastNoNeededCheck) || (dwCurTick - m_LastNoNeededCheck > 900000))) {
			m_LastNoNeededCheck = dwCurTick;
			for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
				CUpDownClient *cur_source = *it++;
				uint8 download_state = cur_source->GetDownloadState();
				if (	download_state != DS_DOWNLOADING
					&& cur_source->GetRequestFile()
					&& ((!cur_source->GetRequestFile()->IsA4AFAuto()) || download_state == DS_NONEEDEDPARTS))
				{
					cur_source->SwapToAnotherFile(false, false, false, this);
				}
			}
		}
		/* eMule 0.30c implementation, I give it a try (Creteil) END ... */
		// swap No needed partfiles if possible

		if (((old_trans==0) && (transferingsrc>0)) || ((old_trans>0) && (transferingsrc==0))) {
			SetPartFileStatus(status);
		}

		// Kad source search
		if( GetMaxSourcePerFileUDP() > GetSourceCount()){
			//Once we can handle lowID users in Kad, we remove the second IsConnected
			if (theApp->downloadqueue->DoKademliaFileRequest() && (Kademlia::CKademlia::GetTotalFile() < KADEMLIATOTALFILE) && (dwCurTick > m_LastSearchTimeKad) && Kademlia::CKademlia::IsConnected() && theApp->IsConnected() && !IsStopped()){
				theApp->downloadqueue->SetLastKademliaFileRequest();

				if (GetKadFileSearchID()) {
					/*	This will never happen anyway. We're talking a
						1h timespan and searches are at max 45secs */
					Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
				}

				Kademlia::CUInt128 kadFileID(GetFileHash().GetHash());
				Kademlia::CSearch* pSearch = Kademlia::CSearchManager::PrepareLookup(Kademlia::CSearch::FILE, true, kadFileID);
				AddDebugLogLineM(false, logKadSearch, CFormat(wxT("Preparing a Kad Search for '%s'")) % GetFileName());
				if (pSearch) {
					AddDebugLogLineM(false, logKadSearch, CFormat(wxT("Kad lookup started for '%s'")) % GetFileName());
					if(m_TotalSearchesKad < 7) {
						m_TotalSearchesKad++;
					}
					m_LastSearchTimeKad = dwCurTick + (KADEMLIAREASKTIME*m_TotalSearchesKad);
					SetKadFileSearchID(pSearch->GetSearchID());
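					// Illustrative note: successive Kad lookups for the same file back off
					// linearly - the n-th search schedules the next one KADEMLIAREASKTIME * n
					// later, and m_TotalSearchesKad stops growing at 7, so the interval no
					// longer increases after the seventh search.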
				}
			}
		} else {
			if(GetKadFileSearchID()) {
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);
			}
		}

		// check if we want new sources from server
		if (	!m_localSrcReqQueued &&
			( (!m_lastsearchtime) ||
			  (dwCurTick - m_lastsearchtime) > SERVERREASKTIME) &&
			theApp->IsConnectedED2K() &&
			thePrefs::GetMaxSourcePerFileSoft() > GetSourceCount() &&
			!IsStopped() ) {
			m_localSrcReqQueued = true;
			theApp->downloadqueue->SendLocalSrcRequest(this);
		}
	}

	// calculate datarate, set limit etc.
	++m_count;

	// Kry - does the 3 / 30 difference produce too much flickering or CPU?
	if (m_count >= 30) {
		m_count = 0;
		UpdateAutoDownPriority();
		UpdateDisplayedInfo();
		if(m_bPercentUpdated == false) {
			UpdateCompletedInfos();
		}
		m_bPercentUpdated = false;
		if (thePrefs::ShowCatTabInfos()) {
			Notify_ShowUpdateCatTabTitles();
		}
	}

	return (uint32)(kBpsDown*1024.0);
}
bool CPartFile::CanAddSource(uint32 userid, uint16 port, uint32 serverip, uint16 serverport, uint8* pdebug_lowiddropped, bool ed2kID)
{
	//The incoming ID could have the userid in the Hybrid format..
	uint32 hybridID = 0;
	if (ed2kID) {
		if (IsLowID(userid)) {
			hybridID = userid;
		} else {
			hybridID = wxUINT32_SWAP_ALWAYS(userid);
		}
	} else {
		hybridID = userid;
		if (!IsLowID(userid)) {
			userid = wxUINT32_SWAP_ALWAYS(userid);
		}
	}

	// MOD Note: Do not change this part - Merkur
	if (theApp->IsConnectedED2K()) {
		if(::IsLowID(theApp->GetED2KID())) {
			if(theApp->GetED2KID() == userid && theApp->serverconnect->GetCurrentServer()->GetIP() == serverip && theApp->serverconnect->GetCurrentServer()->GetPort() == serverport ) {
				return false;
			}
			if(theApp->GetPublicIP() == userid) {
				return false;
			}
		} else {
			if(theApp->GetED2KID() == userid && thePrefs::GetPort() == port) {
				return false;
			}
		}
	}

	if (Kademlia::CKademlia::IsConnected()) {
		if(!Kademlia::CKademlia::IsFirewalled()) {
			if(Kademlia::CKademlia::GetIPAddress() == hybridID && thePrefs::GetPort() == port) {
				return false;
			}
		}
	}

	//This allows *.*.*.0 clients to not be removed if Ed2kID == false
	if ( IsLowID(hybridID) && theApp->IsFirewalled()) {
		if (pdebug_lowiddropped) {
			(*pdebug_lowiddropped)++;
		}
		return false;
	}

	return true;
}
void CPartFile::AddSources(CMemFile& sources, uint32 serverip, uint16 serverport, unsigned origin, bool bWithObfuscationAndHash)
{
	uint8 count = sources.ReadUInt8();
	uint8 debug_lowiddropped = 0;
	uint8 debug_possiblesources = 0;
	CMD4Hash achUserHash;

	if (m_stopped) {
		// since we may receive multiple search source UDP results we have to "consume" all data of that packet
		AddDebugLogLineM(false, logPartFile, wxT("Trying to add sources for a stopped file"));
		sources.Seek(count*(4+2), wxFromCurrent);
		return;
	}

	for (int i = 0; i != count; ++i) {
		uint32 userid = sources.ReadUInt32();
		uint16 port   = sources.ReadUInt16();

		uint8 byCryptOptions = 0;
		if (bWithObfuscationAndHash){
			byCryptOptions = sources.ReadUInt8();
			if ((byCryptOptions & 0x80) > 0) {
				achUserHash = sources.ReadHash();
			}

			if ((thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x01/*supported*/) > 0 && (byCryptOptions & 0x80) == 0)
				|| (thePrefs::IsClientCryptLayerSupported() && (byCryptOptions & 0x02/*requested*/) > 0 && (byCryptOptions & 0x80) == 0)) {
				AddDebugLogLineM(false, logPartFile, wxString::Format(wxT("Server didn't provide UserHash for source %u, even if it was expected to (or local obfuscationsettings changed during serverconnect"), userid));
			} else if (!thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x02/*requested*/) == 0 && (byCryptOptions & 0x80) != 0) {
				AddDebugLogLineM(false, logPartFile, wxString::Format(wxT("Server provided UserHash for source %u, even if it wasn't expected to (or local obfuscationsettings changed during serverconnect"), userid));
			}
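			// Illustrative note: in this source-exchange byte, bit 0x01 means the source
			// supports obfuscation, 0x02 means it requests it, and 0x80 flags that a
			// 16-byte user hash follows; e.g. byCryptOptions == 0x81 would mean
			// "supports crypt, hash included".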
1667 // "Filter LAN IPs" and "IPfilter" the received sources IP addresses
1668 if (!IsLowID(userid
)) {
1669 // check for 0-IP, localhost and optionally for LAN addresses
1670 if ( !IsGoodIP(userid
, thePrefs::FilterLanIPs()) ) {
1673 if (theApp
->ipfilter
->IsFiltered(userid
)) {
1678 if (!CanAddSource(userid
, port
, serverip
, serverport
, &debug_lowiddropped
)) {
1682 if(thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
1683 ++debug_possiblesources
;
1684 CUpDownClient
* newsource
= new CUpDownClient(port
,userid
,serverip
,serverport
,this, true, true);
1686 newsource
->SetSourceFrom((ESourceFrom
)origin
);
1687 newsource
->SetConnectOptions(byCryptOptions
, true, false);
1689 if ((byCryptOptions
& 0x80) != 0) {
1690 newsource
->SetUserHash(achUserHash
);
1693 theApp
->downloadqueue
->CheckAndAddSource(this,newsource
);
1695 AddDebugLogLineM(false, logPartFile
, wxT("Consuming a packet because of max sources reached"));
1696 // Since we may receive multiple search source UDP results we have to "consume" all data of that packet
1697 // This '+1' is added because 'i' counts from 0.
1698 sources
.Seek((count
-(i
+1))*(4+2), wxFromCurrent
);
1699 if (GetKadFileSearchID()) {
1700 Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
void CPartFile::UpdatePartsInfo()
{
	if( !IsPartFile() ) {
		CKnownFile::UpdatePartsInfo();
		return;
	}

	// Cache part count
	uint16 partcount = GetPartCount();
	bool flag = (time(NULL) - m_nCompleteSourcesTime > 0);

	// Ensure the frequency-list is ready
	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);
	}

	// Find number of available parts
	uint16 availablecounter = 0;
	for ( uint16 i = 0; i < partcount; ++i ) {
		if ( m_SrcpartFrequency[i] )
			++availablecounter;
	}

	if ( ( availablecounter == partcount ) && ( m_availablePartsCount < partcount ) ) {
		lastseencomplete = time(NULL);
	}

	m_availablePartsCount = availablecounter;

	if ( flag ) {
		ArrayOfUInts16 count;

		count.reserve(GetSourceCount());

		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it ) {
			if ( !(*it)->GetUpPartStatus().empty() && (*it)->GetUpPartCount() == partcount ) {
				count.push_back((*it)->GetUpCompleteSourcesCount());
			}
		}

		m_nCompleteSourcesCount = m_nCompleteSourcesCountLo = m_nCompleteSourcesCountHi = 0;

		for (uint16 i = 0; i < partcount; ++i) {
			if ( !i ) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
			}
			else if( m_nCompleteSourcesCount > m_SrcpartFrequency[i]) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
			}
		}

		count.push_back(m_nCompleteSourcesCount);
		int32 n = count.size();

		std::sort(count.begin(), count.end(), std::less<uint16>());

		int32 i = n >> 1;		// (n / 2)
		int32 j = (n * 3) >> 2;		// (n * 3) / 4
		int32 k = (n * 7) >> 3;		// (n * 7) / 8
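		// Worked example (illustrative): with n = 16 reports, the sorted indices are
		// i = 8 (median), j = 12 (75th percentile) and k = 14 (87.5th percentile);
		// these percentiles feed the low/normal/high complete-source estimates below.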
		//When still a part file, adjust your guesses by 20% to what you see..

		if (n < 5) {
			//Not many sources, so just use what you see..
			// welcome to 'plain stupid code'
			// m_nCompleteSourcesCount;
			m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
			m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
		} else if (n < 20) {
			// For low guess and normal guess count
			// If we see more sources than the guessed low and normal, use what we see.
			// If we see less sources than the guessed low, adjust: network accounts for 80%,
			// we account for 20% with what we see and make sure we are still above the normal.

			// Adjust 80% network and 20% what we see.
			if ( count[i] < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
			} else {
				m_nCompleteSourcesCountLo =
					(uint16)((float)(count[i]*.8) +
						 (float)(m_nCompleteSourcesCount*.2));
			}
			m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
			m_nCompleteSourcesCountHi =
				(uint16)((float)(count[j]*.8) +
					 (float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
			}
		} else {
			// Adjust: network accounts for 80%, we account for 20% with what
			// we see and make sure we are still above the low.
			//
			// Adjust: network accounts for 80%, we account for 20% with what
			// we see and make sure we are still above the normal.

			m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
			m_nCompleteSourcesCount = (uint16)((float)(count[j]*.8)+(float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCount < m_nCompleteSourcesCountLo ) {
				m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
			}
			m_nCompleteSourcesCountHi = (uint16)((float)(count[k]*.8)+(float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
			}
		}
		m_nCompleteSourcesTime = time(NULL) + (60);
	}

	UpdateDisplayedInfo();
}
// [Maella -Enhanced Chunk Selection- (based on jicxicmic)]
bool CPartFile::GetNextRequestedBlock(CUpDownClient* sender,
	std::vector<Requested_Block_Struct*>& toadd, uint16& count)
{
	// The purpose of this function is to return a list of blocks (~180KB) to
	// download. To avoid a premature stop of the downloading, all blocks that
	// are requested from the same source must be located within the same
	// chunk (=> part ~9MB).
	//
	// The selection of the chunk to download is one of the CRITICAL parts of the
	// edonkey network. The selection algorithm must ensure the best spreading
	// of files.
	//
	// The selection is based on 4 criteria:
	//  1. Frequency of the chunk (availability): very rare chunks must be downloaded
	//     as quickly as possible to become a new available source.
	//  2. Parts used for preview (first + last chunk), to preview or check a
	//     file (e.g. movie, mp3).
	//  3. Request state (downloading in progress): try to ask each source for another
	//     chunk. Spread the requests between all sources.
	//  4. Completion (shortest-to-complete): partially retrieved chunks should be
	//     completed before starting to download other ones.
	//
	// The frequency criterion defines three zones: very rare (<10%), rare (<50%)
	// and common (>30%). Inside each zone, the criteria have a specific weight, used
	// to calculate the priority of chunks. The chunk(s) with the highest
	// priority (highest=0, lowest=0xffff) is/are selected first.
	//
	//          very rare  (preview)       rare                       common
	//    0% <---- +0 pt ----> 10% <----- +10000 pt -----> 50% <---- +20000 pt ----> 100%
	// 1.  <------- frequency: +25*frequency pt ----------->
	// 2.  <- preview: +1 pt --><-------------- preview: set to 10000 pt ------------->
	// 3.                       <------ request: download in progress +20000 pt ------>
	// 4a. <- completion: 0% +100, 25% +75 .. 100% +0 pt --><-- !req => completion --->
	// 4b.                                                  <--- req => !completion -->
	//
	// Unrolled, the priority scale is:
	//
	//   0..xxxx      unrequested and requested very rare chunks
	//   10000..1xxxx unrequested rare chunks + unrequested preview chunks
	//   20000..2xxxx unrequested common chunks (priority to the most complete)
	//   30000..3xxxx requested rare chunks + requested preview chunks
	//   40000..4xxxx requested common chunks (priority to the least complete)
	//
	// This algorithm usually selects the rarest chunk(s) first. However, partially
	// complete chunk(s) that is/are close to completion may overtake the priority
	// (priority inversion).
	// For the common chunks, the algorithm tries to spread the download between
	// the sources.
1880 if ( sender
->GetPartStatus().empty() ) {
1883 // Define and create the list of the chunks to download
1884 const uint16 partCount
= GetPartCount();
1885 ChunkList chunksList
;
1888 uint16 newBlockCount
= 0;
1889 while(newBlockCount
!= count
) {
1890 // Create a request block stucture if a chunk has been previously selected
1891 if(sender
->GetLastPartAsked() != 0xffff) {
1892 Requested_Block_Struct
* pBlock
= new Requested_Block_Struct
;
1893 if(GetNextEmptyBlockInPart(sender
->GetLastPartAsked(), pBlock
) == true) {
1894 // Keep a track of all pending requested blocks
1895 m_requestedblocks_list
.push_back(pBlock
);
1896 // Update list of blocks to return
1897 toadd
.push_back(pBlock
);
1899 // Skip end of loop (=> CPU load)
1902 // All blocks for this chunk have been already requested
1904 // => Try to select another chunk
1905 sender
->SetLastPartAsked(0xffff);
1909 // Check if a new chunk must be selected (e.g. download starting, previous chunk complete)
1910 if(sender
->GetLastPartAsked() == 0xffff) {
1911 // Quantify all chunks (create list of chunks to download)
1912 // This is done only one time and only if it is necessary (=> CPU load)
1913 if(chunksList
.empty()) {
1914 // Indentify the locally missing part(s) that this source has
1915 for(uint16 i
=0; i
< partCount
; ++i
) {
1916 if(sender
->IsPartAvailable(i
) == true && GetNextEmptyBlockInPart(i
, NULL
) == true) {
1917 // Create a new entry for this chunk and add it to the list
1920 newEntry
.frequency
= m_SrcpartFrequency
[i
];
1921 chunksList
.push_back(newEntry
);
1925 // Check if any bloks(s) could be downloaded
1926 if(chunksList
.empty()) {
1927 break; // Exit main loop while()
1930 // Define the bounds of the three zones (very rare, rare)
1931 // more depending on available sources
1933 if (GetSourceCount()>800) {
1935 } else if (GetSourceCount()>200) {
1938 uint16 limit
= modif
*GetSourceCount()/ 100;
1942 const uint16 veryRareBound
= limit
;
1943 const uint16 rareBound
= 2*limit
;
1945 // Cache Preview state (Criterion 2)
1946 FileType type
= GetFiletype(GetFileName());
1947 const bool isPreviewEnable
=
1948 thePrefs::GetPreviewPrio() &&
1949 (type
== ftArchive
|| type
== ftVideo
);
1951 // Collect and calculate criteria for all chunks
1952 for (ChunkList::iterator it
= chunksList
.begin(); it
!= chunksList
.end(); ++it
) {
1953 Chunk
& cur_chunk
= *it
;
1956 const uint64 uStart
= cur_chunk
.part
* PARTSIZE
;
1957 const uint64 uEnd
= uStart
+ GetPartSize(cur_chunk
.part
) - 1;
1958 // Criterion 2. Parts used for preview
1959 // Remark: - We need to download the first part and the last part(s).
1960 // - When the last part is very small, it's necessary to
1961 // download the two last parts.
1962 bool critPreview
= false;
1963 if(isPreviewEnable
== true) {
1964 if(cur_chunk
.part
== 0) {
1965 critPreview
= true; // First chunk
1966 } else if(cur_chunk
.part
== partCount
-1) {
1967 critPreview
= true; // Last chunk
1968 } else if(cur_chunk
.part
== partCount
-2) {
1969 // Last chunk - 1 (only if last chunk is too small)
1970 const uint32 sizeOfLastChunk
= GetFileSize() - uEnd
;
1971 if(sizeOfLastChunk
< PARTSIZE
/3) {
1972 critPreview
= true; // Last chunk - 1
1977 // Criterion 3. Request state (downloading in process from other source(s))
1979 const bool critRequested
=
1980 cur_chunk
.frequency
> veryRareBound
&&
1981 IsAlreadyRequested(uStart
, uEnd
);
1983 // Criterion 4. Completion
1984 // PARTSIZE instead of GetPartSize() favours the last chunk - but that may be intentional
1985 uint32 partSize
= PARTSIZE
- m_gaplist
.GetGapSize(cur_chunk
.part
);
1986 const uint16 critCompletion
= (uint16
)(partSize
/(PARTSIZE
/100)); // in [%]
			// Calculate priority with all criteria
			if (cur_chunk.frequency <= veryRareBound) {
				// 0..xxxx unrequested + requested very rare chunks
				cur_chunk.rank = (25 * cur_chunk.frequency) +			// Criterion 1
					((critPreview == true) ? 0 : 1) +			// Criterion 2
					(100 - critCompletion);					// Criterion 4
			} else if (critPreview == true) {
				// 10000..10100 unrequested preview chunks
				// 30000..30100 requested preview chunks
				cur_chunk.rank = ((critRequested == false) ? 10000 : 30000) +	// Criterion 3
					(100 - critCompletion);					// Criterion 4
			} else if (cur_chunk.frequency <= rareBound) {
				// 10101..1xxxx unrequested rare chunks
				// 30101..3xxxx requested rare chunks
				cur_chunk.rank = (25 * cur_chunk.frequency) +			// Criterion 1
					((critRequested == false) ? 10101 : 30101) +		// Criterion 3
					(100 - critCompletion);					// Criterion 4
			} else {
				// Common chunk
				if (critRequested == false) {					// Criterion 3
					// 20000..2xxxx unrequested common chunks
					cur_chunk.rank = 20000 +				// Criterion 3
						(100 - critCompletion);				// Criterion 4
				} else {
					// 40000..4xxxx requested common chunks
					// Remark: The weight of the completion criterion is inverted
					//         to spread the requests over the completing chunks.
					//         Without this, the chunk closest to completion would
					//         receive every new source.
					cur_chunk.rank = 40000 +				// Criterion 3
						(critCompletion);				// Criterion 4
				}
			}
		}
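
		// Resulting rank bands (lower rank = selected earlier):
		//       0..9999  very rare chunks, requested or not
		//   10000..10100 unrequested preview chunks
		//   10101..1xxxx unrequested rare chunks
		//   20000..2xxxx unrequested common chunks
		//   30000..30100 requested preview chunks
		//   30101..3xxxx requested rare chunks
		//   40000..4xxxx requested common chunks
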
		// Select the next chunk to download
		if (!chunksList.empty()) {
			// Find and count the chunk(s) with the highest priority
			uint16 chunkCount = 0;	// Number of chunks found with the same priority
			uint16 rank = 0xffff;	// Highest priority found so far

			for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
				const Chunk& cur_chunk = *it;
				if (cur_chunk.rank < rank) {
					chunkCount = 1;
					rank = cur_chunk.rank;
				} else if (cur_chunk.rank == rank) {
					++chunkCount;
				}
			}

			// Use a random pick to avoid everybody trying to download the
			// same chunks at the same time (=> spread the selected chunks among clients)
			uint16 randomness = 1 + (int) (((float)(chunkCount - 1)) * rand() / (RAND_MAX + 1.0));

			for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
				const Chunk& cur_chunk = *it;
				if (cur_chunk.rank == rank) {
					randomness--;
					if (randomness == 0) {
						// Selection process is over
						sender->SetLastPartAsked(cur_chunk.part);
						// Remark: this list might be reused up to *count times
						chunksList.erase(it);
						break; // exit loop for()
					}
				}
			}
		} else {
			// There is no remaining chunk to download
			break; // Exit main loop while()
		}
	}

	// Return the number of new blocks
	count = newBlockCount;

	return (newBlockCount > 0);
}
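
// Note on the selection loop above: each pass picks one chunk, spread randomly
// among the equally-ranked candidates, remembers it via sender->SetLastPartAsked()
// and removes it from chunksList so that the list can be reused on the next pass.
// The caller gets the number of newly created blocks back through 'count' and a
// bool telling whether anything could be requested at all.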

void CPartFile::RemoveBlockFromList(uint64 start, uint64 end)
{
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	while (it != m_requestedblocks_list.end()) {
		std::list<Requested_Block_Struct*>::iterator it2 = it++;

		if ((*it2)->StartOffset <= start && (*it2)->EndOffset >= end) {
			m_requestedblocks_list.erase(it2);
		}
	}
}

void CPartFile::RemoveAllRequestedBlocks(void)
{
	m_requestedblocks_list.clear();
}

void CPartFile::CompleteFile(bool bIsHashingDone)
{
	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
	}

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	AddDebugLogLineM( false, logPartFile, wxString( wxT("CPartFile::CompleteFile: Hash ") ) + ( bIsHashingDone ? wxT("done") : wxT("not done") ) );

	if (!bIsHashingDone) {
		SetPartFileStatus(PS_COMPLETING);

		CPath partFile = m_partmetfilename.RemoveExt();
		CThreadScheduler::AddTask(new CHashingTask(GetFilePath(), partFile, this));
		return;
	} else {
		m_is_A4AF_auto = false;
		SetPartFileStatus(PS_COMPLETING);
		// Guess I was wrong about not needing to spawn a thread ...
		// It is needed if the temp and incoming dirs are on different
		// partitions/drives and the file is large... [oz]
		PerformFileComplete();
	}

	if (thePrefs::ShowCatTabInfos()) {
		Notify_ShowUpdateCatTabTitles();
	}
	UpdateDisplayedInfo(true);
}

void CPartFile::CompleteFileEnded(bool errorOccured, const CPath& newname)
{
	if (errorOccured) {
		SetPartFileStatus(PS_ERROR);
		AddLogLineM(true, CFormat( _("Unexpected error while completing %s. File paused") ) % GetFileName() );
	} else {
		m_fullname = newname;

		SetFilePath(m_fullname.GetPath());
		SetFileName(m_fullname.GetFullName());
		m_lastDateChanged = CPath::GetModificationTime(m_fullname);

		SetPartFileStatus(PS_COMPLETE);

		// TODO: What the f*** if it is already known?
		theApp->knownfiles->SafeAddKFile(this);

		// remove the file from the suspended uploads list
		theApp->uploadqueue->ResumeUpload(GetFileHash());
		theApp->downloadqueue->RemoveFile(this);
		theApp->sharedfiles->SafeAddKFile(this);
		UpdateDisplayedInfo(true);

		// republish that file to the ed2k-server to update the 'FT_COMPLETE_SOURCES' counter on the server.
		theApp->sharedfiles->RepublishFile(this);

		// Ensure that 'completed' shows the correct value
		completedsize = GetFileSize();

		AddLogLineM(true, CFormat( _("Finished downloading: %s") ) % GetFileName() );
	}

	theApp->downloadqueue->StartNextFile(this);
}

void CPartFile::PerformFileComplete()
{
	// add this file to the suspended uploads list
	theApp->uploadqueue->SuspendUpload(GetFileHash());

	// close permanent handle
	if (m_hpartfile.IsOpened()) {
		m_hpartfile.Close();
	}

	// Schedule task for completion of the file
	CThreadScheduler::AddTask(new CCompletionTask(this));
}

void CPartFile::RemoveAllSources(bool bTryToSwap)
{
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = *it++;
		if (bTryToSwap) {
			if (!cur_src->SwapToAnotherFile(true, true, true, NULL)) {
				RemoveSource(cur_src, true, false);
				// If it was not swapped, it's not on any file anymore, and should die
			}
		} else {
			RemoveSource(cur_src, true, false);
		}
	}

	/* eMule 0.30c implementation, I give it a try (Creteil) BEGIN ... */
	// remove all A4AF links from sources to this file
	if (!m_A4AFsrclist.empty()) {
		for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
			CUpDownClient* cur_src = *it++;
			if ( cur_src->DeleteFileRequest( this ) ) {
				Notify_DownloadCtrlRemoveSource(cur_src, this);
			}
		}
		m_A4AFsrclist.clear();
	}
	/* eMule 0.30c implementation, I give it a try (Creteil) END ... */
	UpdateFileRatingCommentAvail();
}

void CPartFile::Delete()
{
	AddLogLineM(false, CFormat(_("Deleting file: %s")) % GetFileName());
	// Barry - Need to tell any connected clients to stop sending the file
	StopFile(true);
	AddDebugLogLineM(false, logPartFile, wxT("\tStopped"));

	theApp->sharedfiles->RemoveFile(this);
	AddDebugLogLineM(false, logPartFile, wxT("\tRemoved from shared"));
	theApp->downloadqueue->RemoveFile(this);
	AddDebugLogLineM(false, logPartFile, wxT("\tRemoved from download queue"));
	Notify_DownloadCtrlRemoveFile(this);
	AddDebugLogLineM(false, logPartFile, wxT("\tRemoved from transferwnd"));

	// eMule had the same problem with an lseek error ... and worked around it with a
	// simple check for INVALID_HANDLE_VALUE (which, btw, does not exist on Linux).
	// So we just assume < 0 means error and > 2 means ok (0 stdin, 1 stdout, 2 stderr).
	if (m_hpartfile.fd() > 2) { // 0 stdin, 1 stdout, 2 stderr
		m_hpartfile.Close();
	}
	AddDebugLogLineM(false, logPartFile, wxT("\tClosed"));

	if (!CPath::RemoveFile(m_fullname)) {
		AddDebugLogLineM(true, logPartFile, CFormat(wxT("\tFailed to delete '%s'")) % m_fullname);
	} else {
		AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .part.met"));
	}

	if (!CPath::RemoveFile(m_PartPath)) {
		AddDebugLogLineM(true, logPartFile, CFormat(wxT("Failed to delete '%s'")) % m_PartPath);
	} else {
		AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .part"));
	}

	CPath BAKName = m_fullname.AppendExt(PARTMET_BAK_EXT);
	if (!CPath::RemoveFile(BAKName)) {
		AddDebugLogLineM(true, logPartFile, CFormat(wxT("Failed to delete '%s'")) % BAKName);
	} else {
		AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .BAK"));
	}

	CPath SEEDSName = m_fullname.AppendExt(wxT(".seeds"));
	if (SEEDSName.FileExists()) {
		if (CPath::RemoveFile(SEEDSName)) {
			AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .seeds"));
		} else {
			AddDebugLogLineM(true, logPartFile, CFormat(wxT("Failed to delete '%s'")) % SEEDSName);
		}
	}

	AddDebugLogLineM(false, logPartFile, wxT("Done"));
}

bool CPartFile::HashSinglePart(uint16 partnumber)
{
	if ((GetHashCount() <= partnumber) && (GetPartCount() > 1)) {
		AddLogLineM(true,
			CFormat( _("WARNING: Unable to hash downloaded part - hashset incomplete for '%s'") )
				% GetFileName() );
		m_hashsetneeded = true;
		return true;
	} else if ((GetHashCount() <= partnumber) && GetPartCount() != 1) {
		AddLogLineM(true, CFormat( _("ERROR: Unable to hash downloaded part - hashset incomplete (%s). This should never happen")) % GetFileName() );
		m_hashsetneeded = true;
		return true;
	} else {
		CMD4Hash hashresult;
		uint64 offset = PARTSIZE * partnumber;
		uint32 length = GetPartSize(partnumber);
		try {
			m_hpartfile.Seek(offset, wxFromStart);
			CreateHashFromFile(m_hpartfile, length, &hashresult, NULL);
		} catch (const CIOFailureException& e) {
			AddLogLineM(true, CFormat( wxT("IO failure while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
				% partnumber % length % (offset + length) % GetFileName() % GetFileSize() % e.what());
			SetPartFileStatus(PS_ERROR);
			return false;
		} catch (const CEOFException& e) {
			AddLogLineM(true, CFormat( wxT("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
				% partnumber % length % (offset + length) % GetFileName() % GetFileSize() % e.what());
			return false;
		}

		if (GetPartCount() > 1) {
			if (hashresult != GetPartHash(partnumber)) {
				AddDebugLogLineM(false, logPartFile, CFormat( wxT("%s: Expected hash of part %d: %s")) % GetFileName() % partnumber % GetPartHash(partnumber).Encode() );
				AddDebugLogLineM(false, logPartFile, CFormat( wxT("%s: Actual hash of part %d: %s")) % GetFileName() % partnumber % hashresult.Encode() );
				return false;
			} else {
				return true;
			}
		} else {
			if (hashresult != m_abyFileHash) {
				return false;
			} else {
				return true;
			}
		}
	}
}
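
// For reference: eD2k parts are PARTSIZE bytes long (9728000 bytes = 9500 KiB);
// only the last part of a file is shorter, which is what GetPartSize(partnumber)
// accounts for above. As a rough example, part 2 of a 20000000-byte file is
// hashed from offset 2 * PARTSIZE = 19456000 with length
// 20000000 - 19456000 = 544000 bytes.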

bool CPartFile::IsCorruptedPart(uint16 partnumber)
{
	return std::find(m_corrupted_list.begin(), m_corrupted_list.end(), partnumber)
		!= m_corrupted_list.end();
}

void CPartFile::SetDownPriority(uint8 np, bool bSave, bool bRefresh)
{
	if ( m_iDownPriority != np ) {
		m_iDownPriority = np;
		if ( bRefresh ) {
			UpdateDisplayedInfo(true);
		}
		if ( bSave ) {
			SavePartFile();
		}
	}
}

void CPartFile::StopFile(bool bCancel)
{
	// Kry - Need to set it here to get into SetPartFileStatus(status) correctly
	m_stopped = true;

	// Barry - Need to tell any connected clients to stop sending the file
	PauseFile();

	m_LastSearchTimeKad = 0;
	m_TotalSearchesKad = 0;

	RemoveAllSources(true);
	memset(m_anStates, 0, sizeof(m_anStates));

	if (!bCancel) {
		FlushBuffer(true);
	}

	UpdateDisplayedInfo(true);
}

void CPartFile::StopPausedFile()
{
	// Once an hour, remove any sources for files which are no longer active downloads
	switch (GetStatus()) {
		case PS_PAUSED:
		case PS_INSUFFICIENT:
			if (time(NULL) - m_iLastPausePurge > (60*60)) {
				m_iLastPausePurge = time(NULL);
				StopFile();
			}
	}
}

void CPartFile::PauseFile(bool bInsufficient)
{
	if ( status == PS_COMPLETE || status == PS_COMPLETING ) {
		return;
	}

	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);
		// If we were in the middle of searching, reset the timer so we can resume searching.
		m_LastSearchTimeKad = 0;
	}

	m_iLastPausePurge = time(NULL);

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	CPacket packet( OP_CANCELTRANSFER, 0, OP_EDONKEYPROT );
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = *it++;
		if (cur_src->GetDownloadState() == DS_DOWNLOADING) {
			if (!cur_src->GetSentCancelTransfer()) {
				theStats::AddUpOverheadOther( packet.GetPacketSize() );
				AddDebugLogLineM( false, logLocalClient, wxT("Local Client: OP_CANCELTRANSFER to ") + cur_src->GetFullIP() );
				cur_src->SendPacket( &packet, false, true );
				cur_src->SetSentCancelTransfer( true );
			}
			cur_src->SetDownloadState(DS_ONQUEUE);
		}
	}

	m_insufficient = bInsufficient;

	m_anStates[DS_DOWNLOADING] = 0;
}

void CPartFile::ResumeFile()
{
	if ( status == PS_COMPLETE || status == PS_COMPLETING ) {
		return;
	}

	if ( m_insufficient && !CheckFreeDiskSpace() ) {
		// Still not enough free disk space
		return;
	}

	m_paused = false;
	m_stopped = false;
	m_insufficient = false;

	m_lastsearchtime = 0;

	SetActive(theApp->IsConnected());

	if (m_gaplist.IsComplete() && (GetStatus() == PS_ERROR)) {
		// The file has already been hashed at this point
		CompleteFile(true);
	}

	UpdateDisplayedInfo(true);
}

bool CPartFile::CheckFreeDiskSpace( uint64 neededSpace )
{
	uint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if (free == static_cast<uint64>(wxInvalidOffset)) {
		// If GetFreeSpaceAt() fails, then the path probably does not exist.
		return false;
	}

	// The very least acceptable disk space is a single PART
	if ( free < PARTSIZE ) {
		// Always fail in this case, since we risk losing data if we try to
		// write on a full partition.
		return false;
	}

	// All other checks are only made if the user has enabled them
	if ( thePrefs::IsCheckDiskspaceEnabled() ) {
		neededSpace += thePrefs::GetMinFreeDiskSpace();
	}

	// Due to the existence of sparse files, we cannot assume that writes within
	// the file don't cause new blocks to be allocated. Therefore, we simply have
	// to stop writing the moment the limit has been reached.
	return free >= neededSpace;
}
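
// Worked example: with the disk-space check enabled and, say, 50 MB configured
// as minimum free space, a call like CheckFreeDiskSpace(m_nTotalBufferData)
// before a flush only succeeds while the partition still has at least
// (buffered bytes + 50 MB) free - and it never succeeds with less than one
// PARTSIZE free, regardless of the preferences.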

void CPartFile::SetLastAnsweredTime()
{
	m_ClientSrcAnswered = ::GetTickCount();
}


void CPartFile::SetLastAnsweredTimeTimeout()
{
	m_ClientSrcAnswered = 2 * CONNECTION_LATENCY + ::GetTickCount() - SOURCECLIENTREASKS;
}
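
// In other words: instead of clearing the timestamp, the timeout variant above
// backdates m_ClientSrcAnswered so that it already looks almost
// SOURCECLIENTREASKS old (keeping 2 * CONNECTION_LATENCY of slack), which should
// make the next source reask become due almost immediately.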

CPacket* CPartFile::CreateSrcInfoPacket(const CUpDownClient* forClient, uint8 byRequestedVersion, uint16 nRequestedOptions)
{
	if ( m_SrcList.empty() ) {
		return NULL;
	}

	if (!IsPartFile()) {
		return CKnownFile::CreateSrcInfoPacket(forClient, byRequestedVersion, nRequestedOptions);
	}

	if (((forClient->GetRequestFile() != this)
		&& (forClient->GetUploadFile() != this)) || forClient->GetUploadFileID() != GetFileHash()) {
		wxString file1 = _("Unknown");
		if (forClient->GetRequestFile() && forClient->GetRequestFile()->GetFileName().IsOk()) {
			file1 = forClient->GetRequestFile()->GetFileName().GetPrintable();
		} else if (forClient->GetUploadFile() && forClient->GetUploadFile()->GetFileName().IsOk()) {
			file1 = forClient->GetUploadFile()->GetFileName().GetPrintable();
		}
		wxString file2 = _("Unknown");
		if (GetFileName().IsOk()) {
			file2 = GetFileName().GetPrintable();
		}
		AddDebugLogLineM(false, logPartFile, wxT("File mismatch on source packet (P) Sending: ") + file1 + wxT(" From: ") + file2);
		return NULL;
	}

	if ( !(GetStatus() == PS_READY || GetStatus() == PS_EMPTY)) {
		return NULL;
	}

	const BitVector& reqstatus = forClient->GetPartStatus();
	bool KnowNeededParts = !reqstatus.empty();
	//wxASSERT(reqstatus.size() == GetPartCount()); // Obviously!
	if (KnowNeededParts && (reqstatus.size() != GetPartCount())) {
		// Yuck. Same file but different part count? Seriously fucked up.
		// This happens rather often with reqstatus.size() == 0. Don't log then.
		if (reqstatus.size()) {
			AddDebugLogLineM(false, logKnownFiles, CFormat(wxT("Impossible situation: different partcounts: %i (client) and %i (file) for %s")) % reqstatus.size() % GetPartCount() % GetFileName());
		}
		return NULL;
	}

	CMemFile data(1024);

	uint8 byUsedVersion;
	bool bIsSX2Packet;
	if (forClient->SupportsSourceExchange2() && byRequestedVersion > 0) {
		// The client uses SourceExchange2 and requested the highest version it knows,
		// and we send the highest version we know, but of course not higher than its request.
		byUsedVersion = std::min(byRequestedVersion, (uint8)SOURCEEXCHANGE2_VERSION);
		bIsSX2Packet = true;
		data.WriteUInt8(byUsedVersion);

		// we don't support any special SX2 options yet, reserved for later use
		if (nRequestedOptions != 0) {
			AddDebugLogLineM(false, logKnownFiles, CFormat(wxT("Client requested unknown options for SourceExchange2: %u")) % nRequestedOptions);
		}
	} else {
		byUsedVersion = forClient->GetSourceExchange1Version();
		bIsSX2Packet = false;
		if (forClient->SupportsSourceExchange2()) {
			AddDebugLogLineM(false, logKnownFiles, wxT("Client which announced to support SX2 sent SX1 packet instead"));
		}
	}

	uint16 nCount = 0;

	data.WriteHash(m_abyFileHash);
	data.WriteUInt16(nCount);

	for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		bool bNeeded = false;

		CUpDownClient* cur_src = *it;

		int state = cur_src->GetDownloadState();
		int valid = ( state == DS_DOWNLOADING ) || ( state == DS_ONQUEUE && !cur_src->IsRemoteQueueFull() );

		if ( cur_src->HasLowID() || !valid ) {
			continue;
		}

		// only send sources which have needed parts for this client, if possible
		const BitVector& srcstatus = cur_src->GetPartStatus();
		if ( !srcstatus.empty() ) {
			//wxASSERT(srcstatus.size() == GetPartCount()); // Obviously!
			if (srcstatus.size() != GetPartCount()) {
				continue;
			}
			if ( KnowNeededParts ) {
				// only send sources which have needed parts for this client
				for (int x = 0; x < GetPartCount(); ++x) {
					if (srcstatus[x] && !reqstatus[x]) {
						bNeeded = true;
						break;
					}
				}
			} else {
				// If we don't know the needed parts for this client, return any
				// source: currently a client sends its file status only after it
				// has at least one complete part.
				if (srcstatus.size() != GetPartCount()) {
					continue;
				}
				for (int x = 0; x < GetPartCount(); ++x) {
					if (srcstatus[x]) {
						bNeeded = true;
						break;
					}
				}
			}
		}

		if (bNeeded) {
			++nCount;

			uint32 dwID;
			if (forClient->GetSourceExchange1Version() > 2) {
				dwID = cur_src->GetUserIDHybrid();
			} else {
				dwID = wxUINT32_SWAP_ALWAYS(cur_src->GetUserIDHybrid());
			}
			data.WriteUInt32(dwID);
			data.WriteUInt16(cur_src->GetUserPort());
			data.WriteUInt32(cur_src->GetServerIP());
			data.WriteUInt16(cur_src->GetServerPort());

			if (byUsedVersion >= 2) {
				data.WriteHash(cur_src->GetUserHash());
			}

			if (byUsedVersion >= 4) {
				// CryptSettings - SourceExchange V4
				// 1 CryptLayer Required
				// 1 CryptLayer Requested
				// 1 CryptLayer Supported
				const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
				const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
				const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
				const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
				data.WriteUInt8(byCryptOptions);
			}

			if (nCount == 500) {
				break;
			}
		}
	}

	if (!nCount) {
		return NULL;
	}

	data.Seek(bIsSX2Packet ? 17 : 16, wxFromStart);
	data.WriteUInt16(nCount);

	CPacket* result = new CPacket(data, OP_EMULEPROT, bIsSX2Packet ? OP_ANSWERSOURCES2 : OP_ANSWERSOURCES);

	// 16+2+501*(4+2+4+2+16) = 14046 bytes max.
	if (result->GetPacketSize() > 354) {
		result->PackPacket();
	}

	return result;
}
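
// Layout of each source entry written above (and checked again when reading in
// AddClientSources() below): 4 bytes user ID, 2 bytes TCP port, 4 bytes server
// IP, 2 bytes server port, plus a 16-byte user hash from source exchange
// version 2 on and one extra byte of crypt options from version 4 on - exactly
// the 4+2+4+2(+16)(+1) bytes per entry that the receiving side validates.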

void CPartFile::AddClientSources(CMemFile* sources, unsigned nSourceFrom, uint8 uClientSXVersion, bool bSourceExchange2, const CUpDownClient* /*pClient*/)
{
	if (m_stopped) {
		return;
	}

	uint16 nCount = 0;
	uint8 uPacketSXVersion = 0;
	if (!bSourceExchange2) {
		nCount = sources->ReadUInt16();

		// Check if the data size matches the 'nCount' for v1 or v2 and eventually correct the source
		// exchange version while reading the packet data. Otherwise we could experience a higher
		// chance of dealing with wrong source data, userhashes and finally duplicate sources.
		uint32 uDataSize = sources->GetLength() - sources->GetPosition();

		if ((uint32)(nCount*(4+2+4+2)) == uDataSize) { // Checks if a version 1 packet is the correct size
			if (uClientSXVersion != 1) {
				return;
			}
			uPacketSXVersion = 1;
		} else if ((uint32)(nCount*(4+2+4+2+16)) == uDataSize) { // Checks if a version 2/3 packet is the correct size
			if (uClientSXVersion == 2) {
				uPacketSXVersion = 2;
			} else if (uClientSXVersion > 2) {
				uPacketSXVersion = 3;
			} else {
				return;
			}
		} else if (nCount*(4+2+4+2+16+1) == uDataSize) {
			if (uClientSXVersion != 4 ) {
				return;
			}
			uPacketSXVersion = 4;
		} else {
			// If v5 inserts additional data (like v2), the above code will correctly filter those packets.
			// If v5 appends additional data after <count>(<Sources>)[count], we are in trouble with the
			// above code. Though a client which does not understand v5+ should never receive such a packet.
			AddDebugLogLineM(false, logClient, CFormat(wxT("Received invalid source exchange packet (v%u) of data size %u for %s")) % uClientSXVersion % uDataSize % GetFileName());
			return;
		}
	} else {
		// We only check if the version is known by us and do a quick sanity check on known versions.
		// Other than for SX1, the packet will be ignored if any error appears, since it can't be a
		// "misunderstanding" anymore.
		if (uClientSXVersion > SOURCEEXCHANGE2_VERSION || uClientSXVersion == 0 ) {
			AddDebugLogLineM(false, logPartFile, CFormat(wxT("Invalid source exchange type version: %i")) % uClientSXVersion);
			return;
		}

		// all known versions use the first 2 bytes as count, and unknown versions are already filtered above
		nCount = sources->ReadUInt16();
		uint32 uDataSize = (uint32)(sources->GetLength() - sources->GetPosition());
		bool bError = false;
		switch (uClientSXVersion) {
			case 1:
				bError = nCount*(4+2+4+2) != uDataSize;
				break;
			case 2:
			case 3:
				bError = nCount*(4+2+4+2+16) != uDataSize;
				break;
			case 4:
				bError = nCount*(4+2+4+2+16+1) != uDataSize;
				break;
		}

		if (bError) {
			AddDebugLogLineM(false, logPartFile, wxT("Invalid source exchange data size."));
			return;
		}
		uPacketSXVersion = uClientSXVersion;
	}

	for (uint16 i = 0; i != nCount; ++i) {
		uint32 dwID = sources->ReadUInt32();
		uint16 nPort = sources->ReadUInt16();
		uint32 dwServerIP = sources->ReadUInt32();
		uint16 nServerPort = sources->ReadUInt16();

		CMD4Hash userHash;
		if (uPacketSXVersion > 1) {
			userHash = sources->ReadHash();
		}

		uint8 byCryptOptions = 0;
		if (uPacketSXVersion >= 4) {
			byCryptOptions = sources->ReadUInt8();
		}

		// Clients send IDs in the Hybrid format, so highID clients with *.*.*.0 won't be falsely switched to a lowID.
		uint32 dwIDED2K;
		if (uPacketSXVersion >= 3) {
			dwIDED2K = wxUINT32_SWAP_ALWAYS(dwID);
		} else {
			dwIDED2K = dwID;
		}

		// check the HighID(IP) - "Filter LAN IPs" and "IPfilter" the received sources IP addresses
		if (!IsLowID(dwID)) {
			if (!IsGoodIP(dwIDED2K, thePrefs::FilterLanIPs())) {
				// check for 0-IP, localhost and optionally for LAN addresses
				AddDebugLogLineM(false, logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - bad IP")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
				continue;
			}
			if (theApp->ipfilter->IsFiltered(dwIDED2K)) {
				AddDebugLogLineM(false, logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - IPFilter")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
				continue;
			}
			if (theApp->clientlist->IsBannedClient(dwIDED2K)) {
				continue;
			}
		}

		// additionally check for LowID and own IP
		if (!CanAddSource(dwID, nPort, dwServerIP, nServerPort, NULL, false)) {
			AddDebugLogLineM(false, logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via source exchange")) % Uint32toStringIP(dwIDED2K));
			continue;
		}

		if (thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
			CUpDownClient* newsource = new CUpDownClient(nPort, dwID, dwServerIP, nServerPort, this, (uPacketSXVersion < 3), true);
			if (uPacketSXVersion > 1) {
				newsource->SetUserHash(userHash);
			}

			if (uPacketSXVersion >= 4) {
				newsource->SetConnectOptions(byCryptOptions, true, false);
			}

			newsource->SetSourceFrom((ESourceFrom)nSourceFrom);
			theApp->downloadqueue->CheckAndAddSource(this, newsource);
		} else {
			break;
		}
	}
}

void CPartFile::UpdateAutoDownPriority()
{
	if (!IsAutoDownPriority()) {
		return;
	}

	if (GetSourceCount() <= RARE_FILE) {
		if ( GetDownPriority() != PR_HIGH )
			SetDownPriority(PR_HIGH, false, false);
	} else if (GetSourceCount() < 100) {
		if ( GetDownPriority() != PR_NORMAL )
			SetDownPriority(PR_NORMAL, false, false);
	} else {
		if ( GetDownPriority() != PR_LOW )
			SetDownPriority(PR_LOW, false, false);
	}
}

// Making this function return a higher value when more sources have the extended
// protocol will force you to ask a larger variety of people for sources.

int CPartFile::GetCommonFilePenalty()
{
	//TODO: implement, but never return less than MINCOMMONPENALTY!
	return MINCOMMONPENALTY;
}

/* Barry - Replaces BlockReceived()

	Originally this only wrote to disk when a full 180k block
	had been received from a client, and only asked for data in
	180k blocks.

	This meant that on average 90k was lost for every connection
	to a client data source. That is a lot of wasted data.

	To reduce the lost data, packets are now written to a buffer
	and flushed to disk regularly regardless of size downloaded.
	This includes compressed packets.

	Data is also requested only where gaps are, not in 180k blocks.
	The requests will still not exceed 180k, but may be smaller to
	fill a gap.
*/

// Kry - transize is 32bits, no packet can be more than that (this is
// the compressed size). Even 32bits is too much imho. As for the return size,
// look at lenData below.
uint32 CPartFile::WriteToBuffer(uint32 transize, byte* data, uint64 start, uint64 end, Requested_Block_Struct* block)
{
	// Increment the transferred bytes counter for this file
	transferred += transize;

	// This is needed a few times
	// Kry - should not need a uint64 here - no block is larger than
	// 2GB, even after being uncompressed.
	uint32 lenData = (uint32) (end - start + 1);

	if (lenData > transize) {
		m_iGainDueToCompression += lenData - transize;
	}

	// Occasionally packets are duplicated, no point in writing them twice
	if (IsComplete(start, end)) {
		AddDebugLogLineM(false, logPartFile,
			CFormat(wxT("File '%s' has already been written from %u to %u"))
				% GetFileName() % start % end);
		return 0;
	}

	// security sanity check to make sure we do not write anything into an already hashed complete chunk
	const uint64 nStartChunk = start / PARTSIZE;
	const uint64 nEndChunk = end / PARTSIZE;
	if (IsComplete(nStartChunk)) {
		AddDebugLogLineM(false, logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (start): %u-%u; File=%s")) % start % end % GetFileName());
		return 0;
	} else if (nStartChunk != nEndChunk) {
		if (IsComplete(nEndChunk)) {
			AddDebugLogLineM(false, logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (end): %u-%u; File=%s")) % start % end % GetFileName());
			return 0;
		} else {
			AddDebugLogLineM(false, logPartFile, CFormat(wxT("Received data crosses chunk boundaries: %u-%u; File=%s")) % start % end % GetFileName());
		}
	}

	// Create a copy of the data as a new buffer
	byte* buffer = new byte[lenData];
	memcpy(buffer, data, lenData);

	// Create a new buffered queue entry
	PartFileBufferedData* item = new PartFileBufferedData(buffer, start, end, block);

	// Add to the queue in the correct position (most likely the end)
	bool added = false;

	std::list<PartFileBufferedData*>::iterator it = m_BufferedData_list.begin();
	for (; it != m_BufferedData_list.end(); ++it) {
		PartFileBufferedData* queueItem = *it;

		if (item->end <= queueItem->end) {
			if (it != m_BufferedData_list.begin()) {
				added = true;
				m_BufferedData_list.insert(--it, item);
			}
			break;
		}
	}

	if (!added) {
		m_BufferedData_list.push_front(item);
	}

	// Increment the buffer size marker
	m_nTotalBufferData += lenData;

	// Mark this small section of the file as filled
	FillGap(item->start, item->end);

	// Update the flushed mark on the requested block.
	// The loop here is unfortunate but necessary to detect deleted blocks.
	std::list<Requested_Block_Struct*>::iterator it2 = m_requestedblocks_list.begin();
	for (; it2 != m_requestedblocks_list.end(); ++it2) {
		if (*it2 == item->block) {
			item->block->transferred += lenData;
		}
	}

	if (m_gaplist.IsComplete()) {
		FlushBuffer(true);
	}

	// Return the length of data written to the buffer
	return lenData;
}
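
// Rough data flow of the buffering scheme described above: WriteToBuffer() only
// copies the received bytes into m_BufferedData_list and marks the gap as
// filled, while FlushBuffer() below performs the actual disk writes, drops the
// buffered items and re-hashes every part that was touched.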

void CPartFile::FlushBuffer(bool fromAICHRecoveryDataAvailable)
{
	m_nLastBufferFlushTime = GetTickCount();

	if (m_BufferedData_list.empty()) {
		return;
	}

	uint32 partCount = GetPartCount();
	// Remember which parts need to be checked at the end of the flush
	std::vector<bool> changedPart(partCount, false);

	// Ensure the file is big enough to write the data to (the last item will be the furthest from the start)
	if (!CheckFreeDiskSpace(m_nTotalBufferData)) {
		// Not enough free space to write the last item, bail out
		AddLogLineM(true, CFormat( _("WARNING: Not enough free disk-space! Pausing file: %s") ) % GetFileName());
		PauseFile( true );
		return;
	}

	// Loop through the queue
	while ( !m_BufferedData_list.empty() ) {
		// Get the top item and remove it from the queue
		CScopedPtr<PartFileBufferedData> item(m_BufferedData_list.front());
		m_BufferedData_list.pop_front();

		// This is needed a few times
		wxASSERT((item->end - item->start) < 0xFFFFFFFF);
		uint32 lenData = (uint32)(item->end - item->start + 1);

		// SLUGFILLER: SafeHash - could be more than one part
		for (uint32 curpart = (item->start/PARTSIZE); curpart <= (item->end/PARTSIZE); ++curpart) {
			wxASSERT(curpart < partCount);
			changedPart[curpart] = true;
		}
		// SLUGFILLER: SafeHash

		// Go to the correct position in the file and write the block of data
		try {
			m_hpartfile.Seek(item->start);
			m_hpartfile.Write(item->data.get(), lenData);
		} catch (const CIOFailureException& e) {
			AddDebugLogLineM(true, logPartFile, wxT("Error while saving part-file: ") + e.what());
			SetPartFileStatus(PS_ERROR);
		}

		// Decrease the buffer size
		m_nTotalBufferData -= lenData;
	}

	// Update the last-changed date
	m_lastDateChanged = wxDateTime::GetTimeNow();

	try {
		// The partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			// This is a "last chance" correction. The real bugfix has to be applied 'somewhere' else.
			m_hpartfile.SetLength(GetFileSize());
		}
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logPartFile,
			CFormat(wxT("Error while truncating part-file (%s): %s"))
				% m_PartPath % e.what());
		SetPartFileStatus(PS_ERROR);
	}

	// Check each part of the file
	for (uint32 partNumber = 0; partNumber < partCount; ++partNumber) {
		if (changedPart[partNumber] == false) {
			continue;
		}

		uint32 partRange = GetPartSize(partNumber) - 1;

		// Is this 9MB part complete
		if (IsComplete(partNumber)) {
			if (!HashSinglePart(partNumber)) {
				AddLogLineM(true, CFormat(
					_("Downloaded part %i is corrupt in file: %s") ) % partNumber % GetFileName() );
				// add the part to the corrupted list, if not already there
				if (!IsCorruptedPart(partNumber)) {
					m_corrupted_list.push_back(partNumber);
				}
				// Request AICH recovery data.
				// Don't if called from AICHRecoveryDataAvailable: it's already there and would lead to infinite recursion.
				if (!fromAICHRecoveryDataAvailable) {
					RequestAICHRecovery((uint16)partNumber);
				}
				// Reduce the transferred amount by the corrupt amount
				m_iLostDueToCorruption += (partRange + 1);
			} else {
				if (!m_hashsetneeded) {
					AddDebugLogLineM(false, logPartFile, CFormat(
						wxT("Finished part %u of '%s'")) % partNumber % GetFileName());
				}

				// if this part was successfully completed (although ICH is active), remove it from the corrupted list
				EraseFirstValue(m_corrupted_list, partNumber);

				if (status == PS_EMPTY) {
					if (theApp->IsRunning()) { // may be called during shutdown!
						if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
							// Successfully completed part, make it available for sharing
							SetStatus(PS_READY);
							theApp->sharedfiles->SafeAddKFile(this);
						}
					}
				}
			}
		} else if ( IsCorruptedPart(partNumber) &&		// corrupted part:
					(thePrefs::IsICHEnabled()	// old ICH: rehash whenever we have new data hoping it will be good now
					 || fromAICHRecoveryDataAvailable)) {	// new AICH: one rehash right before performing it (maybe it's already good)
			// Try to recover with minimal loss
			if (HashSinglePart(partNumber)) {
				++m_iTotalPacketsSavedDueToICH;

				uint64 uMissingInPart = m_gaplist.GetGapSize(partNumber);
				FillGap(partNumber);
				RemoveBlockFromList(PARTSIZE*partNumber, (PARTSIZE*partNumber + partRange));

				// remove the part from the corrupted list
				EraseFirstValue(m_corrupted_list, partNumber);

				AddLogLineM(true, CFormat( _("ICH: Recovered corrupted part %i for %s -> Saved bytes: %s") )
					% partNumber
					% GetFileName()
					% CastItoXBytes(uMissingInPart));

				if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
					if (status == PS_EMPTY) {
						// Successfully recovered part, make it available for sharing
						SetStatus(PS_READY);
						if (theApp->IsRunning()) // may be called during shutdown!
							theApp->sharedfiles->SafeAddKFile(this);
					}
				}
			}
		}
	}

	if (theApp->IsRunning()) { // may be called during shutdown!
		// Is this file finished?
		if (m_gaplist.IsComplete()) {
			CompleteFile(false);
		}
	}
}
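
// In short, a flush performs: (1) a free-disk-space check, (2) sequential writes
// of all buffered items, (3) a safety truncation if the part file grew beyond
// the expected size, (4) an MD4 check of every part touched by the flush, with
// ICH/AICH recovery for parts that fail, and (5) a completion check for the
// whole file.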

// read data for upload, return false on error
bool CPartFile::ReadData(CFileArea& area, uint64 offset, uint32 toread)
{
	// Sanity check
	if (offset + toread > GetFileSize()) {
		AddDebugLogLineM(false, logPartFile, CFormat(wxT("tried to read %d bytes past eof of %s"))
			% (offset + toread - GetFileSize()) % GetFileName());
		return false;
	}

	m_hpartfile.Seek(offset, wxFromStart);
	area.Read(m_hpartfile, toread);
	// if this fails it throws (which the caller should catch)
	return true;
}

void CPartFile::UpdateFileRatingCommentAvail()
{
	bool prevComment = m_hasComment;
	int prevRating = m_iUserRating;

	m_hasComment = false;
	m_iUserRating = 0;
	int ratingCount = 0;

	SourceSet::iterator it = m_SrcList.begin();
	for (; it != m_SrcList.end(); ++it) {
		CUpDownClient* cur_src = *it;

		if (!cur_src->GetFileComment().IsEmpty()) {
			if (thePrefs::IsCommentFiltered(cur_src->GetFileComment())) {
				continue;
			}
			m_hasComment = true;
		}

		uint8 rating = cur_src->GetFileRating();
		if (rating) {
			wxASSERT(rating <= 5);

			++ratingCount;
			m_iUserRating += rating;
		}
	}

	if (ratingCount) {
		m_iUserRating /= ratingCount;
		wxASSERT(m_iUserRating > 0 && m_iUserRating <= 5);
	}

	if ((prevComment != m_hasComment) || (prevRating != m_iUserRating)) {
		UpdateDisplayedInfo();
	}
}

void CPartFile::SetCategory(uint8 cat)
{
	wxASSERT( cat < theApp->glob_prefs->GetCatCount() );

	m_category = cat;
	SavePartFile();
}

bool CPartFile::RemoveSource(CUpDownClient* toremove, bool updatewindow, bool bDoStatsUpdate)
{
	wxASSERT( toremove );

	bool result = theApp->downloadqueue->RemoveSource( toremove, updatewindow, bDoStatsUpdate );

	// Check if the client should be deleted, but not if the client is already dying
	if ( !toremove->GetSocket() && !toremove->HasBeenDeleted() ) {
		if ( toremove->Disconnected(wxT("RemoveSource - purged")) ) {
			toremove->Safe_Delete();
		}
	}

	return result;
}

void CPartFile::AddDownloadingSource(CUpDownClient* client)
{
	CClientPtrList::iterator it =
		std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), client);
	if (it == m_downloadingSourcesList.end()) {
		m_downloadingSourcesList.push_back(client);
	}
}

void CPartFile::RemoveDownloadingSource(CUpDownClient* client)
{
	CClientPtrList::iterator it =
		std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), client);
	if (it != m_downloadingSourcesList.end()) {
		m_downloadingSourcesList.erase(it);
	}
}

void CPartFile::SetPartFileStatus(uint8 newstatus)
{
	status = newstatus;

	if (thePrefs::GetAllcatType()) {
		Notify_DownloadCtrlUpdateItem(this);
	}

	Notify_DownloadCtrlSort();
}

uint64 CPartFile::GetNeededSpace()
{
	try {
		uint64 length = m_hpartfile.GetLength();

		if (length > GetFileSize()) {
			return 0;	// Shouldn't happen, but just in case
		}

		return GetFileSize() - length;
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logPartFile,
			CFormat(wxT("Error while retrieving file-length (%s): %s"))
				% m_PartPath % e.what());
		SetPartFileStatus(PS_ERROR);
		return 0;
	}
}

void CPartFile::SetStatus(uint8 in)
{
	wxASSERT( in != PS_PAUSED && in != PS_INSUFFICIENT );

	status = in;

	if (theApp->IsRunning()) {
		UpdateDisplayedInfo( true );

		if ( thePrefs::ShowCatTabInfos() ) {
			Notify_ShowUpdateCatTabTitles();
		}
	}
}

void CPartFile::RequestAICHRecovery(uint16 nPart)
{
	if ( !m_pAICHHashSet->HasValidMasterHash() ||
		 (m_pAICHHashSet->GetStatus() != AICH_TRUSTED && m_pAICHHashSet->GetStatus() != AICH_VERIFIED)) {
		AddDebugLogLineM( false, logAICHRecovery, wxT("Unable to request AICH Recoverydata because we have no trusted Masterhash") );
		return;
	}

	if (GetPartSize(nPart) <= EMBLOCKSIZE) {
		return;
	}

	if (CAICHHashSet::IsClientRequestPending(this, nPart)) {
		AddDebugLogLineM( false, logAICHRecovery, wxT("RequestAICHRecovery: Already a request for this part pending"));
		return;
	}

	// first check if we already have the recovery data, no need to re-request it then
	if (m_pAICHHashSet->IsPartDataAvailable(nPart*PARTSIZE)) {
		AddDebugLogLineM( false, logAICHRecovery, wxT("Found PartRecoveryData in memory"));
		AICHRecoveryDataAvailable(nPart);
		return;
	}

	wxASSERT( nPart < GetPartCount() );
	// find some random client which supports AICH to ask for the blocks;
	// first let's see how many we have at all - we strongly prefer high-ID clients
	uint32 cAICHClients = 0;
	uint32 cAICHLowIDClients = 0;
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		CUpDownClient* pCurClient = *(it);
		if ( pCurClient->IsSupportingAICH() &&
			 pCurClient->GetReqFileAICHHash() != NULL &&
			 !pCurClient->IsAICHReqPending() &&
			 (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
		{
			if (pCurClient->HasLowID()) {
				++cAICHLowIDClients;
			} else {
				++cAICHClients;
			}
		}
	}

	if ((cAICHClients | cAICHLowIDClients) == 0) {
		AddDebugLogLineM( false, logAICHRecovery, wxT("Unable to request AICH Recoverydata because we found no client who supports it and has the same hash as the trusted one"));
		return;
	}

	uint32 nSeclectedClient;
	if (cAICHClients > 0) {
		nSeclectedClient = (rand() % cAICHClients) + 1;
	} else {
		nSeclectedClient = (rand() % cAICHLowIDClients) + 1;
	}

	CUpDownClient* pClient = NULL;
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		CUpDownClient* pCurClient = *(it);
		if (pCurClient->IsSupportingAICH() && pCurClient->GetReqFileAICHHash() != NULL && !pCurClient->IsAICHReqPending()
			&& (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
		{
			if (cAICHClients > 0) {
				if (!pCurClient->HasLowID()) {
					nSeclectedClient--;
				}
			} else {
				wxASSERT( pCurClient->HasLowID());
				nSeclectedClient--;
			}

			if (nSeclectedClient == 0) {
				pClient = pCurClient;
				break;
			}
		}
	}

	if (pClient == NULL) {
		return;
	}

	AddDebugLogLineM( false, logAICHRecovery, CFormat( wxT("Requesting AICH Hash (%s) from client %s") ) % ( cAICHClients ? wxT("HighId") : wxT("LowID") ) % pClient->GetClientFullInfo() );
	pClient->SendAICHRequest(this, nPart);
}
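
// Selection logic above in short: count the AICH-capable sources whose master
// hash matches ours, pick one of them at random (high-ID clients are preferred,
// low-ID clients are only considered when no high-ID candidate exists) and send
// that client the recovery-data request for the corrupted part.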

void CPartFile::AICHRecoveryDataAvailable(uint16 nPart)
{
	if (GetPartCount() < nPart) {
		wxFAIL;
		return;
	}

	FlushBuffer(true);

	uint32 length = GetPartSize(nPart);
	// if the part was already OK, it would now be complete
	if (IsComplete(nPart)) {
		AddDebugLogLineM( false, logAICHRecovery,
			wxString::Format( wxT("Processing AICH Recovery data: The part (%u) is already complete, canceling"), nPart ) );
		return;
	}

	CAICHHashTree* pVerifiedHash = m_pAICHHashSet->m_pHashTree.FindHash(nPart*PARTSIZE, length);
	if (pVerifiedHash == NULL || !pVerifiedHash->GetHashValid()) {
		AddDebugLogLineM( true, logAICHRecovery, wxT("Processing AICH Recovery data: Unable to get verified hash from hashset (should never happen)") );
		wxFAIL;
		return;
	}

	CAICHHashTree htOurHash(pVerifiedHash->GetNDataSize(), pVerifiedHash->GetIsLeftBranch(), pVerifiedHash->GetNBaseSize());
	try {
		m_hpartfile.Seek(PARTSIZE * nPart, wxFromStart);
		CreateHashFromFile(m_hpartfile, length, NULL, &htOurHash);
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logAICHRecovery,
			CFormat(wxT("IO failure while hashing part-file '%s': %s"))
				% m_hpartfile.GetFilePath() % e.what());
		SetPartFileStatus(PS_ERROR);
		return;
	}

	if (!htOurHash.GetHashValid()) {
		AddDebugLogLineM( false, logAICHRecovery, wxT("Processing AICH Recovery data: Failed to retrieve AICH Hashset of corrupt part") );
		wxFAIL;
		return;
	}

	// now compare the hash we just computed to the verified hash and re-add all blocks which are OK
	uint32 nRecovered = 0;
	for (uint32 pos = 0; pos < length; pos += EMBLOCKSIZE) {
		const uint32 nBlockSize = min<uint32>(EMBLOCKSIZE, length - pos);
		CAICHHashTree* pVerifiedBlock = pVerifiedHash->FindHash(pos, nBlockSize);
		CAICHHashTree* pOurBlock = htOurHash.FindHash(pos, nBlockSize);
		if ( pVerifiedBlock == NULL || pOurBlock == NULL || !pVerifiedBlock->GetHashValid() || !pOurBlock->GetHashValid()) {
			continue;
		}

		if (pOurBlock->GetHash() == pVerifiedBlock->GetHash()) {
			FillGap(PARTSIZE*nPart + pos, PARTSIZE*nPart + pos + (nBlockSize - 1));
			RemoveBlockFromList(PARTSIZE*nPart, PARTSIZE*nPart + (nBlockSize - 1));
			nRecovered += nBlockSize;
		}
	}

	// OK, now some sanity checks
	if (IsComplete(nPart)) {
		// This is bad, but it could probably happen under some rare circumstances.
		// Make sure that MD4 agrees to this fact too.
		if (!HashSinglePart(nPart)) {
			AddDebugLogLineM( false, logAICHRecovery,
				wxString::Format(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering - but MD4 says it is corrupt! Setting hashset to error state, deleting part"), nPart));
			// now we are fu... unhappy
			m_pAICHHashSet->SetStatus(AICH_ERROR);
			return;
		}

		AddDebugLogLineM( false, logAICHRecovery, wxString::Format(
			wxT("Processing AICH Recovery data: The part (%u) got completed while recovering and MD4 agrees"), nPart ) );
		if (status == PS_EMPTY && theApp->IsRunning()) {
			if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
				// Successfully recovered part, make it available for sharing
				SetStatus(PS_READY);
				theApp->sharedfiles->SafeAddKFile(this);
			}
		}

		if (theApp->IsRunning()) {
			// Is this file finished?
			if (m_gaplist.IsComplete()) {
				CompleteFile(false);
			}
		}
	} // end sanity check

	// We did the best we could. If it's still incomplete, then there is no need to keep
	// bashing it with ICH. So remove it from the list of corrupted parts.
	EraseFirstValue(m_corrupted_list, nPart);

	// make sure the user appreciates our great recovering work :P
	AddDebugLogLineM( true, logAICHRecovery, CFormat(
		wxT("AICH successfully recovered %s of %s from part %u for %s") )
		% CastItoXBytes(nRecovered)
		% CastItoXBytes(length)
		% nPart
		% GetFileName() );
}
, int newState
)
3443 if ( oldState
== newState
)
3446 // If the state is -1, then it's an entirely new item
3447 if ( oldState
!= -1 ) {
3448 // Was the old state a valid state?
3449 if ( oldState
== DS_ONQUEUE
|| oldState
== DS_DOWNLOADING
) {
3452 if ( oldState
== DS_CONNECTED
/* || oldState == DS_REMOTEQUEUEFULL */ ) {
3456 m_notCurrentSources
--;
3460 // If the state is -1, then the source is being removed
3461 if ( newState
!= -1 ) {
3462 // Was the old state a valid state?
3463 if ( newState
== DS_ONQUEUE
|| newState
== DS_DOWNLOADING
) {
3466 if ( newState
== DS_CONNECTED
/* || newState == DS_REMOTEQUEUEFULL */ ) {
3470 ++m_notCurrentSources
;

bool CPartFile::AddSource( CUpDownClient* client )
{
	if (m_SrcList.insert( client ).second) {
		theStats::AddFoundSource();
		theStats::AddSourceOrigin(client->GetSourceFrom());
		return true;
	} else {
		return false;
	}
}


bool CPartFile::DelSource( CUpDownClient* client )
{
	if (m_SrcList.erase( client )) {
		theStats::RemoveSourceOrigin(client->GetSourceFrom());
		theStats::RemoveFoundSource();
		return true;
	} else {
		return false;
	}
}

void CPartFile::UpdatePartsFrequency( CUpDownClient* client, bool increment )
{
	const BitVector& freq = client->GetPartStatus();

	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);
	}

	unsigned int size = freq.size();
	if ( size != m_SrcpartFrequency.size() ) {
		return;
	}

	if ( increment ) {
		for ( unsigned int i = 0; i < size; i++ ) {
			if ( freq[i] ) {
				m_SrcpartFrequency[i]++;
			}
		}
	} else {
		for ( unsigned int i = 0; i < size; i++ ) {
			if ( freq[i] ) {
				m_SrcpartFrequency[i]--;
			}
		}
	}
}

const FileRatingList& CPartFile::GetRatingAndComments()
{
	m_FileRatingList.clear();
	// This can be pre-processed, but is it worth the CPU?
	CPartFile::SourceSet::iterator it = m_SrcList.begin();
	for ( ; it != m_SrcList.end(); ++it ) {
		CUpDownClient* cur_src = *it;
		if (cur_src->GetFileComment().Length() > 0 || cur_src->GetFileRating() > 0) {
			// AddDebugLogLineM(false, logPartFile, wxString(wxT("found a comment for ")) << GetFileName());
			m_FileRatingList.push_back(SFileRating(*cur_src));
		}
	}

	return m_FileRatingList;
}

#else // CLIENT_GUI

CPartFile::CPartFile(CEC_PartFile_Tag* tag)
{
	SetFileName(CPath(tag->FileName()));
	m_abyFileHash = tag->ID();
	SetFileSize(tag->SizeFull());
	m_gaplist.Init(GetFileSize(), true);	// Init empty
	m_partmetfilename = CPath(tag->PartMetName());
	transferred = tag->SizeXfer();
	completedsize = tag->SizeDone();
	percentcompleted = (100.0 * completedsize) / GetFileSize();

	m_category = tag->FileCat();

	m_SrcpartFrequency.insert(m_SrcpartFrequency.end(), GetPartCount(), 0);
	m_iDownPriority = tag->Prio();
	if ( m_iDownPriority >= 10 ) {
		m_iDownPriority -= 10;
		m_bAutoDownPriority = true;
	} else {
		m_bAutoDownPriority = false;
	}

	m_a4af_source_count = 0;
}

/*
 * Remote GUI specific code
 */
CPartFile::~CPartFile()
{
}


const FileRatingList& CPartFile::GetRatingAndComments()
{
	return m_FileRatingList;
}

#endif // !CLIENT_GUI

void CPartFile::UpdateDisplayedInfo(bool force)
{
	uint32 curTick = ::GetTickCount();
	m_CommentUpdated = true;

	// Wait at least 1.5s between each redraw
	if (force || curTick - m_lastRefreshedDLDisplay > MINWAIT_BEFORE_DLDISPLAY_WINDOWUPDATE) {
		Notify_DownloadCtrlUpdateItem(this);
		m_lastRefreshedDLDisplay = curTick;
	}
}

void CPartFile::Init()
{
	m_showSources = false;
	m_lastsearchtime = 0;
	lastpurgetime = ::GetTickCount();
	m_paused = false;
	m_stopped = false;
	m_insufficient = false;

	m_iLastPausePurge = time(NULL);

	if (thePrefs::GetNewAutoDown()) {
		m_iDownPriority = PR_HIGH;
		m_bAutoDownPriority = true;
	} else {
		m_iDownPriority = PR_NORMAL;
		m_bAutoDownPriority = false;
	}

	memset(m_anStates, 0, sizeof(m_anStates));

	transferingsrc = 0; // new

	m_CommentUpdated = false;
	m_hashsetneeded = true;
	percentcompleted = 0;
	m_bPreviewing = false;
	lastseencomplete = 0;
	m_availablePartsCount = 0;
	m_ClientSrcAnswered = 0;
	m_LastNoNeededCheck = 0;

	m_nTotalBufferData = 0;
	m_nLastBufferFlushTime = 0;
	m_bPercentUpdated = false;
	m_bRecoveringArchive = false;
	m_iGainDueToCompression = 0;
	m_iLostDueToCorruption = 0;
	m_iTotalPacketsSavedDueToICH = 0;

	m_lastRefreshedDLDisplay = 0;
	m_nDlActiveTime = 0;

	m_is_A4AF_auto = false;
	m_localSrcReqQueued = false;
	m_nCompleteSourcesTime = time(NULL);
	m_nCompleteSourcesCount = 0;
	m_nCompleteSourcesCountLo = 0;
	m_nCompleteSourcesCountHi = 0;

	m_notCurrentSources = 0;

	m_LastSearchTimeKad = 0;
	m_TotalSearchesKad = 0;

	m_gapptrlist.Init(&m_gaplist);
}

wxString CPartFile::getPartfileStatus() const
{
	wxString mybuffer;

	if ((status == PS_HASHING) || (status == PS_WAITINGFORHASH)) {
		mybuffer = _("Hashing");
	} else if (status == PS_ALLOCATING) {
		mybuffer = _("Allocating");
	} else {
		switch (GetStatus()) {
			case PS_COMPLETING:
				mybuffer = _("Completing");
				break;
			case PS_COMPLETE:
				mybuffer = _("Complete");
				break;
			case PS_PAUSED:
				mybuffer = _("Paused");
				break;
			case PS_ERROR:
				mybuffer = _("Erroneous");
				break;
			case PS_INSUFFICIENT:
				mybuffer = _("Insufficient disk space");
				break;
			default:
				if (GetTransferingSrcCount() > 0) {
					mybuffer = _("Downloading");
				} else {
					mybuffer = _("Waiting");
				}
				break;
		}

		if (m_stopped && (GetStatus() != PS_COMPLETE)) {
			mybuffer = _("Stopped");
		}
	}

	return mybuffer;
}

int CPartFile::getPartfileStatusRang() const
{
	int tempstatus = 0;
	if (GetTransferingSrcCount() == 0) {
		tempstatus = 1;
	}
	switch (GetStatus()) {
		case PS_WAITINGFORHASH:
		default:
			break;
	}
	return tempstatus;
}

wxString CPartFile::GetFeedback() const
{
	wxString retval = CKnownFile::GetFeedback();
	if (GetStatus() != PS_COMPLETE) {
		retval += wxString(_("Downloaded")) + wxT(": ") + CastItoXBytes(GetCompletedSize()) + wxString::Format(wxT(" (%.2f%%)\n"), GetPercentCompleted())
			+ _("Sources") + CFormat(wxT(": %u\n")) % GetSourceCount();
	}
	return retval + _("Status") + wxT(": ") + getPartfileStatus() + wxT("\n");
}

sint32 CPartFile::getTimeRemaining() const
{
	if (GetKBpsDown() < 0.001) {
		return -1;
	} else {
		return ((GetFileSize() - GetCompletedSize()) / ((int)(GetKBpsDown() * 1024.0)));
	}
}

bool CPartFile::PreviewAvailable()
{
	FileType type = GetFiletype(GetFileName());

	return (((type == ftVideo) || (type == ftAudio)) && IsComplete(0, 256*1024));
}

bool CPartFile::CheckShowItemInGivenCat(int inCategory)
{
	// easy normal cases
	bool IsInCat;
	bool IsNotFiltered = true;

	IsInCat = ((inCategory == 0) || (inCategory > 0 && inCategory == GetCategory()));

	switch (thePrefs::GetAllcatType()) {
		case 1:
			IsNotFiltered = GetCategory() == 0 || inCategory > 0;
			break;
		case 2:
			IsNotFiltered = IsPartFile();
			break;
		case 3:
			IsNotFiltered = !IsPartFile();
			break;
		case 4:
			IsNotFiltered =
				(GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
				GetTransferingSrcCount() == 0;
			break;
		case 5:
			IsNotFiltered =
				(GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
				GetTransferingSrcCount() > 0;
			break;
		case 6:
			IsNotFiltered = GetStatus() == PS_ERROR;
			break;
		case 7:
			IsNotFiltered = GetStatus() == PS_PAUSED && !IsStopped();
			break;
		case 8:
			IsNotFiltered = IsStopped();
			break;
		case 9:
			IsNotFiltered = GetFiletype(GetFileName()) == ftVideo;
			break;
		case 10:
			IsNotFiltered = GetFiletype(GetFileName()) == ftAudio;
			break;
		case 11:
			IsNotFiltered = GetFiletype(GetFileName()) == ftArchive;
			break;
		case 12:
			IsNotFiltered = GetFiletype(GetFileName()) == ftCDImage;
			break;
		case 13:
			IsNotFiltered = GetFiletype(GetFileName()) == ftPicture;
			break;
		case 14:
			IsNotFiltered = GetFiletype(GetFileName()) == ftText;
			break;
		case 15:
			IsNotFiltered = !IsStopped() && GetStatus() != PS_PAUSED;
			break;
	}

	return IsNotFiltered && IsInCat;
}

void CPartFile::SetActive(bool bActive)
{
	time_t tNow = time(NULL);
	if (bActive) {
		if (theApp->IsConnected()) {
			if (m_tActivated == 0) {
				m_tActivated = tNow;
			}
		}
	} else {
		if (m_tActivated != 0) {
			m_nDlActiveTime += tNow - m_tActivated;
			m_tActivated = 0;
		}
	}
}
CPartFile::GetDlActiveTime() const
3855 uint32 nDlActiveTime
= m_nDlActiveTime
;
3856 if (m_tActivated
!= 0) {
3857 nDlActiveTime
+= time(NULL
) - m_tActivated
;
3859 return nDlActiveTime
;

uint8 CPartFile::GetStatus(bool ignorepause) const
{
	if ( (!m_paused && !m_insufficient) ||
	     status == PS_ERROR ||
	     status == PS_COMPLETING ||
	     status == PS_COMPLETE ||
	     ignorepause ) {
		return status;
	} else if ( m_insufficient ) {
		return PS_INSUFFICIENT;
	} else {
		return PS_PAUSED;
	}
}

void CPartFile::AddDeadSource(const CUpDownClient* client)
{
	m_deadSources.AddDeadSource( client );
}


bool CPartFile::IsDeadSource(const CUpDownClient* client)
{
	return m_deadSources.IsDeadSource( client );
}

void CPartFile::SetFileName(const CPath& fileName)
{
	CKnownFile* pFile = theApp->sharedfiles->GetFileByID(GetFileHash());

	bool is_shared = (pFile && pFile == this);

	if (is_shared) {
		// The file is shared, we must clear the search keywords so we don't
		// publish the old name anymore.
		theApp->sharedfiles->RemoveKeywords(this);
	}

	CKnownFile::SetFileName(fileName);

	if (is_shared) {
		// And of course, we must advertise the new name if the file is shared.
		theApp->sharedfiles->AddKeywords(this);
	}

	UpdateDisplayedInfo(true);
}

uint16 CPartFile::GetMaxSources() const
{
	// Kept this way until we import the per-file max sources setting
	return thePrefs::GetMaxSourcePerFile();
}


uint16 CPartFile::GetMaxSourcePerFileSoft() const
{
	unsigned int temp = ((unsigned int)GetMaxSources() * 9L) / 10;
	if (temp > MAX_SOURCES_FILE_SOFT) {
		return MAX_SOURCES_FILE_SOFT;
	}
	return temp;
}


uint16 CPartFile::GetMaxSourcePerFileUDP() const
{
	unsigned int temp = ((unsigned int)GetMaxSources() * 3L) / 4;
	if (temp > MAX_SOURCES_FILE_UDP) {
		return MAX_SOURCES_FILE_UDP;
	}
	return temp;
}

#define DROP_FACTOR 2

CUpDownClient* CPartFile::GetSlowerDownloadingClient(uint32 speed, CUpDownClient* caller)
{
	// printf("Start slower source calculation\n");
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = *it++;
		if ((cur_src->GetDownloadState() == DS_DOWNLOADING) && (cur_src != caller)) {
			uint32 factored_bytes_per_second = static_cast<uint32>(
				(cur_src->GetKBpsDown() * 1024) * DROP_FACTOR);
			if ( factored_bytes_per_second < speed ) {
				// printf("Selecting source %p to drop: %d < %d\n", cur_src, factored_bytes_per_second, speed);
				// printf("End slower source calculation\n");
				return cur_src;
			} else {
				// printf("Not selecting source %p to drop: %d > %d\n", cur_src, factored_bytes_per_second, speed);
			}
		}
	}
	// printf("End slower source calculation\n");
	return NULL;
}

void CPartFile::AllocationFinished()
{
	if (!m_hpartfile.Open(m_PartPath, CFile::read_write)) {
		AddLogLineM(false, CFormat(_("ERROR: Failed to open partfile '%s'")) % GetFullName());
		SetPartFileStatus(PS_ERROR);
	}
}

// File_checked_for_headers