// This file is part of the aMule Project.
//
// Copyright (c) 2003-2008 aMule Team ( admin@amule.org / http://www.amule.org )
// Copyright (c) 2002 Merkur ( devs@emule-project.net / http://www.emule-project.net )
//
// Any parts of this program derived from the xMule, lMule or eMule project,
// or contributed by third-party developers are copyrighted by their
// respective authors.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
#include "PartFile.h"		// Interface declarations.

#include "config.h"		// Needed for VERSION

#include <protocol/kad/Constants.h>
#include <protocol/ed2k/Client2Client/TCP.h>
#include <protocol/Protocols.h>
#include <common/DataFileVersion.h>
#include <common/Constants.h>
#include <tags/FileTags.h>

#include <wx/tokenzr.h>		// Needed for wxStringTokenizer

#include "KnownFileList.h"	// Needed for CKnownFileList
#include "UploadQueue.h"	// Needed for CFileHash
#include "IPFilter.h"		// Needed for CIPFilter
#include "Server.h"		// Needed for CServer
#include "ServerConnect.h"	// Needed for CServerConnect
#include "updownclient.h"	// Needed for CUpDownClient
#include "MemFile.h"		// Needed for CMemFile
#include "Preferences.h"	// Needed for CPreferences
#include "DownloadQueue.h"	// Needed for CDownloadQueue
#include "amule.h"		// Needed for theApp
#include "ED2KLink.h"		// Needed for CED2KLink
#include "Packet.h"		// Needed for CTag
#include "SearchList.h"		// Needed for CSearchFile
#include "ClientList.h"		// Needed for clientlist
#include "Statistics.h"		// Needed for theStats

#include <common/Format.h>	// Needed for CFormat
#include <common/FileFunctions.h>	// Needed for GetLastModificationTime
#include "ThreadTasks.h"	// Needed for CHashingTask/CCompletionTask
#include "GuiEvents.h"		// Needed for Notify_*
#include "DataToText.h"		// Needed for OriginToText()

#include "kademlia/kademlia/Kademlia.h"
#include "kademlia/kademlia/Search.h"
SFileRating::SFileRating(const wxString &u, const wxString &f, sint16 r, const wxString &c)

SFileRating::SFileRating(const SFileRating &fr)
	UserName(fr.UserName),
	FileName(fr.FileName),

SFileRating::SFileRating(const CUpDownClient &client)
	UserName(client.GetUserName()),
	FileName(client.GetClientFilename()),
	Rating(client.GetFileRating()),
	Comment(client.GetFileComment())

SFileRating::~SFileRating()

typedef std::list<Chunk> ChunkList;
CPartFile::CPartFile()

CPartFile::CPartFile(CSearchFile* searchresult)
	m_abyFileHash = searchresult->GetFileHash();
	SetFileName(searchresult->GetFileName());
	SetFileSize(searchresult->GetFileSize());

	for (unsigned int i = 0; i < searchresult->m_taglist.size(); ++i) {
		const CTag& pTag = searchresult->m_taglist[i];

		bool bTagAdded = false;
		if (pTag.GetNameID() == 0 && !pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {
				{ wxT(FT_ED2K_MEDIA_ARTIST),	2 },
				{ wxT(FT_ED2K_MEDIA_ALBUM),	2 },
				{ wxT(FT_ED2K_MEDIA_TITLE),	2 },
				{ wxT(FT_ED2K_MEDIA_LENGTH),	2 },
				{ wxT(FT_ED2K_MEDIA_BITRATE),	3 },
				{ wxT(FT_ED2K_MEDIA_CODEC),	2 }

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if ( pTag.GetType() == _aMetaTags[t].nType &&
					(pTag.GetName() == _aMetaTags[t].pszName)) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {
					// skip "length" tags with "0: 0" values
					if (pTag.GetName() == wxT(FT_ED2K_MEDIA_LENGTH)) {
						if (pTag.GetStr().IsSameAs(wxT("0: 0")) ||
							pTag.GetStr().IsSameAs(wxT("0:0"))) {
					// skip "bitrate" tags with '0' values
					if ((pTag.GetName() == wxT(FT_ED2K_MEDIA_BITRATE)) && !pTag.GetInt()) {
					AddDebugLogLineM( false, logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);
		} else if (pTag.GetNameID() != 0 && pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {
			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if (pTag.GetType() == _aMetaTags[t].nType && pTag.GetNameID() == _aMetaTags[t].nID) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {
					AddDebugLogLineM( false, logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
						pTag.GetFullInfo() );
					m_taglist.push_back(pTag);
			AddDebugLogLineM( false, logPartFile,
				wxT("CPartFile::CPartFile(CSearchFile*): ignored tag ") +
				pTag.GetFullInfo() );
CPartFile::CPartFile(const CED2KFileLink* fileLink)
	SetFileName(CPath(fileLink->GetName()));
	SetFileSize(fileLink->GetSize());
	m_abyFileHash = fileLink->GetHashKey();

	if (fileLink->m_hashset) {
		if (!LoadHashsetFromFile(fileLink->m_hashset, true)) {
			AddDebugLogLineM(true, logPartFile, wxT("eD2K link contained invalid hashset: ") + fileLink->GetLink());

CPartFile::~CPartFile()
	// Barry - Ensure all buffered data is written

	// eMule had the same problem with an lseek error ... and overrode it with a simple
	// check for INVALID_HANDLE_VALUE (which, btw, does not exist on Linux).
	// So we just assume < 0 means error and > 2 means ok (0 stdin, 1 stdout, 2 stderr).
	// But where does this wrong handle come from?

	if (m_hpartfile.IsOpened() && (m_hpartfile.fd() > 2)) {

	if (m_hpartfile.IsOpened() && (m_hpartfile.fd() > 2)) {
		// Update met file (with current directory entry)

	DeleteContents(m_gaplist);

	std::list<PartFileBufferedData*>::iterator it = m_BufferedData_list.begin();
	for (; it != m_BufferedData_list.end(); ++it) {
		PartFileBufferedData* item = *it;

	wxASSERT(m_SrcList.empty());
	wxASSERT(m_A4AFsrclist.empty());
void CPartFile::CreatePartFile()
	// use the lowest free partfile number for the new file (InterCeptor)
		m_partmetfilename = CPath(wxString::Format(wxT("%03i.part.met"), i));
		m_fullname = thePrefs::GetTempDir().JoinPaths(m_partmetfilename);
	} while (m_fullname.FileExists());

	wxString strPartName = m_partmetfilename.RemoveExt().GetRaw();
	m_taglist.push_back(CTagString(FT_PARTFILENAME, strPartName));

	Gap_Struct* gap = new Gap_Struct;
	gap->end = GetFileSize() - 1;
	m_gaplist.push_back(gap);

	CPath partPath = m_fullname.RemoveExt();
	if (m_hpartfile.Create(partPath, true)) {
		if (!m_hpartfile.Open(partPath, CFile::read_write)) {
			AddLogLineM(false, _("ERROR: Failed to open partfile"));
			SetPartFileStatus(PS_ERROR);
		AddLogLineM(false, _("ERROR: Failed to create partfile"));
		SetPartFileStatus(PS_ERROR);

	SetFilePath(thePrefs::GetTempDir());

	if (thePrefs::GetAllocFullPart()) {
		//#warning Code for full file alloc - should be done on a thread.

	m_hashsetneeded = GetED2KPartHashCount();

	SetActive(theApp->IsConnected());
uint8 CPartFile::LoadPartFile(const CPath& in_directory, const CPath& filename, bool from_backup, bool getsizeonly)
	bool isnewstyle = false;
	uint8 version, partmettype = PMT_UNKNOWN;

	std::map<uint16, Gap_Struct*> gap_map; // Slugfiller

	m_partmetfilename = filename;
	m_filePath = in_directory;
	m_fullname = m_filePath.JoinPaths(m_partmetfilename);

	// read the data from the part.met file
	CPath curMetFilename = m_fullname;
		curMetFilename = curMetFilename.AppendExt(PARTMET_BAK_EXT);
		AddLogLineM(false, CFormat( _("Trying to load backup of met-file from %s") )

	CFile metFile(curMetFilename, CFile::read);
	if (!metFile.IsOpened()) {
		AddLogLineM(false, CFormat( _("Error: Failed to open part.met file: %s ==> %s") )
	} else if (metFile.GetLength() == 0) {
		AddLogLineM(false, CFormat( _("Error: part.met file is 0 size: %s ==> %s") )

		version = metFile.ReadUInt8();
		if (version != PARTFILE_VERSION && version != PARTFILE_SPLITTEDVERSION && version != PARTFILE_VERSION_LARGEFILE) {
			//if (version == 83) return ImportShareazaTempFile(...)
			AddLogLineM(false, CFormat( _("Error: Invalid part.met file version: %s ==> %s") )

		isnewstyle = (version == PARTFILE_SPLITTEDVERSION);
		partmettype = isnewstyle ? PMT_SPLITTED : PMT_DEFAULTOLD;

			metFile.Seek(24, wxFromStart);
			metFile.Read(test, 4);
			metFile.Seek(1, wxFromStart);
			if (test[0] == 0 && test[1] == 0 && test[2] == 2 && test[3] == 1) {
				isnewstyle = true;	// eDonkey's so-called "old part style"
				partmettype = PMT_NEWOLD;

			uint32 temp = metFile.ReadUInt32();
			if (temp == 0) {	// 0.48 partmets - different again
				LoadHashsetFromFile(&metFile, false);
				metFile.Seek(2, wxFromStart);
				LoadDateFromFile(&metFile);
				m_abyFileHash = metFile.ReadHash();
			LoadDateFromFile(&metFile);
			LoadHashsetFromFile(&metFile, false);

		uint32 tagcount = metFile.ReadUInt32();
		for (uint32 j = 0; j < tagcount; ++j) {
			CTag newtag(metFile, true);
				(newtag.GetNameID() == FT_FILESIZE ||
				 newtag.GetNameID() == FT_FILENAME))) {
				switch (newtag.GetNameID()) {
						if (!GetFileName().IsOk()) {
							// If it's not empty, we already loaded the unicoded one
							SetFileName(CPath(newtag.GetStr()));
					case FT_LASTSEENCOMPLETE: {
						lastseencomplete = newtag.GetInt();
						SetFileSize(newtag.GetInt());
					case FT_TRANSFERRED: {
						transferred = newtag.GetInt();
						//#warning needs setfiletype string
						//SetFileType(newtag.GetStr());
						m_category = newtag.GetInt();
						if (m_category > theApp->glob_prefs->GetCatCount() - 1 ) {
					case FT_OLDDLPRIORITY:
					case FT_DLPRIORITY: {
							m_iDownPriority = newtag.GetInt();
							if (m_iDownPriority == PR_AUTO) {
								m_iDownPriority = PR_HIGH;
								SetAutoDownPriority(true);
								if ( m_iDownPriority != PR_LOW &&
									 m_iDownPriority != PR_NORMAL &&
									 m_iDownPriority != PR_HIGH)
									m_iDownPriority = PR_NORMAL;
								SetAutoDownPriority(false);
						m_paused = newtag.GetInt();
						m_stopped = m_paused;
					case FT_OLDULPRIORITY:
					case FT_ULPRIORITY: {
							SetUpPriority(newtag.GetInt(), false);
							if (GetUpPriority() == PR_AUTO) {
								SetUpPriority(PR_HIGH, false);
								SetAutoUpPriority(true);
								SetAutoUpPriority(false);
					case FT_KADLASTPUBLISHSRC: {
						SetLastPublishTimeKadSrc(newtag.GetInt(), 0);
						if (GetLastPublishTimeKadSrc() > (uint32)time(NULL) + KADEMLIAREPUBLISHTIMES) {
							// There is a possibility that an older client saved a random number here. This checks for that.
							SetLastPublishTimeKadSrc(0, 0);
					case FT_KADLASTPUBLISHNOTES: {
						SetLastPublishTimeKadNotes(newtag.GetInt());
					// old tags: as long as they are not needed, take the chance to purge them
					case FT_KADLASTPUBLISHKEY:
					case FT_DL_ACTIVE_TIME:
						if (newtag.IsInt()) {
							m_nDlActiveTime = newtag.GetInt();
					case FT_CORRUPTEDPARTS: {
						wxASSERT(m_corrupted_list.empty());
						wxString strCorruptedParts(newtag.GetStr());
						wxStringTokenizer tokenizer(strCorruptedParts, wxT(","));
						while ( tokenizer.HasMoreTokens() ) {
							wxString token = tokenizer.GetNextToken();
							if (token.ToULong(&uPart)) {
								if (uPart < GetPartCount() && !IsCorruptedPart(uPart)) {
									m_corrupted_list.push_back(uPart);
							hash.DecodeBase32(newtag.GetStr()) == CAICHHash::GetHashSize();
						wxASSERT(hashSizeOk);
							m_pAICHHashSet->SetMasterHash(hash, AICH_VERIFIED);
					case FT_ATTRANSFERRED: {
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (uint64)newtag.GetInt());
					case FT_ATTRANSFERREDHI: {
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (((uint64)newtag.GetInt()) << 32));
					case FT_ATREQUESTED: {
						statistic.SetAllTimeRequests(newtag.GetInt());
						statistic.SetAllTimeAccepts(newtag.GetInt());
					// Start Changes by Slugfiller for better exception handling
						wxCharBuffer tag_ansi_name = newtag.GetName().ToAscii();
						char gap_mark = tag_ansi_name ? tag_ansi_name[0u] : 0;
						if ( newtag.IsInt() && (newtag.GetName().Length() > 1) &&
							((gap_mark == FT_GAPSTART) ||
							 (gap_mark == FT_GAPEND))) {
							Gap_Struct *gap = NULL;
							unsigned long int gapkey;
							if (newtag.GetName().Mid(1).ToULong(&gapkey)) {
								if ( gap_map.find( gapkey ) == gap_map.end() ) {
									gap = new Gap_Struct;
									gap_map[gapkey] = gap;
									gap->start = (uint64)-1;
									gap->end = (uint64)-1;
									gap = gap_map[ gapkey ];
								if (gap_mark == FT_GAPSTART) {
									gap->start = newtag.GetInt();
								if (gap_mark == FT_GAPEND) {
									gap->end = newtag.GetInt() - 1;
								printf("Wrong gap map key while reading met file!\n");
					// End Changes by Slugfiller for better exception handling
					m_taglist.push_back(newtag);
			// Nothing. Else, nothing.

	// load the hashsets from the hybrid-style part.met
	if (isnewstyle && !getsizeonly && (metFile.GetPosition() < metFile.GetLength())) {
		metFile.Seek(1, wxFromCurrent);

		uint16 parts = GetPartCount();	// assuming we will get all hashsets
		for (uint16 i = 0; i < parts && (metFile.GetPosition() + 16 < metFile.GetLength()); ++i) {
			CMD4Hash cur_hash = metFile.ReadHash();
			m_hashlist.push_back(cur_hash);

	if (!m_hashlist.empty()) {
		CreateHashFromHashlist(m_hashlist, &checkhash);
		if (m_abyFileHash == checkhash) {

	} catch (const CInvalidPacket& e) {
		AddLogLineM(true, CFormat(wxT("Error: %s (%s) is corrupt (bad tags: %s), unable to load file."))
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logPartFile, CFormat( wxT("IO failure while loading '%s': %s") )
	} catch (const CEOFException& e) {
		AddLogLineM(true, CFormat( _("Error: %s (%s) is corrupt (wrong tagcount), unable to load file.") )
		AddLogLineM(true, _("Trying to recover file info..."));

		// A file is safe to recover if it has ...
		// We have the filesize, try to recover the other needed info.
		// Do we need to check gaps? I think not,
		// because they are checked below. The worst-case
		// scenario will only mark the file as 0 bytes downloaded.

		if (!GetFileName().IsOk()) {
			// Not critical, let's put a random filename.
				"Recovering no-named file - will try to recover it as RecoveredFile.dat"));
			SetFileName(CPath(wxT("RecoveredFile.dat")));
			_("Recovered all available file info :D - Trying to use it..."));
			AddLogLineM(true, _("Unable to recover file info :("));
	// Now to flush the map into the list (Slugfiller)
	std::map<uint16, Gap_Struct*>::iterator it = gap_map.begin();
	for ( ; it != gap_map.end(); ++it ) {
		Gap_Struct* gap = it->second;
		// SLUGFILLER: SafeHash - revised code, and extra safety
		if ( (gap->start != (uint64)-1) &&
			 (gap->end != (uint64)-1) &&
			 gap->start <= gap->end &&
			 gap->start < GetFileSize()) {
			if (gap->end >= GetFileSize()) {
				gap->end = GetFileSize() - 1;	// Clipping
			AddGap(gap->start, gap->end);	// All tags accounted for, use safe adding
	// SLUGFILLER: SafeHash

	// check if this is a backup
	if ( m_fullname.GetExt().MakeLower() == wxT("backup" )) {
		m_fullname = m_fullname.RemoveExt();

	// open permanent handle
	CPath partFilePath = m_fullname.RemoveExt();
	if ( !m_hpartfile.Open(partFilePath, CFile::read_write)) {
		AddLogLineM(false, CFormat( _("Failed to open %s (%s)") )

	SetPartFileStatus(PS_EMPTY);

		// SLUGFILLER: SafeHash - final safety, make sure any missing part of the file is a gap
		if (m_hpartfile.GetLength() < GetFileSize())
			AddGap(m_hpartfile.GetLength(), GetFileSize() - 1);
		// Goes both ways - the partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			AddDebugLogLineM( true, logPartFile, CFormat( wxT("Partfile \"%s\" is too large! Truncating %llu bytes.") ) % GetFileName() % (m_hpartfile.GetLength() - GetFileSize()));
			m_hpartfile.SetLength(GetFileSize());
		// SLUGFILLER: SafeHash
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM( true, logPartFile, CFormat( wxT("Error while accessing partfile \"%s\": %s") ) % GetFileName() % e.what());
		SetPartFileStatus(PS_ERROR);

	// check hashcount, file status etc.
	if (GetHashCount() != GetED2KPartHashCount()) {
		m_hashsetneeded = true;
		m_hashsetneeded = false;
		for (size_t i = 0; i < m_hashlist.size(); ++i) {
			if (IsComplete(i * PARTSIZE, ((i + 1) * PARTSIZE) - 1)) {
				SetPartFileStatus(PS_READY);

	if (m_gaplist.empty()) {	// is this file complete already?

	if (!isnewstyle) {	// not for importing
		const time_t file_date = CPath::GetModificationTime(partFilePath);
		if (m_lastDateChanged != file_date) {
			// It's pointless to rehash an empty file, since the case
			// where a user has zero'd a file is handled above ...
			if (m_hpartfile.GetLength()) {
				AddLogLineM(false, CFormat( _("Warning: %s might be corrupted (%i)") )
					% (m_lastDateChanged - file_date) );
				SetPartFileStatus(PS_WAITINGFORHASH);

				CPath partFileName = m_partmetfilename.RemoveExt();
				CThreadScheduler::AddTask(new CHashingTask(m_filePath, partFileName, this));

	UpdateCompletedInfos();
	if (completedsize > transferred) {
		m_iGainDueToCompression = completedsize - transferred;
	} else if (completedsize != transferred) {
		m_iLostDueToCorruption = transferred - completedsize;
bool CPartFile::SavePartFile(bool Initial)
		case PS_WAITINGFORHASH:

	/* Don't write anything to disk if less than 100 KB of free space is left. */
	sint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if ((free != wxInvalidOffset) && (free < (100 * 1024))) {

		if (!m_fullname.RemoveExt().FileExists()) {
			throw wxString(wxT(".part file not found"));

		uint32 lsc = lastseencomplete;

		CPath::BackupFile(m_fullname, wxT(".backup"));
		CPath::RemoveFile(m_fullname);

		file.Open(m_fullname, CFile::write);
		if (!file.IsOpened()) {
			throw wxString(wxT("Failed to open part.met file"));

		file.WriteUInt8(IsLargeFile() ? PARTFILE_VERSION_LARGEFILE : PARTFILE_VERSION);
		file.WriteUInt32(CPath::GetModificationTime(m_fullname.RemoveExt()));
		file.WriteHash(m_abyFileHash);

		uint16 parts = m_hashlist.size();
		file.WriteUInt16(parts);
		for (int x = 0; x < parts; ++x) {
			file.WriteHash(m_hashlist[x]);

#define FIXED_TAGS 15
		uint32 tagcount = m_taglist.size() + FIXED_TAGS + (m_gaplist.size() * 2);
		if (!m_corrupted_list.empty()) {
		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)) {
		if (GetLastPublishTimeKadSrc()) {
		if (GetLastPublishTimeKadNotes()) {
		if (GetDlActiveTime()) {

		file.WriteUInt32(tagcount);
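		// Illustrative example with made-up numbers (not from the original code):
		// a file with 3 entries in m_taglist, 4 gaps and a non-empty corrupted
		// list would count 3 + FIXED_TAGS + (4 * 2) + 1 = 27 tags here, plus one
		// more for each optional AICH / Kad-publish / active-time tag that applies.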
		//#warning Kry - Where are "lost to corruption" and "gained by compression"?

		// 0 (unicoded part file name)
		// We write it with BOM to keep eMule compatibility. Note that the 'printable' filename is saved,
		// as presently the filename does not represent an actual file.
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file, utf8strOptBOM );
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file );	// 1

		CTagIntSized( FT_FILESIZE, GetFileSize(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 2
		CTagIntSized( FT_TRANSFERRED, transferred, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 3
		CTagInt32( FT_STATUS, (m_paused ? 1 : 0)).WriteTagToFile( &file );	// 4

		if ( IsAutoDownPriority() ) {
			CTagInt32( FT_DLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 6
			CTagInt32( FT_DLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 6

		CTagInt32( FT_LASTSEENCOMPLETE, lsc ).WriteTagToFile( &file );	// 7

		if ( IsAutoUpPriority() ) {
			CTagInt32( FT_ULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 9
			CTagInt32( FT_ULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 9

		CTagInt32(FT_CATEGORY, m_category).WriteTagToFile( &file );	// 10
		CTagInt32(FT_ATTRANSFERRED, statistic.GetAllTimeTransferred() & 0xFFFFFFFF).WriteTagToFile( &file );	// 11
		CTagInt32(FT_ATTRANSFERREDHI, statistic.GetAllTimeTransferred() >> 32).WriteTagToFile( &file );	// 12
		CTagInt32(FT_ATREQUESTED, statistic.GetAllTimeRequests()).WriteTagToFile( &file );	// 13
		CTagInt32(FT_ATACCEPTED, statistic.GetAllTimeAccepts()).WriteTagToFile( &file );	// 14

		// corrupted part infos
		if (!m_corrupted_list.empty()) {
			wxString strCorruptedParts;
			std::list<uint16>::iterator it = m_corrupted_list.begin();
			for (; it != m_corrupted_list.end(); ++it) {
				uint16 uCorruptedPart = *it;
				if (!strCorruptedParts.IsEmpty()) {
					strCorruptedParts += wxT(",");
				strCorruptedParts += wxString::Format(wxT("%u"), (unsigned)uCorruptedPart);
			wxASSERT( !strCorruptedParts.IsEmpty() );

			CTagString( FT_CORRUPTEDPARTS, strCorruptedParts ).WriteTagToFile( &file );	// 11?

		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)) {
			CTagString aichtag(FT_AICH_HASH, m_pAICHHashSet->GetMasterHash().GetString() );
			aichtag.WriteTagToFile(&file);	// 12?

		if (GetLastPublishTimeKadSrc()) {
			CTagInt32(FT_KADLASTPUBLISHSRC, GetLastPublishTimeKadSrc()).WriteTagToFile(&file);	// 15?

		if (GetLastPublishTimeKadNotes()) {
			CTagInt32(FT_KADLASTPUBLISHNOTES, GetLastPublishTimeKadNotes()).WriteTagToFile(&file);	// 16?

		if (GetDlActiveTime()) {
			CTagInt32(FT_DL_ACTIVE_TIME, GetDlActiveTime()).WriteTagToFile(&file);	// 17

		for (uint32 j = 0; j < (uint32)m_taglist.size(); ++j) {
			m_taglist[j].WriteTagToFile(&file);

		std::list<Gap_Struct*>::iterator it = m_gaplist.begin();
		for (; it != m_gaplist.end(); ++it) {
			wxString tagName = wxString::Format(wxT(" %u"), i_pos);
			// gap start = first missing byte, but gap end = first non-missing byte
			// in eDonkey; I think it's easier to use the real limits
			tagName[0] = FT_GAPSTART;
			CTagIntSized(tagName, (*it)->start, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

			tagName[0] = FT_GAPEND;
			CTagIntSized(tagName, ((*it)->end + 1), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );
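			// Illustrative example: for the gap with i_pos == 3 the name buffer is
			// " 3"; overwriting its first character yields the tag names
			// "<FT_GAPSTART>3" and "<FT_GAPEND>3", so both tags of one gap share the
			// same numeric key that LoadPartFile() later parses into gap_map.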
	} catch (const wxString& error) {
		AddLogLineM(false, CFormat( _("ERROR while saving partfile: %s (%s ==> %s)") )

		wxString err = CFormat( _("ERROR while saving partfile: %s (%s ==> %s)") )
		printf("%s\n", (const char*)unicode2char(err));
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logPartFile, wxT("IO failure while saving partfile: ") + e.what());
		printf("IO failure while saving partfile: %s\n", (const char*)unicode2char(e.what()));

		CPath::RemoveFile(m_fullname.AppendExt(wxT(".backup")));

	sint64 metLength = m_fullname.GetFileSize();
	if (metLength == wxInvalidOffset) {
		theApp->ShowAlert( CFormat( _("Could not retrieve length of '%s' - using %s file.") )
		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
	} else if (metLength == 0) {
		// Don't back it up if it's 0 size, but raise a warning!!!
		theApp->ShowAlert( CFormat( _("'%s' is 0 size somehow - using %s file.") )
		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
		// no error, just backup
		CPath::BackupFile(m_fullname, PARTMET_BAK_EXT);
void CPartFile::SaveSourceSeeds()
#define MAX_SAVED_SOURCES 10
	// Kry - Source seeds
	// Based on a feature request, this saves the last MAX_SAVED_SOURCES
	// sources of the file, giving a 'seed' for the next run.
	// We save the last sources because:
	// 1 - They could be the hardest to get
	// 2 - They will more probably be available
	// However, downloading sources take preference, because
	// we probably have more credits with them.
	// Anyway, source exchange will get us the rest of the sources.
	// This feature is currently used only on rare files (< 20 sources).
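	// Sketch of the .seeds layout as written below (v3 format): a leading 0 byte
	// (format marker), a 1-byte seed count, then per seed the hybrid user ID
	// (4 bytes), port (2 bytes), user hash (16 bytes) and one crypt-options byte,
	// followed by a 4-byte save timestamp that LoadSourceSeeds() uses to discard
	// seeds older than two hours.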
	if (GetSourceCount() > 20) {

	CClientPtrList source_seeds;

	CClientPtrList::iterator it = m_downloadingSourcesList.begin();
	for ( ; it != m_downloadingSourcesList.end() && n_sources < MAX_SAVED_SOURCES; ++it) {
		CUpDownClient *cur_src = *it;
		if (!cur_src->HasLowID()) {
			source_seeds.push_back(cur_src);

	if (n_sources < MAX_SAVED_SOURCES) {
		// Not enough downloading sources to fill the list, going to the sources list
		if (GetSourceCount() > 0) {
			SourceSet::reverse_iterator rit = m_SrcList.rbegin();
			for ( ; ((rit != m_SrcList.rend()) && (n_sources < MAX_SAVED_SOURCES)); ++rit) {
				CUpDownClient* cur_src = *rit;
				if (!cur_src->HasLowID()) {
					source_seeds.push_back(cur_src);

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));

	file.Create(seedsPath, true);
	if (!file.IsOpened()) {
		AddLogLineM(false, CFormat( _("Failed to save part.met.seeds file for %s") )

		file.WriteUInt8(0); // v3, to avoid v2 clients choking on it.
		file.WriteUInt8(source_seeds.size());

		CClientPtrList::iterator it2 = source_seeds.begin();
		for (; it2 != source_seeds.end(); ++it2) {
			CUpDownClient* cur_src = *it2;
			file.WriteUInt32(cur_src->GetUserIDHybrid());
			file.WriteUInt16(cur_src->GetUserPort());
			file.WriteHash(cur_src->GetUserHash());
			// CryptSettings - See SourceExchange V4
			const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
			const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
			const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
			const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
			file.WriteUInt8(byCryptOptions);
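			// Bit layout as packed above (example): a source that supports and
			// requests obfuscation but does not require it is stored as 0x03
			// (bit 0 = supports, bit 1 = requests, bit 2 = requires).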
		/* v2: Added to keep track of too old seeds */
		file.WriteUInt32(wxDateTime::Now().GetTicks());

		AddLogLineM(false, CFormat( wxPLURAL("Saved %i source seed for partfile: %s (%s)", "Saved %i source seeds for partfile: %s (%s)", n_sources) )
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logPartFile, CFormat( wxT("Error saving partfile's seeds file (%s - %s): %s") )

		CPath::RemoveFile(seedsPath);
void CPartFile::LoadSourceSeeds()
	CMemFile sources_data;

	bool valid_sources = false;

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));
	if (!seedsPath.FileExists()) {

	CFile file(seedsPath, CFile::read);
	if (!file.IsOpened()) {
		AddLogLineM(false, CFormat( _("Partfile %s (%s) has no seeds file") )

	if (file.GetLength() <= 1) {
		AddLogLineM(false, CFormat( _("Partfile %s (%s) has a void seeds file") )

	uint8 src_count = file.ReadUInt8();

	bool bUseSX2Format = (src_count == 0);

	if (bUseSX2Format) {
		src_count = file.ReadUInt8();

	sources_data.WriteUInt16(src_count);

	for (int i = 0; i < src_count; ++i) {
		uint32 dwID = file.ReadUInt32();
		uint16 nPort = file.ReadUInt16();

		sources_data.WriteUInt32(bUseSX2Format ? dwID : wxUINT32_SWAP_ALWAYS(dwID));
		sources_data.WriteUInt16(nPort);
		sources_data.WriteUInt32(0);
		sources_data.WriteUInt16(0);

		if (bUseSX2Format) {
			sources_data.WriteHash(file.ReadHash());
			sources_data.WriteUInt8(file.ReadUInt8());

	// v2: Added to keep track of too old seeds
	time_t time = (time_t)file.ReadUInt32();

	// The time frame is 2 hours. More than enough to compile
	// your new aMule version!
	if ((time + MIN2S(120)) >= wxDateTime::Now().GetTicks()) {
		valid_sources = true;
		// v1 has no time data. We can safely use
		// the sources; next time they will be saved with a timestamp.
		valid_sources = true;

	if (valid_sources) {
		sources_data.Seek(0);
		AddClientSources(&sources_data, SF_SOURCE_SEEDS, bUseSX2Format ? 4 : 1, bUseSX2Format);

	} catch (const CSafeIOException& e) {
		AddLogLineM(false, CFormat( _("Error reading partfile's seeds file (%s - %s): %s") )
void CPartFile::PartFileHashFinished(CKnownFile* result)
	m_lastDateChanged = result->m_lastDateChanged;
	bool errorfound = false;
	if (GetED2KPartHashCount() == 0) {
		if (IsComplete(0, GetFileSize() - 1)) {
			if (result->GetFileHash() != GetFileHash()) {
					"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
					"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
					% result->GetFileHash().Encode()
					% GetFileHash().Encode() );
				AddGap(0, GetFileSize() - 1);

		for (size_t i = 0; i < m_hashlist.size(); ++i) {
			// Kry - trel_ar's completed parts check on rehashing.
			// Very nice feature, if a file is completed but .part.met doesn't believe it,
			if (!( i < result->GetHashCount() && (result->GetPartHash(i) == GetPartHash(i)))) {
				if (IsComplete(i * PARTSIZE, ((i + 1) * PARTSIZE) - 1)) {
					if ( i < result->GetHashCount() )
						wronghash = result->GetPartHash(i);
						"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
						"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
						GetED2KPartHashCount())
						% GetED2KPartHashCount()
						% wronghash.Encode()
						% GetPartHash(i).Encode() );
						((uint64)(((i + 1) * PARTSIZE) - 1) >= GetFileSize()) ?
							GetFileSize() - 1 : ((i + 1) * PARTSIZE) - 1);
				if (!IsComplete(i * PARTSIZE, ((i + 1) * PARTSIZE) - 1)) {
					AddLogLineM(false, CFormat( _("Found completed part (%i) in %s") )
						((uint64)(((i + 1) * PARTSIZE) - 1) >= GetFileSize()) ?
							GetFileSize() - 1 : ((i + 1) * PARTSIZE) - 1);
					RemoveBlockFromList(i * PARTSIZE,
						((uint64)(((i + 1) * PARTSIZE) - 1) >= GetFileSize()) ?
							GetFileSize() - 1 : ((i + 1) * PARTSIZE) - 1);

		result->GetAICHHashset()->GetStatus() == AICH_HASHSETCOMPLETE &&
		status == PS_COMPLETING) {
		delete m_pAICHHashSet;
		m_pAICHHashSet = result->GetAICHHashset();
		result->SetAICHHashset(NULL);
		m_pAICHHashSet->SetOwner(this);
	} else if (status == PS_COMPLETING) {
		AddDebugLogLineM(false, logPartFile,
			CFormat(wxT("Failed to store new AICH Hashset for completed file: %s"))

	if (status == PS_COMPLETING) {
	AddLogLineM(false, CFormat( _("Finished rehashing %s") ) % GetFileName());
		SetStatus(PS_READY);
	SetStatus(PS_READY);
	theApp->sharedfiles->SafeAddKFile(this);
void CPartFile::AddGap(uint64 start, uint64 end)
	std::list<Gap_Struct*>::iterator it = m_gaplist.begin();
	while (it != m_gaplist.end()) {
		std::list<Gap_Struct*>::iterator it2 = it++;
		Gap_Struct* cur_gap = *it2;

		if (cur_gap->start >= start && cur_gap->end <= end) {
			// this gap is inside the new gap - delete
			m_gaplist.erase(it2);
		} else if (cur_gap->start >= start && cur_gap->start <= end) {
			// a part of this gap is in the new gap - extend limit and delete
			m_gaplist.erase(it2);
		} else if (cur_gap->end <= end && cur_gap->end >= start) {
			// a part of this gap is in the new gap - extend limit and delete
			start = cur_gap->start;
			m_gaplist.erase(it2);
		} else if (start >= cur_gap->start && end <= cur_gap->end) {
			// new gap is already inside this gap - return

	Gap_Struct* new_gap = new Gap_Struct;
	new_gap->start = start;
	m_gaplist.push_back(new_gap);
	UpdateDisplayedInfo();
bool CPartFile::IsAlreadyRequested(uint64 start, uint64 end)
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	for (; it != m_requestedblocks_list.end(); ++it) {
		Requested_Block_Struct* cur_block = *it;

		if ((start <= cur_block->EndOffset) && (end >= cur_block->StartOffset)) {
bool CPartFile::GetNextEmptyBlockInPart(uint16 partNumber, Requested_Block_Struct* result)
	Gap_Struct* firstGap;
	Gap_Struct* currentGap;

	// Find the start of this part
	uint64 partStart = (PARTSIZE * partNumber);
	uint64 start = partStart;

	// What is the end limit of this block, i.e. can't go outside the part (or filesize)
	uint64 partEnd = (PARTSIZE * (partNumber + 1)) - 1;
	if (partEnd >= GetFileSize()) {
		partEnd = GetFileSize() - 1;

	// Loop until we find a suitable gap and return true, or run out of gaps and return false
		// Find the first gap from the start position
		std::list<Gap_Struct*>::iterator it = m_gaplist.begin();
		for (; it != m_gaplist.end(); ++it) {
			// Want gaps that overlap start<->partEnd
			if ((currentGap->start <= partEnd) && (currentGap->end >= start)) {
				// Is this the first gap?
				if ((firstGap == NULL) || (currentGap->start < firstGap->start)) {
					firstGap = currentGap;

		// If there are no gaps after start, exit
		if (firstGap == NULL) {

		// Update the start position if the gap starts after the current pos
		if (start < firstGap->start) {
			start = firstGap->start;

		// If this is not within the part, exit
		if (start > partEnd) {

		// Find the end, keeping within the max block size and the part limit
		end = firstGap->end;
		blockLimit = partStart + (BLOCKSIZE * (((start - partStart) / BLOCKSIZE) + 1)) - 1;
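		// Illustrative example (assuming the usual ~180 kB BLOCKSIZE): if 'start'
		// lies 50 kB into the part, ((start - partStart) / BLOCKSIZE) is 0, so
		// blockLimit is the last byte of the part's first block; the requested
		// range is then clipped below so it never crosses a block boundary.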
		if (end > blockLimit) {
		if (end > partEnd) {

		// If this gap has not already been requested, we have found a valid entry
		if (!IsAlreadyRequested(start, end)) {
			// Was this block to be returned?
			if (result != NULL) {
				result->StartOffset = start;
				result->EndOffset = end;
				md4cpy(result->FileID, GetFileHash().GetHash());
				result->transferred = 0;

		// Reposition to the end of that gap

		// If we have tried all gaps, break out of the loop
		if (end == partEnd) {

	// No suitable gap found
void CPartFile::FillGap(uint64 start, uint64 end)
	std::list<Gap_Struct*>::iterator it = m_gaplist.begin();
	while (it != m_gaplist.end()) {
		std::list<Gap_Struct*>::iterator it2 = it++;
		Gap_Struct* cur_gap = *it2;

		if (cur_gap->start >= start && cur_gap->end <= end) {
			// our part fills this gap completely
			m_gaplist.erase(it2);
		} else if (cur_gap->start >= start && cur_gap->start <= end) {
			// a part of this gap is in the part - set limit
			cur_gap->start = end + 1;
		} else if (cur_gap->end <= end && cur_gap->end >= start) {
			// a part of this gap is in the part - set limit
			cur_gap->end = start - 1;
		} else if (start >= cur_gap->start && end <= cur_gap->end) {
			uint64 buffer = cur_gap->end;
			cur_gap->end = start - 1;
			cur_gap = new Gap_Struct;
			cur_gap->start = end + 1;
			cur_gap->end = buffer;
			m_gaplist.insert(++it2, cur_gap);

	UpdateCompletedInfos();
	UpdateDisplayedInfo();
void CPartFile::UpdateCompletedInfos()
	std::list<Gap_Struct*>::iterator it = m_gaplist.begin();
	for (; it != m_gaplist.end(); ) {
		std::list<Gap_Struct*>::iterator it2 = it++;
		Gap_Struct* cur_gap = *it2;

		if ((cur_gap->end > GetFileSize()) || (cur_gap->start >= GetFileSize())) {
			m_gaplist.erase(it2);
			allgaps += cur_gap->end - cur_gap->start + 1;

	if ((!m_gaplist.empty()) || (!m_requestedblocks_list.empty())) {
		percentcompleted = (1.0f - (double)allgaps / GetFileSize()) * 100;
		completedsize = GetFileSize() - allgaps;
		percentcompleted = 100;
		completedsize = GetFileSize();
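	// Illustrative example: for a 700 MB file with 70 MB still missing in gaps,
	// allgaps is 70 MB, so percentcompleted becomes (1 - 70/700) * 100 = 90 and
	// completedsize is 630 MB; with no gaps and no pending blocks, the other
	// branch reports a flat 100%.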
void CPartFile::WritePartStatus(CMemFile* file)
	uint16 parts = GetED2KPartCount();
	file->WriteUInt16(parts);
	while (done != parts) {
		for (uint32 i = 0; i != 8; ++i) {
			if (IsComplete(done * PARTSIZE, ((done + 1) * PARTSIZE) - 1)) {
			if (done == parts) {
		file->WriteUInt8(towrite);

void CPartFile::WriteCompleteSourcesCount(CMemFile* file)
	file->WriteUInt16(m_nCompleteSourcesCount);
uint32 CPartFile::Process(uint32 reducedownload /*in percent*/, uint8 m_icounter)
	uint32 dwCurTick = ::GetTickCount();

	// If the buffer size exceeds the limit, or if it has not been written within the time limit, flush the data
	if ( (m_nTotalBufferData > thePrefs::GetFileBufferSize()) ||
		 (dwCurTick > (m_nLastBufferFlushTime + BUFFER_TIME_LIMIT))) {
		// Avoid flushing while copying the preview file
		if (!m_bPreviewing) {

	// check if we want new sources from server --> MOVED for 16.40 version
	old_trans = transferingsrc;

	if (m_icounter < 10) {
		// Update only downloading sources.
		CClientPtrList::iterator it = m_downloadingSourcesList.begin();
		for ( ; it != m_downloadingSourcesList.end(); ) {
			CUpDownClient *cur_src = *it++;
			if (cur_src->GetDownloadState() == DS_DOWNLOADING) {
				kBpsDown += cur_src->SetDownloadLimit(reducedownload);
		// Update all sources (including downloading sources)
		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
			CUpDownClient* cur_src = *it++;
			switch (cur_src->GetDownloadState()) {
				case DS_DOWNLOADING: {
					kBpsDown += cur_src->SetDownloadLimit(reducedownload);
				case DS_LOWTOLOWIP: {
					if ( cur_src->HasLowID() && !theApp->DoCallback( cur_src ) ) {
						// If we are almost maxed on sources,
						// slowly remove these clients to see
						// if we can find a better source.
						if ( ((dwCurTick - lastpurgetime) > 30000) &&
							 (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile() * .8))) {
							RemoveSource( cur_src );
							lastpurgetime = dwCurTick;
						cur_src->SetDownloadState(DS_ONQUEUE);
				case DS_NONEEDEDPARTS: {
					// we try to purge no-needed sources, even without reaching the limit
					if ((dwCurTick - lastpurgetime) > 40000) {
						if (!cur_src->SwapToAnotherFile(false, false, false, NULL)) {
							// however, we only delete them when reaching the limit
							if (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile() * .8 )) {
								RemoveSource(cur_src);
								lastpurgetime = dwCurTick;
								break; // Johnny-B - nothing more to do here (good eye!)
						lastpurgetime = dwCurTick;

					// doubled reask time for no-needed parts - saves connections and traffic
					if ( !((!cur_src->GetLastAskedTime()) ||
						   (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME * 2)) {

					// Recheck this client to see if it is still NNP..
					// Set to DS_NONE so that we force a TCP reask next time..
					cur_src->SetDownloadState(DS_NONE);

					if (cur_src->IsRemoteQueueFull()) {
						if ( ((dwCurTick - lastpurgetime) > 60000) &&
							 (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile() * .8 )) ) {
							RemoveSource( cur_src );
							lastpurgetime = dwCurTick;
							break; // Johnny-B - nothing more to do here (good eye!)

					// Give UDP up to 1 min to respond..
					// If we are within one min on TCP, do not try..
					if ( theApp->IsConnected() &&
						 ( (!cur_src->GetLastAskedTime()) ||
						   (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME - 20000)) {
						cur_src->UDPReaskForDownload();

					// No break here, since the next case takes care of asking for downloads.
				case DS_TOOMANYCONNS:
				case DS_WAITCALLBACK:
				case DS_WAITCALLBACKKAD: {
					if ( theApp->IsConnected() &&
						 ( (!cur_src->GetLastAskedTime()) ||
						   (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME)) {
						if (!cur_src->AskForDownload()) {
							// I left this break here just as a reminder,
							// just in case we rearrange things..
	/* eMule 0.30c implementation, I give it a try (Creteil) BEGIN ... */
	if (IsA4AFAuto() && ((!m_LastNoNeededCheck) || (dwCurTick - m_LastNoNeededCheck > 900000))) {
		m_LastNoNeededCheck = dwCurTick;
		for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
			CUpDownClient *cur_source = *it++;
			uint8 download_state = cur_source->GetDownloadState();
			if ( download_state != DS_DOWNLOADING
				 && cur_source->GetRequestFile()
				 && ((!cur_source->GetRequestFile()->IsA4AFAuto()) || download_state == DS_NONEEDEDPARTS))
				cur_source->SwapToAnotherFile(false, false, false, this);
	/* eMule 0.30c implementation, I give it a try (Creteil) END ... */

	// swap no-needed partfiles if possible
	if (((old_trans == 0) && (transferingsrc > 0)) || ((old_trans > 0) && (transferingsrc == 0))) {
		SetPartFileStatus(status);

	// Kad source search
	if (GetMaxSourcePerFileUDP() > GetSourceCount()) {
		// Once we can handle lowID users in Kad, we remove the second IsConnected
		if (theApp->downloadqueue->DoKademliaFileRequest() && (Kademlia::CKademlia::GetTotalFile() < KADEMLIATOTALFILE) && (dwCurTick > m_LastSearchTimeKad) && Kademlia::CKademlia::IsConnected() && theApp->IsConnected() && !IsStopped()) {
			theApp->downloadqueue->SetLastKademliaFileRequest();

			if (GetKadFileSearchID()) {
				/* This will never happen anyway. We're talking a
				   1h timespan and searches are at most 45 secs. */
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);

			Kademlia::CUInt128 kadFileID(GetFileHash().GetHash());
			Kademlia::CSearch* pSearch = Kademlia::CSearchManager::PrepareLookup(Kademlia::CSearch::FILE, true, kadFileID);
			AddDebugLogLineM(false, logKadSearch, CFormat(wxT("Preparing a Kad Search for '%s'")) % GetFileName());
				AddDebugLogLineM(false, logKadSearch, CFormat(wxT("Kad lookup started for '%s'")) % GetFileName());
				if (m_TotalSearchesKad < 7) {
					m_TotalSearchesKad++;
				m_LastSearchTimeKad = dwCurTick + (KADEMLIAREASKTIME * m_TotalSearchesKad);
				SetKadFileSearchID(pSearch->GetSearchID());
		if (GetKadFileSearchID()) {
			Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);

	// check if we want new sources from server
	if ( !m_localSrcReqQueued &&
		 ( (!m_lastsearchtime) ||
		   (dwCurTick - m_lastsearchtime) > SERVERREASKTIME) &&
		 theApp->IsConnectedED2K() &&
		 thePrefs::GetMaxSourcePerFileSoft() > GetSourceCount() &&
		m_localSrcReqQueued = true;
		theApp->downloadqueue->SendLocalSrcRequest(this);

	// calculate datarate, set limit etc.

	// Kry - does the 3 / 30 difference produce too much flickering or CPU load?
	if (m_count >= 30) {
		UpdateAutoDownPriority();
		UpdateDisplayedInfo();
		if (m_bPercentUpdated == false) {
			UpdateCompletedInfos();
		m_bPercentUpdated = false;
		if (thePrefs::ShowCatTabInfos()) {
			Notify_ShowUpdateCatTabTitles();

	return (uint32)(kBpsDown * 1024.0);
bool CPartFile::CanAddSource(uint32 userid, uint16 port, uint32 serverip, uint16 serverport, uint8* pdebug_lowiddropped, bool ed2kID)
	// The incoming ID could have the userid in the Hybrid format..
	uint32 hybridID = 0;
	if (IsLowID(userid)) {
		hybridID = wxUINT32_SWAP_ALWAYS(userid);
		if (!IsLowID(userid)) {
			userid = wxUINT32_SWAP_ALWAYS(userid);
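		// Note (illustrative): the hybrid and eD2K representations differ only in
		// byte order - wxUINT32_SWAP_ALWAYS(0x01020304) == 0x04030201 - so the same
		// high-ID address can be compared against either form in the checks below.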
	// MOD Note: Do not change this part - Merkur
	if (theApp->IsConnectedED2K()) {
		if (::IsLowID(theApp->GetED2KID())) {
			if (theApp->GetED2KID() == userid && theApp->serverconnect->GetCurrentServer()->GetIP() == serverip && theApp->serverconnect->GetCurrentServer()->GetPort() == serverport) {
			if (theApp->GetPublicIP() == userid) {
			if (theApp->GetED2KID() == userid && thePrefs::GetPort() == port) {

	if (Kademlia::CKademlia::IsConnected()) {
		if (!Kademlia::CKademlia::IsFirewalled()) {
			if (Kademlia::CKademlia::GetIPAddress() == hybridID && thePrefs::GetPort() == port) {

	// This allows *.*.*.0 clients to not be removed if Ed2kID == false
	if ( IsLowID(hybridID) && theApp->IsFirewalled()) {
		if (pdebug_lowiddropped) {
			(*pdebug_lowiddropped)++;
void CPartFile::AddSources(CMemFile& sources, uint32 serverip, uint16 serverport, unsigned /*origin*/, bool bWithObfuscationAndHash)
	uint8 count = sources.ReadUInt8();
	uint8 debug_lowiddropped = 0;
	uint8 debug_possiblesources = 0;
	CMD4Hash achUserHash;

		// since we may receive multiple search source UDP results we have to "consume" all data of that packet
		AddDebugLogLineM(false, logPartFile, wxT("Trying to add sources for a stopped file"));
		sources.Seek(count * (4 + 2), wxFromCurrent);

	for (int i = 0; i != count; ++i) {
		uint32 userid = sources.ReadUInt32();
		uint16 port = sources.ReadUInt16();

		uint8 byCryptOptions = 0;
		if (bWithObfuscationAndHash) {
			byCryptOptions = sources.ReadUInt8();
			if ((byCryptOptions & 0x80) > 0) {
				achUserHash = sources.ReadHash();

			if ((thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x01/*supported*/) > 0 && (byCryptOptions & 0x80) == 0)
				|| (thePrefs::IsClientCryptLayerSupported() && (byCryptOptions & 0x02/*requested*/) > 0 && (byCryptOptions & 0x80) == 0)) {
				AddDebugLogLineM(false, logPartFile, wxString::Format(wxT("Server didn't provide UserHash for source %u, even though it was expected to (or local obfuscation settings changed during serverconnect)"), userid));
			} else if (!thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x02/*requested*/) == 0 && (byCryptOptions & 0x80) != 0) {
				AddDebugLogLineM(false, logPartFile, wxString::Format(wxT("Server provided UserHash for source %u, even though it wasn't expected to (or local obfuscation settings changed during serverconnect)"), userid));

		// "Filter LAN IPs" and "IPfilter" the received sources' IP addresses
		if (!IsLowID(userid)) {
			// check for 0-IP, localhost and optionally for LAN addresses
			if ( !IsGoodIP(userid, thePrefs::FilterLanIPs()) ) {
			if (theApp->ipfilter->IsFiltered(userid)) {

		if (!CanAddSource(userid, port, serverip, serverport, &debug_lowiddropped)) {

		if (thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
			++debug_possiblesources;
			CUpDownClient* newsource = new CUpDownClient(port, userid, serverip, serverport, this, true, true);

			newsource->SetCryptLayerSupport((byCryptOptions & 0x01) != 0);
			newsource->SetCryptLayerRequest((byCryptOptions & 0x02) != 0);
			newsource->SetCryptLayerRequires((byCryptOptions & 0x04) != 0);
			if ((byCryptOptions & 0x80) != 0) {
				newsource->SetUserHash(achUserHash);

			theApp->downloadqueue->CheckAndAddSource(this, newsource);
			AddDebugLogLineM(false, logPartFile, wxT("Consuming a packet because of max sources reached"));
			// Since we may receive multiple search source UDP results we have to "consume" all data of that packet
			// This '+1' is added because 'i' counts from 0.
			sources.Seek((count - (i + 1)) * (4 + 2), wxFromCurrent);
			if (GetKadFileSearchID()) {
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
void CPartFile::UpdatePartsInfo()
	if ( !IsPartFile() ) {
		CKnownFile::UpdatePartsInfo();

	uint16 partcount = GetPartCount();
	bool flag = (time(NULL) - m_nCompleteSourcesTime > 0);

	// Ensure the frequency-list is ready
	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

	// Find the number of available parts
	uint16 availablecounter = 0;
	for ( uint16 i = 0; i < partcount; ++i ) {
		if ( m_SrcpartFrequency[i] )

	if ( ( availablecounter == partcount ) && ( m_availablePartsCount < partcount ) ) {
		lastseencomplete = time(NULL);

	m_availablePartsCount = availablecounter;

		ArrayOfUInts16 count;
		count.reserve(GetSourceCount());

		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it ) {
			if ( !(*it)->GetUpPartStatus().empty() && (*it)->GetUpPartCount() == partcount ) {
				count.push_back((*it)->GetUpCompleteSourcesCount());

		m_nCompleteSourcesCount = m_nCompleteSourcesCountLo = m_nCompleteSourcesCountHi = 0;

		for (uint16 i = 0; i < partcount; ++i) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
			else if (m_nCompleteSourcesCount > m_SrcpartFrequency[i]) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
		count.push_back(m_nCompleteSourcesCount);

		int32 n = count.size();
			std::sort(count.begin(), count.end(), std::less<uint16>());

			int32 i = n >> 1;		// (n / 2)
			int32 j = (n * 3) >> 2;	// (n * 3) / 4
			int32 k = (n * 7) >> 3;	// (n * 7) / 8
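			// Illustrative example: with n == 40 collected values the picks are
			// count[20] (median), count[30] (third quartile) and count[35] (7/8th),
			// which feed the low/normal/high complete-source guesses below.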
			// While still a part file, adjust your guesses by 20% towards what you see..
				// Not many sources, so just use what you see..
				// welcome to 'plain stupid code'
				// m_nCompleteSourcesCount;
				m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
				m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
			} else if (n < 20) {
				// For the low guess and the normal guess count:
				// If we see more sources than the guessed low and normal, use what we see.
				// If we see fewer sources than the guessed low, the network accounts for 80%,
				// we account for 20% with what we see and make sure we are still above the normal.

				// Adjust: 80% network and 20% what we see.
				if ( count[i] < m_nCompleteSourcesCount ) {
					m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
					m_nCompleteSourcesCountLo =
						(uint16)((float)(count[i] * .8) +
							 (float)(m_nCompleteSourcesCount * .2));
				m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
				m_nCompleteSourcesCountHi =
					(uint16)((float)(count[j] * .8) +
						 (float)(m_nCompleteSourcesCount * .2));
				if ( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
					m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

				// The network accounts for 80%, we account for 20% with what
				// we see, and we make sure we are still above the low.
				// The network accounts for 80%, we account for 20% with what
				// we see, and we make sure we are still above the normal.
				m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
				m_nCompleteSourcesCount = (uint16)((float)(count[j] * .8) + (float)(m_nCompleteSourcesCount * .2));
				if (m_nCompleteSourcesCount < m_nCompleteSourcesCountLo) {
					m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
				m_nCompleteSourcesCountHi = (uint16)((float)(count[k] * .8) + (float)(m_nCompleteSourcesCount * .2));
				if (m_nCompleteSourcesCountHi < m_nCompleteSourcesCount) {
					m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

		m_nCompleteSourcesTime = time(NULL) + (60);

	UpdateDisplayedInfo();
// Kry - Updated to 0.42e + bugfix
// [Maella -Enhanced Chunk Selection- (based on jicxicmic)]
bool CPartFile::GetNextRequestedBlock(CUpDownClient* sender, Requested_Block_Struct** newblocks, uint16* count)
	// The purpose of this function is to return a list of blocks (~180KB) to
	// download. To avoid a premature stop of the download, all blocks that
	// are requested from the same source must be located within the same
	// chunk (=> part, ~9MB).
	//
	// The selection of the chunk to download is one of the CRITICAL parts of the
	// eDonkey network. The selection algorithm must ensure the best spreading
	//
	// The selection is based on 4 criteria:
	//  1. Frequency of the chunk (availability): very rare chunks must be downloaded
	//     as quickly as possible, to become a new available source.
	//  2. Parts used for preview (first + last chunk): to preview or check a
	//     file (e.g. movie, mp3).
	//  3. Request state (download in progress): try to ask each source for another
	//     chunk. Spread the requests between all sources.
	//  4. Completion (shortest-to-complete): partially retrieved chunks should be
	//     completed before starting to download others.
	//
	// The frequency criterion defines three zones: very rare (<10%), rare (<50%)
	// and common (>50%). Inside each zone, the criteria have a specific weight, used
	// to calculate the priority of chunks. The chunk(s) with the highest
	// priority (highest=0, lowest=0xffff) is/are selected first.
	//
	//           very rare                (preview)                rare                        common
	//     0% <---- +0 pt ----> 10% <----- +10000 pt -----> 50% <---- +20000 pt ----> 100%
	// 1.  <-------------- frequency: +25*frequency pt -------------->
	// 2.  <- preview: +1 pt --><-------------- preview: set to 10000 pt --------------->
	// 3.                       <------ request: download in progress +20000 pt -------->
	// 4a. <- completion: 0% +100, 25% +75 .. 100% +0 pt --><---- !req => completion ---->
	// 4b.                                                  <----- req => !completion --->
	//
	// Unrolled, the priority scale is:
	//
	//     0..xxxx      unrequested and requested very rare chunks
	// 10000..1xxxx     unrequested rare chunks + unrequested preview chunks
	// 20000..2xxxx     unrequested common chunks (priority to the most complete)
	// 30000..3xxxx     requested rare chunks + requested preview chunks
	// 40000..4xxxx     requested common chunks (priority to the least complete)
	//
	// This algorithm usually selects the rarest chunk(s) first. However, partially
	// complete chunk(s) that is/are close to completion may overtake the priority
	// (priority inversion).
	// For the common chunks, the algorithm tries to spread the download between
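	// Worked example (hypothetical numbers): with veryRareBound == 10 and
	// rareBound == 20, an unrequested non-preview chunk seen at 25 sources that is
	// 40% complete falls in the common zone and gets rank 20000 + (100 - 40) = 20060,
	// while a chunk seen at only 4 sources gets rank 25*4 + 1 + 60 = 161 and is
	// therefore selected long before it.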
	// Check input parameters
	if ( sender->GetPartStatus().empty() ) {

	// Define and create the list of the chunks to download
	const uint16 partCount = GetPartCount();
	ChunkList chunksList;

	uint16 newBlockCount = 0;
	while (newBlockCount != *count) {
		// Create a request block structure if a chunk has been previously selected
		if (sender->GetLastPartAsked() != 0xffff) {
			Requested_Block_Struct* pBlock = new Requested_Block_Struct;
			if (GetNextEmptyBlockInPart(sender->GetLastPartAsked(), pBlock) == true) {
				// Keep track of all pending requested blocks
				m_requestedblocks_list.push_back(pBlock);
				// Update the list of blocks to return
				newblocks[newBlockCount++] = pBlock;
				// Skip end of loop (=> CPU load)
				// All blocks for this chunk have already been requested
				// => Try to select another chunk
				sender->SetLastPartAsked(0xffff);

		// Check if a new chunk must be selected (e.g. download starting, previous chunk complete)
		if (sender->GetLastPartAsked() == 0xffff) {
			// Quantify all chunks (create the list of chunks to download)
			// This is done only once, and only if it is necessary (=> CPU load)
			if (chunksList.empty()) {
				// Identify the locally missing part(s) that this source has
				for (uint16 i = 0; i < partCount; ++i) {
					if (sender->IsPartAvailable(i) == true && GetNextEmptyBlockInPart(i, NULL) == true) {
						// Create a new entry for this chunk and add it to the list
						newEntry.frequency = m_SrcpartFrequency[i];
						chunksList.push_back(newEntry);

				// Check if any block(s) could be downloaded
				if (chunksList.empty()) {
					break; // Exit main loop while()

				// Define the bounds of the three zones (very rare, rare),
				// depending on the available sources
				if (GetSourceCount() > 800) {
				} else if (GetSourceCount() > 200) {
				uint16 limit = modif * GetSourceCount() / 100;

				const uint16 veryRareBound = limit;
				const uint16 rareBound = 2 * limit;
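				// Illustrative example: if modif ends up as 10 for a small swarm
				// (the exact values set in the branches above are not shown in this
				// fragment), 50 sources give limit == 5, so chunks seen at <= 5
				// sources count as very rare and chunks at <= 10 sources as rare.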
2020 // Cache Preview state (Criterion 2)
2021 FileType type
= GetFiletype(GetFileName());
2022 const bool isPreviewEnable
=
2023 thePrefs::GetPreviewPrio() &&
2024 (type
== ftArchive
|| type
== ftVideo
);
				// Collect and calculate criteria for all chunks
				for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
					Chunk& cur_chunk = *it;

					const uint64 uStart = cur_chunk.part * PARTSIZE;
					const uint64 uEnd =
						((GetFileSize() - 1) < (uStart + PARTSIZE - 1)) ?
						(GetFileSize() - 1) : (uStart + PARTSIZE - 1);
					// Criterion 2. Parts used for preview
					// Remark: - We need to download the first part and the last part(s).
					//         - When the last part is very small, it's necessary to
					//           download the two last parts.
					bool critPreview = false;
					if(isPreviewEnable == true) {
						if(cur_chunk.part == 0) {
							critPreview = true; // First chunk
						} else if(cur_chunk.part == partCount-1) {
							critPreview = true; // Last chunk
						} else if(cur_chunk.part == partCount-2) {
							// Last chunk - 1 (only if last chunk is too small)
							const uint32 sizeOfLastChunk = GetFileSize() - uEnd;
							if(sizeOfLastChunk < PARTSIZE/3) {
								critPreview = true; // Last chunk - 1
					// Criterion 3. Request state (downloading in progress from other source(s))
					const bool critRequested =
						cur_chunk.frequency > veryRareBound &&
						IsAlreadyRequested(uStart, uEnd);

					// Criterion 4. Completion
					uint64 partSize = PARTSIZE;

					std::list<Gap_Struct*>::iterator it2 = m_gaplist.begin();
					for (; it2 != m_gaplist.end(); ++it2) {
						const Gap_Struct* cur_gap = *it2;
						// Check if the gap is inside the limits
						if(cur_gap->start < uStart) {
							if(cur_gap->end > uStart && cur_gap->end < uEnd) {
								partSize -= cur_gap->end - uStart + 1;
							} else if(cur_gap->end >= uEnd) {
								partSize = 0;
								break; // exit loop for()
						} else if(cur_gap->start <= uEnd) {
							if(cur_gap->end < uEnd) {
								partSize -= cur_gap->end - cur_gap->start + 1;
							} else {
								partSize -= uEnd - cur_gap->start + 1;

					const uint16 critCompletion = (uint16)(partSize/(PARTSIZE/100)); // in [%]
					// Calculate priority with all criteria
					if(cur_chunk.frequency <= veryRareBound) {
						// 0..xxxx unrequested + requested very rare chunks
						cur_chunk.rank = (25 * cur_chunk.frequency) +          // Criterion 1
							((critPreview == true) ? 0 : 1) +              // Criterion 2
							(100 - critCompletion);                        // Criterion 4
					} else if(critPreview == true) {
						// 10000..10100 unrequested preview chunks
						// 30000..30100 requested preview chunks
						cur_chunk.rank = ((critRequested == false) ? 10000 : 30000) + // Criterion 3
							(100 - critCompletion);                                // Criterion 4
					} else if(cur_chunk.frequency <= rareBound) {
						// 10101..1xxxx unrequested rare chunks
						// 30101..3xxxx requested rare chunks
						cur_chunk.rank = (25 * cur_chunk.frequency) +          // Criterion 1
							((critRequested == false) ? 10101 : 30101) +   // Criterion 3
							(100 - critCompletion);                        // Criterion 4
					} else {
						if(critRequested == false) { // Criterion 3
							// 20000..2xxxx unrequested common chunks
							cur_chunk.rank = 20000 +           // Criterion 3
								(100 - critCompletion);    // Criterion 4
						} else {
							// 40000..4xxxx requested common chunks
							// Remark: The weight of the completion criterion is inverted
							//         to spread the requests over the chunks nearing completion.
							//         Without this, the chunk closest to completion would
							//         receive every new source.
							cur_chunk.rank = 40000 +           // Criterion 3
								(critCompletion);          // Criterion 4
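							// Illustrative note (added): with this inverted weight a requested
							// common chunk at 90% completion gets rank 40090 while one at 10%
							// gets 40010; since the lowest rank wins in the selection below,
							// new sources are steered towards the less complete chunk.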
			// Select the next chunk to download
			if(!chunksList.empty()) {
				// Find and count the chunk(s) with the highest priority
				uint16 chunkCount = 0; // Number of found chunks with same priority
				uint16 rank = 0xffff;  // Highest priority found

				// Collect and calculate criteria for all chunks
				for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
					const Chunk& cur_chunk = *it;
					if(cur_chunk.rank < rank) {
						chunkCount = 1;
						rank = cur_chunk.rank;
					} else if(cur_chunk.rank == rank) {
						++chunkCount;

				// Use a random access to avoid that everybody tries to download the
				// same chunks at the same time (=> spread the selected chunk among clients)
				uint16 randomness = 1 + (int) (((float)(chunkCount-1))*rand()/(RAND_MAX+1.0));
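				// Note (added): for N chunks that tie at the best rank, the formula above
				// yields a uniform value in 1..N; the loop below then counts down over the
				// tied chunks (the decrementing line is elided in this listing) and selects
				// the one on which the counter reaches zero, so each tied chunk is equally
				// likely to be picked by a given client.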
				for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
					const Chunk& cur_chunk = *it;
					if(cur_chunk.rank == rank) {
						if(randomness == 0) {
							// Selection process is over
							sender->SetLastPartAsked(cur_chunk.part);
							// Remark: this list might be reused up to *count times
							chunksList.erase(it);
							break; // exit loop for()
			// There is no remaining chunk to download
			break; // Exit main loop while()

	// Return the number of blocks
	*count = newBlockCount;

	return (newBlockCount > 0);
void CPartFile::RemoveBlockFromList(uint64 start, uint64 end)
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	while (it != m_requestedblocks_list.end()) {
		std::list<Requested_Block_Struct*>::iterator it2 = it++;

		if ((*it2)->StartOffset <= start && (*it2)->EndOffset >= end) {
			m_requestedblocks_list.erase(it2);
void CPartFile::RemoveAllRequestedBlocks(void)
	m_requestedblocks_list.clear();

void CPartFile::CompleteFile(bool bIsHashingDone)
	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	AddDebugLogLineM( false, logPartFile, wxString( wxT("CPartFile::CompleteFile: Hash ") ) + ( bIsHashingDone ? wxT("done") : wxT("not done") ) );

	if (!bIsHashingDone) {
		SetPartFileStatus(PS_COMPLETING);

		CPath partFile = m_partmetfilename.RemoveExt();
		CThreadScheduler::AddTask(new CHashingTask(GetFilePath(), partFile, this));

	m_is_A4AF_auto = false;
	SetPartFileStatus(PS_COMPLETING);
	// Guess I was wrong about not needing to spawn a thread ...
	// It is needed if the temp and incoming dirs are on different
	// partitions/drives and the file is large... [oz]
	PerformFileComplete();

	if (thePrefs::ShowCatTabInfos()) {
		Notify_ShowUpdateCatTabTitles();
	UpdateDisplayedInfo(true);
2225 void CPartFile::CompleteFileEnded(bool errorOccured
, const CPath
& newname
)
2229 SetPartFileStatus(PS_ERROR
);
2230 AddLogLineM(true, CFormat( _("Unexpected file error while completing %s. File paused") )% GetFileName() );
2232 m_fullname
= newname
;
2234 SetFilePath(m_fullname
.GetPath());
2235 SetFileName(m_fullname
.GetFullName());
2237 SetPartFileStatus(PS_COMPLETE
);
2241 // TODO: What the f*** if it is already known?
2242 theApp
->knownfiles
->SafeAddKFile(this);
2244 // remove the file from the suspended uploads list
2245 theApp
->uploadqueue
->ResumeUpload(GetFileHash());
2246 theApp
->downloadqueue
->RemoveFile(this);
2247 theApp
->sharedfiles
->SafeAddKFile(this);
2248 UpdateDisplayedInfo(true);
2250 // republish that file to the ed2k-server to update the 'FT_COMPLETE_SOURCES' counter on the server.
2251 theApp
->sharedfiles
->RepublishFile(this);
2253 // Ensure that completed shows the correct value
2254 completedsize
= GetFileSize();
2256 AddLogLineM(true, CFormat( _("Finished downloading: %s") ) % GetFileName() );
2259 theApp
->downloadqueue
->StartNextFile(this);
2263 void CPartFile::PerformFileComplete()
2265 // add this file to the suspended uploads list
2266 theApp
->uploadqueue
->SuspendUpload(GetFileHash());
2269 // close permanent handle
2270 if (m_hpartfile
.IsOpened()) {
2271 m_hpartfile
.Close();
2274 // Schedule task for completion of the file
2275 CThreadScheduler::AddTask(new CCompletionTask(this));
2279 void CPartFile::RemoveAllSources(bool bTryToSwap
)
2281 for( SourceSet::iterator it
= m_SrcList
.begin(); it
!= m_SrcList
.end();) {
2282 CUpDownClient
* cur_src
= *it
++;
2284 if (!cur_src
->SwapToAnotherFile(true, true, true, NULL
)) {
2285 RemoveSource(cur_src
,true,false);
2286 // If it was not swapped, it's not on any file anymore, and should die
2289 RemoveSource(cur_src
,true,false);
2295 /* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
2296 // remove all links A4AF in sources to this file
2297 if(!m_A4AFsrclist
.empty()) {
2298 for( SourceSet::iterator it
= m_A4AFsrclist
.begin(); it
!= m_A4AFsrclist
.end(); ) {
2299 CUpDownClient
* cur_src
= *it
++;
2300 if ( cur_src
->DeleteFileRequest( this ) ) {
2301 Notify_DownloadCtrlRemoveSource(cur_src
, this);
2304 m_A4AFsrclist
.clear();
2306 /* eMule 0.30c implementation, i give it a try (Creteil) END ... */
2307 UpdateFileRatingCommentAvail();
2311 void CPartFile::Delete()
2313 AddLogLineM(false, CFormat(_("Deleting file: %s")) % GetFileName());
2314 // Barry - Need to tell any connected clients to stop sending the file
2316 AddDebugLogLineM(false, logPartFile
, wxT("\tStopped"));
2318 theApp
->sharedfiles
->RemoveFile(this);
2319 AddDebugLogLineM(false, logPartFile
, wxT("\tRemoved from shared"));
2320 theApp
->downloadqueue
->RemoveFile(this);
2321 AddDebugLogLineM(false, logPartFile
, wxT("\tRemoved from download queue"));
2322 Notify_DownloadCtrlRemoveFile(this);
2323 AddDebugLogLineM(false, logPartFile
, wxT("\tRemoved transferwnd"));
	// eMule had the same problem with an lseek error ... and overrode it with a simple
	// check for INVALID_HANDLE_VALUE (which, btw, does not exist on linux).
	// So we just guess it is < 0 on error and > 2 if ok (0 stdin, 1 stdout, 2 stderr)
	if (m_hpartfile.fd() > 2) { // 0 stdin, 1 stdout, 2 stderr
		m_hpartfile.Close();
2333 AddDebugLogLineM(false, logPartFile
, wxT("\tClosed"));
2335 if (!CPath::RemoveFile(m_fullname
)) {
2336 AddDebugLogLineM(true, logPartFile
, CFormat(wxT("\tFailed to delete '%s'")) % m_fullname
);
2338 AddDebugLogLineM(false, logPartFile
, wxT("\tRemoved .part.met"));
2341 CPath partFile
= m_fullname
.RemoveExt();
2342 if (!CPath::RemoveFile(partFile
)) {
2343 AddDebugLogLineM(true, logPartFile
, CFormat(wxT("Failed to delete '%s'")) % partFile
);
2345 AddDebugLogLineM(false, logPartFile
, wxT("\tRemoved .part"));
2348 CPath BAKName
= m_fullname
.AppendExt(PARTMET_BAK_EXT
);
2349 if (!CPath::RemoveFile(BAKName
)) {
2350 AddDebugLogLineM(true, logPartFile
, CFormat(wxT("Failed to delete '%s'")) % BAKName
);
2352 AddDebugLogLineM(false, logPartFile
, wxT("\tRemoved .BAK"));
2355 CPath SEEDSName
= m_fullname
.AppendExt(wxT(".seeds"));
2356 if (SEEDSName
.FileExists()) {
2357 if (CPath::RemoveFile(SEEDSName
)) {
2358 AddDebugLogLineM(false, logPartFile
, wxT("\tRemoved .seeds"));
2360 AddDebugLogLineM(true, logPartFile
, CFormat(wxT("Failed to delete '%s'")) % SEEDSName
);
2364 AddDebugLogLineM(false, logPartFile
, wxT("Done"));
2370 bool CPartFile::HashSinglePart(uint16 partnumber
)
2372 if ((GetHashCount() <= partnumber
) && (GetPartCount() > 1)) {
2374 CFormat( _("Warning: Unable to hash downloaded part - hashset incomplete for '%s'") )
2376 m_hashsetneeded
= true;
2378 } else if ((GetHashCount() <= partnumber
) && GetPartCount() != 1) {
2379 AddLogLineM(true, CFormat( _("Error: Unable to hash downloaded part - hashset incomplete (%s). This should never happen")) % GetFileName() );
2380 m_hashsetneeded
= true;
2383 CMD4Hash hashresult
;
2384 uint64 length
= PARTSIZE
;
2385 const uint64 offset
= length
* partnumber
;
2387 m_hpartfile
.Seek(offset
, wxFromStart
);
2388 if (offset
+ PARTSIZE
> m_hpartfile
.GetLength()) {
2389 length
= m_hpartfile
.GetLength() - offset
;
2390 wxASSERT( length
<= PARTSIZE
);
2392 CreateHashFromFile(&m_hpartfile
, length
, &hashresult
, NULL
);
2393 } catch (const CIOFailureException
& e
) {
2394 AddLogLineM(true, CFormat( wxT("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
2395 % partnumber
% length
% (offset
+length
) % GetFileName() % GetFileSize() % e
.what());
2396 SetPartFileStatus(PS_ERROR
);
2398 } catch (const CEOFException
& e
) {
2399 AddLogLineM(true, CFormat( wxT("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
2400 % partnumber
% length
% (offset
+length
) % GetFileName() % GetFileSize() % e
.what());
2404 if (GetPartCount() > 1) {
2405 if (hashresult
!= GetPartHash(partnumber
)) {
2406 AddDebugLogLineM(false, logPartFile
, CFormat( wxT("%s: Expected part-hash: %s")) % GetFileName() % GetPartHash(partnumber
).Encode() );
2407 AddDebugLogLineM(false, logPartFile
, CFormat( wxT("%s: Actual part-hash: %s")) % GetFileName() % hashresult
.Encode() );
2413 if (hashresult
!= m_abyFileHash
) {
2423 bool CPartFile::IsCorruptedPart(uint16 partnumber
)
2425 return std::find(m_corrupted_list
.begin(), m_corrupted_list
.end(), partnumber
)
2426 != m_corrupted_list
.end();
2430 void CPartFile::SetDownPriority(uint8 np
, bool bSave
, bool bRefresh
)
2432 if ( m_iDownPriority
!= np
) {
2433 m_iDownPriority
= np
;
2435 UpdateDisplayedInfo(true);
2442 void CPartFile::StopFile(bool bCancel
)
2444 // Kry - Need to set it here to get into SetPartFileStatus(status) correctly
2447 // Barry - Need to tell any connected clients to stop sending the file
2450 m_LastSearchTimeKad
= 0;
2451 m_TotalSearchesKad
= 0;
2453 RemoveAllSources(true);
2456 memset(m_anStates
,0,sizeof(m_anStates
));
2462 UpdateDisplayedInfo(true);
2466 void CPartFile::StopPausedFile()
2469 // Once an hour, remove any sources for files which are no longer active downloads
2470 switch (GetStatus()) {
2472 case PS_INSUFFICIENT
:
2474 if (time(NULL
) - m_iLastPausePurge
> (60*60)) {
2475 m_iLastPausePurge
= time(NULL
);
2483 void CPartFile::PauseFile(bool bInsufficient
)
2487 if ( status
== PS_COMPLETE
|| status
== PS_COMPLETING
) {
2491 if (GetKadFileSearchID()) {
2492 Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);
2493 // If we were in the middle of searching, reset timer so they can resume searching.
2494 m_LastSearchTimeKad
= 0;
2497 m_iLastPausePurge
= time(NULL
);
2499 theApp
->downloadqueue
->RemoveLocalServerRequest(this);
2501 CPacket
packet( OP_CANCELTRANSFER
, 0, OP_EDONKEYPROT
);
2502 for( SourceSet::iterator it
= m_SrcList
.begin(); it
!= m_SrcList
.end(); ) {
2503 CUpDownClient
* cur_src
= *it
++;
2504 if (cur_src
->GetDownloadState() == DS_DOWNLOADING
) {
2505 if (!cur_src
->GetSentCancelTransfer()) {
2506 theStats::AddUpOverheadOther( packet
.GetPacketSize() );
2507 AddDebugLogLineM( false, logLocalClient
, wxT("Local Client: OP_CANCELTRANSFER to ") + cur_src
->GetFullIP() );
2508 cur_src
->SendPacket( &packet
, false, true );
2509 cur_src
->SetSentCancelTransfer( true );
2511 cur_src
->SetDownloadState(DS_ONQUEUE
);
2516 m_insufficient
= bInsufficient
;
2522 m_anStates
[DS_DOWNLOADING
] = 0;
2528 void CPartFile::ResumeFile()
2530 if ( status
== PS_COMPLETE
|| status
== PS_COMPLETING
) {
2534 if ( m_insufficient
&& !CheckFreeDiskSpace() ) {
		// Still not enough free disk space
2541 m_insufficient
= false;
2543 m_lastsearchtime
= 0;
2545 SetActive(theApp
->IsConnected());
2547 if (m_gaplist
.empty() && (GetStatus() == PS_ERROR
)) {
2548 // The file has already been hashed at this point
2552 UpdateDisplayedInfo(true);
2556 bool CPartFile::CheckFreeDiskSpace( uint32 neededSpace
)
2558 uint64 free
= CPath::GetFreeSpaceAt(GetFilePath());
2559 if (free
== static_cast<uint64
>(wxInvalidOffset
)) {
2560 // If GetFreeSpaceAt() fails, then the path probably does not exist.
	// The very least acceptable disk space is a single PART
	if ( free < PARTSIZE ) {
		// Always fail in this case, since we risk losing data if we try to
		// write on a full partition.

	// All other checks are only made if the user has enabled them
	if ( thePrefs::IsCheckDiskspaceEnabled() ) {
		neededSpace += thePrefs::GetMinFreeDiskSpace();

		// Due to the existence of sparse files, we cannot assume that writes
		// within the file don't cause new blocks to be allocated.
		// Therefore, we have to simply stop writing the moment the limit has
		// been reached.

	return free >= neededSpace;
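// Numeric sketch (added for illustration): with the usual PARTSIZE of roughly
// 9.28 MB, a download is always refused less than one part of free space;
// if the disk-space check is enabled and the user configured, say, a 100 MB
// margin, a buffer flush that needs 2 MB of new data is only allowed while
// free >= 2 MB + 100 MB.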
2586 void CPartFile::SetLastAnsweredTime()
2588 m_ClientSrcAnswered
= ::GetTickCount();
2591 void CPartFile::SetLastAnsweredTimeTimeout()
2593 m_ClientSrcAnswered
= 2 * CONNECTION_LATENCY
+ ::GetTickCount() - SOURCECLIENTREASKS
;
2596 CPacket
*CPartFile::CreateSrcInfoPacket(const CUpDownClient
* forClient
, uint8 byRequestedVersion
, uint16 nRequestedOptions
)
2599 if ( m_SrcList
.empty() ) {
2604 return CKnownFile::CreateSrcInfoPacket(forClient
, byRequestedVersion
, nRequestedOptions
);
2607 if (((forClient
->GetRequestFile() != this)
2608 && (forClient
->GetUploadFile() != this)) || forClient
->GetUploadFileID() != GetFileHash()) {
2609 wxString file1
= _("Unknown");
2610 if (forClient
->GetRequestFile() && forClient
->GetRequestFile()->GetFileName().IsOk()) {
2611 file1
= forClient
->GetRequestFile()->GetFileName().GetPrintable();
2612 } else if (forClient
->GetUploadFile() && forClient
->GetUploadFile()->GetFileName().IsOk()) {
2613 file1
= forClient
->GetUploadFile()->GetFileName().GetPrintable();
2615 wxString file2
= _("Unknown");
2616 if (GetFileName().IsOk()) {
2617 file2
= GetFileName().GetPrintable();
2619 AddDebugLogLineM(false, logPartFile
, wxT("File mismatch on source packet (P) Sending: ") + file1
+ wxT(" From: ") + file2
);
2623 if ( !(GetStatus() == PS_READY
|| GetStatus() == PS_EMPTY
)) {
2627 const BitVector
& reqstatus
= forClient
->GetPartStatus();
2628 bool KnowNeededParts
= !reqstatus
.empty();
2629 //wxASSERT(rcvstatus.size() == GetPartCount()); // Obviously!
2630 if (reqstatus
.size() != GetPartCount()) {
2631 // Yuck. Same file but different part count? Seriously fucked up.
2632 AddDebugLogLineM(false, logPartFile
, wxString::Format(wxT("Impossible situation: different partcounts for the same part file: %i (client) and %i (file)"),reqstatus
.size(),GetPartCount()));
2636 CMemFile
data(1024);
2638 uint8 byUsedVersion
;
2640 if (forClient
->SupportsSourceExchange2() && byRequestedVersion
> 0){
2641 // the client uses SourceExchange2 and requested the highest version he knows
2642 // and we send the highest version we know, but of course not higher than his request
2643 byUsedVersion
= std::min(byRequestedVersion
, (uint8
)SOURCEEXCHANGE2_VERSION
);
2644 bIsSX2Packet
= true;
2645 data
.WriteUInt8(byUsedVersion
);
2647 // we don't support any special SX2 options yet, reserved for later use
2648 if (nRequestedOptions
!= 0) {
2649 AddDebugLogLineM(false, logKnownFiles
, CFormat(wxT("Client requested unknown options for SourceExchange2: %u")) % nRequestedOptions
);
2652 byUsedVersion
= forClient
->GetSourceExchange1Version();
2653 bIsSX2Packet
= false;
2654 if (forClient
->SupportsSourceExchange2()) {
2655 AddDebugLogLineM(false, logKnownFiles
, wxT("Client which announced to support SX2 sent SX1 packet instead"));
2661 data
.WriteHash(m_abyFileHash
);
2662 data
.WriteUInt16(nCount
);
2664 for (SourceSet::iterator it
= m_SrcList
.begin(); it
!= m_SrcList
.end(); ++it
) {
2666 CUpDownClient
* cur_src
= *it
;
2668 int state
= cur_src
->GetDownloadState();
2669 int valid
= ( state
== DS_DOWNLOADING
) || ( state
== DS_ONQUEUE
&& !cur_src
->IsRemoteQueueFull() );
2671 if ( cur_src
->HasLowID() || !valid
) {
2675 // only send source which have needed parts for this client if possible
2676 const BitVector
& srcstatus
= cur_src
->GetPartStatus();
2677 if ( !srcstatus
.empty() ) {
2678 //wxASSERT(srcstatus.size() == GetPartCount()); // Obviously!
2679 if (srcstatus
.size() != GetPartCount()) {
2682 if ( KnowNeededParts
) {
2683 // only send sources which have needed parts for this client
2684 for (int x
= 0; x
< GetPartCount(); ++x
) {
2685 if (srcstatus
[x
] && !reqstatus
[x
]) {
			// If we don't know the needed parts for this client, return any
			// source. Currently a client sends its file status only after it
			// has at least one complete part.
			if (srcstatus.size() != GetPartCount()) {
2697 for (int x
= 0; x
< GetPartCount(); ++x
){
2708 if(forClient
->GetSourceExchange1Version() > 2) {
2709 dwID
= cur_src
->GetUserIDHybrid();
2711 dwID
= wxUINT32_SWAP_ALWAYS(cur_src
->GetUserIDHybrid());
2713 data
.WriteUInt32(dwID
);
2714 data
.WriteUInt16(cur_src
->GetUserPort());
2715 data
.WriteUInt32(cur_src
->GetServerIP());
2716 data
.WriteUInt16(cur_src
->GetServerPort());
		if (byUsedVersion >= 2) {
			data.WriteHash(cur_src->GetUserHash());

		if (byUsedVersion >= 4){
			// CryptSettings - SourceExchange V4
			// 1 CryptLayer Required
			// 1 CryptLayer Requested
			// 1 CryptLayer Supported
			const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
			const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
			const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
			const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
			data.WriteUInt8(byCryptOptions);
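			// Bit-layout note (added): byCryptOptions packs the three flags as
			// 0b00000RQS (R = requires, Q = requests, S = supports). For example,
			// a source that supports and requests the crypt layer but does not
			// require it is written as (0<<2)|(1<<1)|(1<<0) = 0x03; the receiving
			// side in AddClientSources() below unpacks the same bits with
			// & 0x01, & 0x02 and & 0x04.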
	data.Seek(bIsSX2Packet ? 17 : 16, wxFromStart);
	data.WriteUInt16(nCount);

	CPacket* result = new CPacket(data, OP_EMULEPROT, bIsSX2Packet ? OP_ANSWERSOURCES2 : OP_ANSWERSOURCES);

	// 16+2+501*(4+2+4+2+16) = 14046 bytes max.
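	// Size breakdown (added): 16 bytes file hash + 2 bytes source count +
	// at most 501 sources * (4 IP + 2 port + 4 server IP + 2 server port +
	// 16 user hash) = 16 + 2 + 501*28 = 14046 bytes; the extra crypt-options
	// byte per source written for SX2 v4 packets above is apparently not
	// included in this old estimate.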
	if (result->GetPacketSize() > 354) {
		result->PackPacket();
2756 void CPartFile::AddClientSources(CMemFile
* sources
, unsigned nSourceFrom
, uint8 uClientSXVersion
, bool bSourceExchange2
, const CUpDownClient
* /*pClient*/)
2765 uint8 uPacketSXVersion
= 0;
2766 if (!bSourceExchange2
) {
2767 nCount
= sources
->ReadUInt16();
		// Check if the data size matches 'nCount' for v1 or v2 and, if necessary, correct the source
		// exchange version while reading the packet data. Otherwise we run a higher risk of
		// dealing with wrong source data, user hashes and, finally, duplicate sources.
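		// Worked example (added): for nCount = 10 the expected payload sizes
		// checked below are 10*(4+2+4+2) = 120 bytes for v1,
		// 10*(4+2+4+2+16) = 280 bytes for v2/v3 (user hash added) and
		// 10*(4+2+4+2+16+1) = 290 bytes for v4 (crypt options byte added);
		// any other size is rejected as an invalid source exchange packet.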
2772 uint32 uDataSize
= sources
->GetLength() - sources
->GetPosition();
2774 if ((uint32
)(nCount
*(4+2+4+2)) == uDataSize
) { //Checks if version 1 packet is correct size
2775 if(uClientSXVersion
!= 1) {
2778 uPacketSXVersion
= 1;
2779 } else if ((uint32
)(nCount
*(4+2+4+2+16)) == uDataSize
) { // Checks if version 2&3 packet is correct size
2780 if (uClientSXVersion
== 2) {
2781 uPacketSXVersion
= 2;
2782 } else if (uClientSXVersion
> 2) {
2783 uPacketSXVersion
= 3;
2787 } else if (nCount
*(4+2+4+2+16+1) == uDataSize
) {
2788 if (uClientSXVersion
!= 4 ) {
2791 uPacketSXVersion
= 4;
2793 // If v5 inserts additional data (like v2), the above code will correctly filter those packets.
2794 // If v5 appends additional data after <count>(<Sources>)[count], we are in trouble with the
2795 // above code. Though a client which does not understand v5+ should never receive such a packet.
2796 AddDebugLogLineM(false, logClient
, CFormat(wxT("Received invalid source exchange packet (v%u) of data size %u for %s")) % uClientSXVersion
% uDataSize
% GetFileName());
		// We only check whether the version is known to us and do a quick sanity check. For known
		// versions other than SX1 the packet is ignored if any error appears, since it can't be a "misunderstanding" anymore.
2803 if (uClientSXVersion
> SOURCEEXCHANGE2_VERSION
|| uClientSXVersion
== 0 ){
2804 AddDebugLogLineM(false, logPartFile
, CFormat(wxT("Invalid source exchange type version: %i")) % uClientSXVersion
);
2808 // all known versions use the first 2 bytes as count and unknown version are already filtered above
2809 nCount
= sources
->ReadUInt16();
2810 uint32 uDataSize
= (uint32
)(sources
->GetLength() - sources
->GetPosition());
2811 bool bError
= false;
2812 switch (uClientSXVersion
){
2814 bError
= nCount
*(4+2+4+2) != uDataSize
;
2818 bError
= nCount
*(4+2+4+2+16) != uDataSize
;
2821 bError
= nCount
*(4+2+4+2+16+1) != uDataSize
;
2829 AddDebugLogLineM(false, logPartFile
, wxT("Invalid source exchange data size."));
2832 uPacketSXVersion
= uClientSXVersion
;
2835 for (uint16 i
= 0;i
!= nCount
;++i
) {
2837 uint32 dwID
= sources
->ReadUInt32();
2838 uint16 nPort
= sources
->ReadUInt16();
2839 uint32 dwServerIP
= sources
->ReadUInt32();
2840 uint16 nServerPort
= sources
->ReadUInt16();
2843 if (uPacketSXVersion
> 1) {
2844 userHash
= sources
->ReadHash();
2847 uint8 byCryptOptions
= 0;
2848 if (uPacketSXVersion
>= 4) {
2849 byCryptOptions
= sources
->ReadUInt8();
		// Clients send IDs in the Hybrid format, so highID clients with *.*.*.0 won't be falsely switched to a lowID.
2854 if (uPacketSXVersion
>= 3) {
2855 dwIDED2K
= wxUINT32_SWAP_ALWAYS(dwID
);
2860 // check the HighID(IP) - "Filter LAN IPs" and "IPfilter" the received sources IP addresses
2861 if (!IsLowID(dwID
)) {
2862 if (!IsGoodIP(dwIDED2K
, thePrefs::FilterLanIPs())) {
2863 // check for 0-IP, localhost and optionally for LAN addresses
2864 AddDebugLogLineM(false, logIPFilter
, CFormat(wxT("Ignored source (IP=%s) received via %s - bad IP")) % Uint32toStringIP(dwIDED2K
) % OriginToText(nSourceFrom
));
2867 if (theApp
->ipfilter
->IsFiltered(dwIDED2K
)) {
2868 AddDebugLogLineM(false, logIPFilter
, CFormat(wxT("Ignored source (IP=%s) received via %s - IPFilter")) % Uint32toStringIP(dwIDED2K
) % OriginToText(nSourceFrom
));
2871 if (theApp
->clientlist
->IsBannedClient(dwIDED2K
)){
2876 // additionally check for LowID and own IP
2877 if (!CanAddSource(dwID
, nPort
, dwServerIP
, nServerPort
, NULL
, false)) {
2878 AddDebugLogLineM(false, logIPFilter
, CFormat(wxT("Ignored source (IP=%s) received via source exchange")) % Uint32toStringIP(dwIDED2K
));
2882 if(thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
2883 CUpDownClient
* newsource
= new CUpDownClient(nPort
,dwID
,dwServerIP
,nServerPort
,this, (uPacketSXVersion
< 3), true);
2884 if (uPacketSXVersion
> 1) {
2885 newsource
->SetUserHash(userHash
);
2888 if (uPacketSXVersion
>= 4) {
2889 newsource
->SetCryptLayerSupport((byCryptOptions
& 0x01) != 0);
2890 newsource
->SetCryptLayerRequest((byCryptOptions
& 0x02) != 0);
2891 newsource
->SetCryptLayerRequires((byCryptOptions
& 0x04) != 0);
2894 newsource
->SetSourceFrom((ESourceFrom
)nSourceFrom
);
2895 theApp
->downloadqueue
->CheckAndAddSource(this,newsource
);
2903 void CPartFile::UpdateAutoDownPriority()
2905 if (!IsAutoDownPriority()) {
2908 if (GetSourceCount() <= RARE_FILE
) {
2909 if ( GetDownPriority() != PR_HIGH
)
2910 SetDownPriority(PR_HIGH
, false, false);
2911 } else if (GetSourceCount() < 100) {
2912 if ( GetDownPriority() != PR_NORMAL
)
2913 SetDownPriority(PR_NORMAL
, false, false);
2915 if ( GetDownPriority() != PR_LOW
)
2916 SetDownPriority(PR_LOW
, false, false);
2920 // making this function return a higher when more sources have the extended
2921 // protocol will force you to ask a larger variety of people for sources
2923 int CPartFile::GetCommonFilePenalty()
2925 //TODO: implement, but never return less than MINCOMMONPENALTY!
2926 return MINCOMMONPENALTY
;
/* Barry - Replaces BlockReceived()

   Originally this only wrote to disk when a full 180k block
   had been received from a client, and only asked for data in
   180k blocks.

   This meant that on average 90k was lost for every connection
   to a client data source. That is a lot of wasted data.

   To reduce the lost data, packets are now written to a buffer
   and flushed to disk regularly regardless of size downloaded.
   This includes compressed packets.

   Data is also requested only where gaps are, not in 180k blocks.
   The requests will still not exceed 180k, but may be smaller to
   fill a gap.
*/

// Kry - transize is 32bits, no packet can be more than that (this is the
// compressed size). Even 32bits is too much imho. As for the return size,
// look at lenData below.
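// Numeric sketch (added for illustration): if a client sends a block covering
// start..end = 0..179999 (lenData = 180000 bytes) but the packet only carried
// transize = 150000 bytes on the wire, the code below credits the difference,
// 30000 bytes, to m_iGainDueToCompression.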
uint32 CPartFile::WriteToBuffer(uint32 transize, byte* data, uint64 start, uint64 end, Requested_Block_Struct* block)
	// Increment transferred bytes counter for this file
	transferred += transize;

	// This is needed a few times
	// Kry - should not need a uint64 here - no block is larger than
	// 2GB, even uncompressed.
	uint32 lenData = (uint32) (end - start + 1);

	if(lenData > transize) {
		m_iGainDueToCompression += lenData - transize;
2964 // Occasionally packets are duplicated, no point writing it twice
2965 if (IsComplete(start
, end
)) {
2966 AddDebugLogLineM(false, logPartFile
,
2967 CFormat(wxT("File '%s' has already been written from %u to %u"))
2968 % GetFileName() % start
% end
);
2972 // Create copy of data as new buffer
2973 byte
*buffer
= new byte
[lenData
];
2974 memcpy(buffer
, data
, lenData
);
2976 // Create a new buffered queue entry
2977 PartFileBufferedData
*item
= new PartFileBufferedData
;
2978 item
->data
= buffer
;
2979 item
->start
= start
;
2981 item
->block
= block
;
2983 // Add to the queue in the correct position (most likely the end)
2986 std::list
<PartFileBufferedData
*>::iterator it
= m_BufferedData_list
.begin();
2987 for (; it
!= m_BufferedData_list
.end(); ++it
) {
2988 PartFileBufferedData
* queueItem
= *it
;
2990 if (item
->end
<= queueItem
->end
) {
2991 if (it
!= m_BufferedData_list
.begin()) {
2994 m_BufferedData_list
.insert(--it
, item
);
3002 m_BufferedData_list
.push_front(item
);
3005 // Increment buffer size marker
3006 m_nTotalBufferData
+= lenData
;
3008 // Mark this small section of the file as filled
3009 FillGap(item
->start
, item
->end
);
3011 // Update the flushed mark on the requested block
3012 // The loop here is unfortunate but necessary to detect deleted blocks.
3014 std::list
<Requested_Block_Struct
*>::iterator it2
= m_requestedblocks_list
.begin();
3015 for (; it2
!= m_requestedblocks_list
.end(); ++it2
) {
3016 if (*it2
== item
->block
) {
3017 item
->block
->transferred
+= lenData
;
3021 if (m_gaplist
.empty()) {
3025 // Return the length of data written to the buffer
3029 void CPartFile::FlushBuffer(bool /*forcewait*/, bool bForceICH
, bool bNoAICH
)
3031 m_nLastBufferFlushTime
= GetTickCount();
3033 if (m_BufferedData_list
.empty()) {
3038 uint32 partCount
= GetPartCount();
3039 std::vector
<bool> changedPart(partCount
);
3041 // Remember which parts need to be checked at the end of the flush
3042 for ( uint32 i
= 0; i
< partCount
; ++i
) {
3043 changedPart
[ i
] = false;
3047 // Ensure file is big enough to write data to (the last item will be the furthest from the start)
3050 std::list
<PartFileBufferedData
*>::iterator it
= m_BufferedData_list
.begin();
3051 for (; it
!= m_BufferedData_list
.end(); ++it
) {
3052 PartFileBufferedData
* item
= *it
;
3053 wxASSERT((item
->end
- item
->start
) < 0xFFFFFFFF);
3054 newData
+= (uint32
) (item
->end
- item
->start
+ 1);
3057 if ( !CheckFreeDiskSpace( newData
) ) {
3058 // Not enough free space to write the last item, bail
3059 AddLogLineM(true, CFormat( _("WARNING: Not enough free disk-space! Pausing file: %s") ) % GetFileName());
3065 // Loop through queue
3066 while ( !m_BufferedData_list
.empty() ) {
3067 // Get top item and remove it from the queue
3068 PartFileBufferedData
* item
= m_BufferedData_list
.front();
3069 m_BufferedData_list
.pop_front();
3071 // This is needed a few times
3072 wxASSERT((item
->end
- item
->start
) < 0xFFFFFFFF);
3073 uint32 lenData
= (uint32
)(item
->end
- item
->start
+ 1);
3075 // SLUGFILLER: SafeHash - could be more than one part
3076 for (uint32 curpart
= (item
->start
/PARTSIZE
); curpart
<= (item
->end
/PARTSIZE
); ++curpart
) {
3077 wxASSERT(curpart
< partCount
);
3078 changedPart
[curpart
] = true;
3080 // SLUGFILLER: SafeHash
3082 // Go to the correct position in file and write block of data
3084 m_hpartfile
.Seek(item
->start
);
3085 m_hpartfile
.Write(item
->data
, lenData
);
3086 } catch (const CIOFailureException
& e
) {
3087 AddDebugLogLineM(true, logPartFile
, wxT("Error while saving part-file: ") + e
.what());
3088 SetPartFileStatus(PS_ERROR
);
3091 // Decrease buffer size
3092 m_nTotalBufferData
-= lenData
;
3094 // Release memory used by this item
3095 delete [] item
->data
;
3100 // Update last-changed date
3101 m_lastDateChanged
= wxDateTime::GetTimeNow();
3104 // Partfile should never be too large
3105 if (m_hpartfile
.GetLength() > GetFileSize()) {
3106 // it's "last chance" correction. the real bugfix has to be applied 'somewhere' else
3107 m_hpartfile
.SetLength(GetFileSize());
3109 } catch (const CIOFailureException
& e
) {
3110 AddDebugLogLineM(true, logPartFile
,
3111 CFormat(wxT("Error while truncating part-file (%s): %s"))
3112 % m_fullname
.RemoveExt() % e
.what());
3113 SetPartFileStatus(PS_ERROR
);
3118 // Check each part of the file
3119 uint32 partRange
= 0;
3121 uint64 curLength
= m_hpartfile
.GetLength();
3123 partRange
= (uint32
)((curLength
% PARTSIZE
> 0) ? ((curLength
% PARTSIZE
) - 1) : (PARTSIZE
- 1));
3124 } catch (const CIOFailureException
& e
) {
3125 AddDebugLogLineM(true, logPartFile
,
3126 CFormat(wxT("Error while accessing part-file (%s): %s"))
3127 % m_fullname
.RemoveExt() % e
.what());
3128 SetPartFileStatus(PS_ERROR
);
3131 wxASSERT(partRange
);
3132 for (int partNumber
= partCount
-1; partRange
&& partNumber
>= 0; partNumber
--) {
3133 if (changedPart
[partNumber
] == false) {
3134 // Any parts other than last must be full size
3135 partRange
= PARTSIZE
- 1;
3139 // Is this 9MB part complete
3140 if (IsComplete(PARTSIZE
* partNumber
, (PARTSIZE
* (partNumber
+ 1)) - 1)) {
3142 if (!HashSinglePart(partNumber
)) {
3143 AddLogLineM(true, CFormat(
3144 _("Downloaded part %i is corrupt in file: %s") ) % partNumber
% GetFileName() );
3145 AddGap(PARTSIZE
*partNumber
, (PARTSIZE
*partNumber
+ partRange
));
3146 // add part to corrupted list, if not already there
3147 if (!IsCorruptedPart(partNumber
)) {
3148 m_corrupted_list
.push_back(partNumber
);
3150 // request AICH recovery data
3152 RequestAICHRecovery((uint16
)partNumber
);
3154 // Reduce transferred amount by corrupt amount
3155 m_iLostDueToCorruption
+= (partRange
+ 1);
3157 if (!m_hashsetneeded
) {
3158 AddDebugLogLineM(false, logPartFile
, CFormat(
3159 wxT("Finished part %u of '%s'")) % partNumber
% GetFileName());
3162 // if this part was successfully completed (although ICH is active), remove from corrupted list
3163 EraseFirstValue(m_corrupted_list
, partNumber
);
3165 if (status
== PS_EMPTY
) {
3166 if (theApp
->IsRunning()) { // may be called during shutdown!
3167 if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded
) {
3168 // Successfully completed part, make it available for sharing
3169 SetStatus(PS_READY
);
3170 theApp
->sharedfiles
->SafeAddKFile(this);
3175 } else if ( IsCorruptedPart(partNumber
) && (thePrefs::IsICHEnabled() || bForceICH
)) {
3176 // Try to recover with minimal loss
3177 if (HashSinglePart(partNumber
)) {
3178 ++m_iTotalPacketsSavedDueToICH
;
3180 uint64 uMissingInPart
= GetTotalGapSizeInPart(partNumber
);
3181 FillGap(PARTSIZE
*partNumber
,(PARTSIZE
*partNumber
+partRange
));
3182 RemoveBlockFromList(PARTSIZE
*partNumber
,(PARTSIZE
*partNumber
+ partRange
));
3184 // remove from corrupted list
3185 EraseFirstValue(m_corrupted_list
, partNumber
);
3187 AddLogLineM(true, CFormat( _("ICH: Recovered corrupted part %i for %s -> Saved bytes: %s") )
3190 % CastItoXBytes(uMissingInPart
));
3192 if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded
) {
3193 if (status
== PS_EMPTY
) {
3194 // Successfully recovered part, make it available for sharing
3195 SetStatus(PS_READY
);
3196 if (theApp
->IsRunning()) // may be called during shutdown!
3197 theApp
->sharedfiles
->SafeAddKFile(this);
3202 // Any parts other than last must be full size
3203 partRange
= PARTSIZE
- 1;
3209 if (theApp
->IsRunning()) { // may be called during shutdown!
3210 // Is this file finished ?
3211 if (m_gaplist
.empty()) {
3212 CompleteFile(false);
3218 void CPartFile::UpdateFileRatingCommentAvail()
3220 bool prevComment
= m_hasComment
;
3221 int prevRating
= m_iUserRating
;
3223 m_hasComment
= false;
3225 int ratingCount
= 0;
3227 SourceSet::iterator it
= m_SrcList
.begin();
3228 for (; it
!= m_SrcList
.end(); ++it
) {
3229 CUpDownClient
* cur_src
= *it
;
3231 if (!cur_src
->GetFileComment().IsEmpty()) {
3232 m_hasComment
= true;
3235 uint8 rating
= cur_src
->GetFileRating();
3237 wxASSERT(rating
<= 5);
3240 m_iUserRating
+= rating
;
3245 m_iUserRating
/= ratingCount
;
3246 wxASSERT(m_iUserRating
> 0 && m_iUserRating
<= 5);
3249 if ((prevComment
!= m_hasComment
) || (prevRating
!= m_iUserRating
)) {
3250 UpdateDisplayedInfo();
3255 void CPartFile::SetCategory(uint8 cat
)
3257 wxASSERT( cat
< theApp
->glob_prefs
->GetCatCount() );
3263 bool CPartFile::RemoveSource(CUpDownClient
* toremove
, bool updatewindow
, bool bDoStatsUpdate
)
3265 wxASSERT( toremove
);
3267 bool result
= theApp
->downloadqueue
->RemoveSource( toremove
, updatewindow
, bDoStatsUpdate
);
3269 // Check if the client should be deleted, but not if the client is already dying
3270 if ( !toremove
->GetSocket() && !toremove
->HasBeenDeleted() ) {
3271 if ( toremove
->Disconnected(wxT("RemoveSource - purged")) ) {
3272 toremove
->Safe_Delete();
3279 void CPartFile::AddDownloadingSource(CUpDownClient
* client
)
3281 CClientPtrList::iterator it
=
3282 std::find(m_downloadingSourcesList
.begin(), m_downloadingSourcesList
.end(), client
);
3283 if (it
== m_downloadingSourcesList
.end()) {
3284 m_downloadingSourcesList
.push_back(client
);
3289 void CPartFile::RemoveDownloadingSource(CUpDownClient
* client
)
3291 CClientPtrList::iterator it
=
3292 std::find(m_downloadingSourcesList
.begin(), m_downloadingSourcesList
.end(), client
);
3293 if (it
!= m_downloadingSourcesList
.end()) {
3294 m_downloadingSourcesList
.erase(it
);
3299 void CPartFile::SetPartFileStatus(uint8 newstatus
)
3303 if (thePrefs::GetAllcatType()) {
3304 Notify_DownloadCtrlUpdateItem(this);
3307 Notify_DownloadCtrlSort();
3311 uint64
CPartFile::GetNeededSpace()
3314 uint64 length
= m_hpartfile
.GetLength();
3316 if (length
> GetFileSize()) {
3317 return 0; // Shouldn't happen, but just in case
3320 return GetFileSize() - length
;
3321 } catch (const CIOFailureException
& e
) {
3322 AddDebugLogLineM(true, logPartFile
,
3323 CFormat(wxT("Error while retrieving file-length (%s): %s"))
3324 % m_fullname
.RemoveExt() % e
.what());
3325 SetPartFileStatus(PS_ERROR
);
3330 void CPartFile::SetStatus(uint8 in
)
3332 wxASSERT( in
!= PS_PAUSED
&& in
!= PS_INSUFFICIENT
);
3336 if (theApp
->IsRunning()) {
3337 UpdateDisplayedInfo( true );
3339 if ( thePrefs::ShowCatTabInfos() ) {
3340 Notify_ShowUpdateCatTabTitles();
3346 uint64
CPartFile::GetTotalGapSizeInRange(uint64 uRangeStart
, uint64 uRangeEnd
) const
3348 uint64 uTotalGapSize
= 0;
3350 if (uRangeEnd
>= GetFileSize()) {
3351 uRangeEnd
= GetFileSize() - 1;
3354 std::list
<Gap_Struct
*>::const_iterator it
= m_gaplist
.begin();
3355 for (; it
!= m_gaplist
.end(); ++it
) {
3356 const Gap_Struct
* pGap
= *it
;
3358 if (pGap
->start
< uRangeStart
&& pGap
->end
> uRangeEnd
) {
3359 uTotalGapSize
+= uRangeEnd
- uRangeStart
+ 1;
3363 if (pGap
->start
>= uRangeStart
&& pGap
->start
<= uRangeEnd
) {
3364 uint64 uEnd
= (pGap
->end
> uRangeEnd
) ? uRangeEnd
: pGap
->end
;
3365 uTotalGapSize
+= uEnd
- pGap
->start
+ 1;
3366 } else if (pGap
->end
>= uRangeStart
&& pGap
->end
<= uRangeEnd
) {
3367 uTotalGapSize
+= pGap
->end
- uRangeStart
+ 1;
	wxASSERT( uTotalGapSize <= uRangeEnd - uRangeStart + 1 );

	return uTotalGapSize;
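// Worked example (added for illustration): for the range [0, 99] with gaps
// [0, 9] and [50, 120], the loop above adds (9 - 0 + 1) = 10 bytes for the
// first gap and clips the second one to [50, 99], adding (99 - 50 + 1) = 50
// bytes, so GetTotalGapSizeInRange() returns 60.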
3376 uint64
CPartFile::GetTotalGapSizeInPart(uint32 uPart
) const
3378 uint64 uRangeStart
= uPart
* PARTSIZE
;
3379 uint64 uRangeEnd
= uRangeStart
+ PARTSIZE
- 1;
3380 if (uRangeEnd
>= GetFileSize()) {
3381 uRangeEnd
= GetFileSize();
3383 return GetTotalGapSizeInRange(uRangeStart
, uRangeEnd
);
3387 void CPartFile::RequestAICHRecovery(uint16 nPart
)
3390 if ( !m_pAICHHashSet
->HasValidMasterHash() ||
3391 (m_pAICHHashSet
->GetStatus() != AICH_TRUSTED
&& m_pAICHHashSet
->GetStatus() != AICH_VERIFIED
)){
3392 AddDebugLogLineM( false, logAICHRecovery
, wxT("Unable to request AICH Recoverydata because we have no trusted Masterhash") );
3395 if (GetFileSize() <= EMBLOCKSIZE
|| GetFileSize() - PARTSIZE
*nPart
<= EMBLOCKSIZE
)
3397 if (CAICHHashSet::IsClientRequestPending(this, nPart
)){
3398 AddDebugLogLineM( false, logAICHRecovery
, wxT("RequestAICHRecovery: Already a request for this part pending"));
3402 // first check if we have already the recoverydata, no need to rerequest it then
3403 if (m_pAICHHashSet
->IsPartDataAvailable(nPart
*PARTSIZE
)){
3404 AddDebugLogLineM( false, logAICHRecovery
, wxT("Found PartRecoveryData in memory"));
3405 AICHRecoveryDataAvailable(nPart
);
3409 wxASSERT( nPart
< GetPartCount() );
	// find some random client which supports AICH to ask for the blocks
	// first let's see how many we have at all; we prefer high IDs very much
3412 uint32 cAICHClients
= 0;
3413 uint32 cAICHLowIDClients
= 0;
3414 for ( SourceSet::iterator it
= m_SrcList
.begin(); it
!= m_SrcList
.end(); ++it
) {
3415 CUpDownClient
* pCurClient
= *(it
);
3416 if ( pCurClient
->IsSupportingAICH() &&
3417 pCurClient
->GetReqFileAICHHash() != NULL
&&
3418 !pCurClient
->IsAICHReqPending()
3419 && (*pCurClient
->GetReqFileAICHHash()) == m_pAICHHashSet
->GetMasterHash())
3421 if (pCurClient
->HasLowID()) {
3422 ++cAICHLowIDClients
;
3428 if ((cAICHClients
| cAICHLowIDClients
) == 0){
3429 AddDebugLogLineM( false, logAICHRecovery
, wxT("Unable to request AICH Recoverydata because found no client who supports it and has the same hash as the trusted one"));
3432 uint32 nSeclectedClient
;
3433 if (cAICHClients
> 0) {
3434 nSeclectedClient
= (rand() % cAICHClients
) + 1;
3436 nSeclectedClient
= (rand() % cAICHLowIDClients
) + 1;
3438 CUpDownClient
* pClient
= NULL
;
3439 for ( SourceSet::iterator it
= m_SrcList
.begin(); it
!= m_SrcList
.end(); ++it
) {
3440 CUpDownClient
* pCurClient
= *(it
);
3441 if (pCurClient
->IsSupportingAICH() && pCurClient
->GetReqFileAICHHash() != NULL
&& !pCurClient
->IsAICHReqPending()
3442 && (*pCurClient
->GetReqFileAICHHash()) == m_pAICHHashSet
->GetMasterHash())
3444 if (cAICHClients
> 0){
3445 if (!pCurClient
->HasLowID())
3449 wxASSERT( pCurClient
->HasLowID());
3452 if (nSeclectedClient
== 0){
3453 pClient
= pCurClient
;
3458 if (pClient
== NULL
){
	AddDebugLogLineM( false, logAICHRecovery, CFormat( wxT("Requesting AICH Hash (%s) from client %s") ) % ( cAICHClients ? wxT("HighId") : wxT("LowID") ) % pClient->GetClientFullInfo() );
3464 pClient
->SendAICHRequest(this, nPart
);
3469 void CPartFile::AICHRecoveryDataAvailable(uint16 nPart
)
3471 if (GetPartCount() < nPart
){
3476 FlushBuffer(true, true, true);
3478 uint64 length
= PARTSIZE
;
3481 if ((unsigned)(PARTSIZE
* (nPart
+ 1)) > m_hpartfile
.GetLength()){
3482 length
= (m_hpartfile
.GetLength() - (PARTSIZE
* nPart
));
3483 wxASSERT( length
<= PARTSIZE
);
3485 } catch (const CIOFailureException
& e
) {
3486 AddDebugLogLineM(true, logPartFile
,
3487 CFormat(wxT("Error while retrieving file-length (%s): %s"))
3488 % m_fullname
.RemoveExt() % e
.what());
3489 SetPartFileStatus(PS_ERROR
);
3493 // if the part was already ok, it would now be complete
3494 if (IsComplete(nPart
*PARTSIZE
, ((nPart
*PARTSIZE
)+length
)-1)){
3495 AddDebugLogLineM( false, logAICHRecovery
,
3496 wxString::Format( wxT("Processing AICH Recovery data: The part (%u) is already complete, canceling"), nPart
) );
3502 CAICHHashTree
* pVerifiedHash
= m_pAICHHashSet
->m_pHashTree
.FindHash(nPart
*PARTSIZE
, length
);
3503 if (pVerifiedHash
== NULL
|| !pVerifiedHash
->GetHashValid()){
3504 AddDebugLogLineM( true, logAICHRecovery
, wxT("Processing AICH Recovery data: Unable to get verified hash from hashset (should never happen)") );
3508 CAICHHashTree
htOurHash(pVerifiedHash
->GetNDataSize(), pVerifiedHash
->GetIsLeftBranch(), pVerifiedHash
->GetNBaseSize());
3510 m_hpartfile
.Seek(PARTSIZE
* nPart
,wxFromStart
);
3511 CreateHashFromFile(&m_hpartfile
,length
, NULL
, &htOurHash
);
3512 } catch (const CIOFailureException
& e
) {
3513 AddDebugLogLineM(true, logAICHRecovery
,
3514 CFormat(wxT("IO failure while hashing part-file '%s': %s"))
3515 % m_hpartfile
.GetFilePath() % e
.what());
3516 SetPartFileStatus(PS_ERROR
);
3520 if (!htOurHash
.GetHashValid()){
3521 AddDebugLogLineM( false, logAICHRecovery
, wxT("Processing AICH Recovery data: Failed to retrieve AICH Hashset of corrupt part") );
3526 // now compare the hash we just did, to the verified hash and readd all blocks which are ok
3527 uint32 nRecovered
= 0;
3528 for (uint32 pos
= 0; pos
< length
; pos
+= EMBLOCKSIZE
){
3529 const uint32 nBlockSize
= min
<uint32
>(EMBLOCKSIZE
, length
- pos
);
3530 CAICHHashTree
* pVerifiedBlock
= pVerifiedHash
->FindHash(pos
, nBlockSize
);
3531 CAICHHashTree
* pOurBlock
= htOurHash
.FindHash(pos
, nBlockSize
);
3532 if ( pVerifiedBlock
== NULL
|| pOurBlock
== NULL
|| !pVerifiedBlock
->GetHashValid() || !pOurBlock
->GetHashValid()){
3536 if (pOurBlock
->GetHash() == pVerifiedBlock
->GetHash()){
3537 FillGap(PARTSIZE
*nPart
+pos
, PARTSIZE
*nPart
+ pos
+ (nBlockSize
-1));
3538 RemoveBlockFromList(PARTSIZE
*nPart
, PARTSIZE
*nPart
+ (nBlockSize
-1));
3539 nRecovered
+= nBlockSize
;
3543 // ok now some sanity checks
3544 if (IsComplete(nPart
*PARTSIZE
, ((nPart
*PARTSIZE
)+length
)-1)){
		// This is bad, but it could probably happen under some rare circumstances.
		// Make sure that MD4 agrees to this fact too.
3547 if (!HashSinglePart(nPart
)){
3548 AddDebugLogLineM( false, logAICHRecovery
,
3549 wxString::Format(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering - but MD4 says it corrupt! Setting hashset to error state, deleting part"), nPart
));
3550 // now we are fu... unhappy
3551 m_pAICHHashSet
->SetStatus(AICH_ERROR
);
3552 AddGap(PARTSIZE
*nPart
, ((nPart
*PARTSIZE
)+length
)-1);
3557 AddDebugLogLineM( false, logAICHRecovery
, wxString::Format(
3558 wxT("Processing AICH Recovery data: The part (%u) got completed while recovering and MD4 agrees"), nPart
) );
3559 // alrighty not so bad
3560 EraseFirstValue(m_corrupted_list
, nPart
);
3561 if (status
== PS_EMPTY
&& theApp
->IsRunning()){
3562 if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded
){
3563 // Successfully recovered part, make it available for sharing
3564 SetStatus(PS_READY
);
3565 theApp
->sharedfiles
->SafeAddKFile(this);
3569 if (theApp
->IsRunning()){
3570 // Is this file finished?
3571 if (m_gaplist
.empty()) {
3572 CompleteFile(false);
3576 } // end sanity check
3580 // make sure the user appreciates our great recovering work :P
3581 AddDebugLogLineM( true, logAICHRecovery
, CFormat(
3582 wxT("AICH successfully recovered %s of %s from part %u for %s") )
3583 % CastItoXBytes(nRecovered
)
3584 % CastItoXBytes(length
)
3590 void CPartFile::ClientStateChanged( int oldState
, int newState
)
3592 if ( oldState
== newState
)
3595 // If the state is -1, then it's an entirely new item
3596 if ( oldState
!= -1 ) {
3597 // Was the old state a valid state?
3598 if ( oldState
== DS_ONQUEUE
|| oldState
== DS_DOWNLOADING
) {
3601 if ( oldState
== DS_CONNECTED
/* || oldState == DS_REMOTEQUEUEFULL */ ) {
3605 m_notCurrentSources
--;
3609 // If the state is -1, then the source is being removed
3610 if ( newState
!= -1 ) {
3611 // Was the old state a valid state?
3612 if ( newState
== DS_ONQUEUE
|| newState
== DS_DOWNLOADING
) {
3615 if ( newState
== DS_CONNECTED
/* || newState == DS_REMOTEQUEUEFULL */ ) {
3619 ++m_notCurrentSources
;
3625 bool CPartFile::AddSource( CUpDownClient
* client
)
3627 if (m_SrcList
.insert( client
).second
) {
3628 theStats::AddFoundSource();
3629 theStats::AddSourceOrigin(client
->GetSourceFrom());
3637 bool CPartFile::DelSource( CUpDownClient
* client
)
3639 if (m_SrcList
.erase( client
)) {
3640 theStats::RemoveSourceOrigin(client
->GetSourceFrom());
3641 theStats::RemoveFoundSource();
3649 void CPartFile::UpdatePartsFrequency( CUpDownClient
* client
, bool increment
)
3651 const BitVector
& freq
= client
->GetPartStatus();
3653 if ( m_SrcpartFrequency
.size() != GetPartCount() ) {
3654 m_SrcpartFrequency
.clear();
3655 m_SrcpartFrequency
.insert(m_SrcpartFrequency
.begin(), GetPartCount(), 0);
3662 unsigned int size
= freq
.size();
3663 if ( size
!= m_SrcpartFrequency
.size() ) {
3668 for ( unsigned int i
= 0; i
< size
; i
++ ) {
3670 m_SrcpartFrequency
[i
]++;
3674 for ( unsigned int i
= 0; i
< size
; i
++ ) {
3676 m_SrcpartFrequency
[i
]--;
3682 const FileRatingList
&CPartFile::GetRatingAndComments()
3684 m_FileRatingList
.clear();
3685 // This can be pre-processed, but is it worth the CPU?
3686 CPartFile::SourceSet::iterator it
= m_SrcList
.begin();
3687 for ( ; it
!= m_SrcList
.end(); ++it
) {
3688 CUpDownClient
*cur_src
= *it
;
3689 if (cur_src
->GetFileComment().Length()>0 || cur_src
->GetFileRating()>0) {
3690 // AddDebugLogLineM(false, logPartFile, wxString(wxT("found a comment for ")) << GetFileName());
3691 m_FileRatingList
.push_back(SFileRating(*cur_src
));
3695 return m_FileRatingList
;
3700 CPartFile::CPartFile(CEC_PartFile_Tag
*tag
)
3704 SetFileName(CPath(tag
->FileName()));
3705 m_abyFileHash
= tag
->ID();
3706 SetFileSize(tag
->SizeFull());
3707 m_partmetfilename
= CPath(tag
->PartMetName());
3708 transferred
= tag
->SizeXfer();
3709 percentcompleted
= (100.0*completedsize
) / GetFileSize();
3710 completedsize
= tag
->SizeDone();
3712 m_category
= tag
->FileCat();
3714 m_iPartCount
= ((uint64
)GetFileSize() + (PARTSIZE
- 1)) / PARTSIZE
;
3715 m_SrcpartFrequency
.insert(m_SrcpartFrequency
.end(), m_iPartCount
, 0);
3716 m_iDownPriority
= tag
->Prio();
3717 if ( m_iDownPriority
>= 10 ) {
3718 m_iDownPriority
-= 10;
3719 m_bAutoDownPriority
= true;
3721 m_bAutoDownPriority
= false;
3727 m_a4af_source_count
= 0;
3731 * Remote gui specific code
3733 CPartFile::~CPartFile()
3737 const FileRatingList
&CPartFile::GetRatingAndComments()
3739 return m_FileRatingList
;
3741 #endif // !CLIENT_GUI
3744 void CPartFile::UpdateDisplayedInfo(bool force
)
3746 uint32 curTick
= ::GetTickCount();
3747 m_CommentUpdated
= true;
3749 // Wait 1.5s between each redraw
3750 if(force
|| curTick
-m_lastRefreshedDLDisplay
> MINWAIT_BEFORE_DLDISPLAY_WINDOWUPDATE
) {
3751 Notify_DownloadCtrlUpdateItem(this);
3752 m_lastRefreshedDLDisplay
= curTick
;
3758 void CPartFile::Init()
3760 m_showSources
= false;
3761 m_lastsearchtime
= 0;
3762 lastpurgetime
= ::GetTickCount();
3765 m_insufficient
= false;
3770 m_iLastPausePurge
= time(NULL
);
3772 if(thePrefs::GetNewAutoDown()) {
3773 m_iDownPriority
= PR_HIGH
;
3774 m_bAutoDownPriority
= true;
3776 m_iDownPriority
= PR_NORMAL
;
3777 m_bAutoDownPriority
= false;
3780 memset(m_anStates
,0,sizeof(m_anStates
));
3782 transferingsrc
= 0; // new
3786 m_CommentUpdated
= false;
3787 m_hashsetneeded
= true;
3789 percentcompleted
= 0;
3791 m_bPreviewing
= false;
3792 lastseencomplete
= 0;
3793 m_availablePartsCount
=0;
3794 m_ClientSrcAnswered
= 0;
3795 m_LastNoNeededCheck
= 0;
3797 m_nTotalBufferData
= 0;
3798 m_nLastBufferFlushTime
= 0;
3799 m_bPercentUpdated
= false;
3800 m_bRecoveringArchive
= false;
3801 m_iGainDueToCompression
= 0;
3802 m_iLostDueToCorruption
= 0;
3803 m_iTotalPacketsSavedDueToICH
= 0;
3805 m_lastRefreshedDLDisplay
= 0;
3806 m_nDlActiveTime
= 0;
3808 m_is_A4AF_auto
= false;
3809 m_localSrcReqQueued
= false;
3810 m_nCompleteSourcesTime
= time(NULL
);
3811 m_nCompleteSourcesCount
= 0;
3812 m_nCompleteSourcesCountLo
= 0;
3813 m_nCompleteSourcesCountHi
= 0;
3816 m_notCurrentSources
= 0;
3819 m_LastSearchTimeKad
= 0;
3820 m_TotalSearchesKad
= 0;
3824 wxString
CPartFile::getPartfileStatus() const
3829 if ((status
== PS_HASHING
) || (status
== PS_WAITINGFORHASH
)) {
3830 mybuffer
=_("Hashing");
3832 switch (GetStatus()) {
3834 mybuffer
=_("Completing");
3837 mybuffer
=_("Complete");
3840 mybuffer
=_("Paused");
3843 mybuffer
=_("Erroneous");
3845 case PS_INSUFFICIENT
:
3846 mybuffer
= _("Insufficient Diskspace");
3849 if (GetTransferingSrcCount()>0) {
3850 mybuffer
=_("Downloading");
3852 mybuffer
=_("Waiting");
3856 if (m_stopped
&& (GetStatus()!=PS_COMPLETE
)) {
3857 mybuffer
=_("Stopped");
3864 int CPartFile::getPartfileStatusRang() const
3868 if (GetTransferingSrcCount()==0) tempstatus
=1;
3869 switch (GetStatus()) {
3871 case PS_WAITINGFORHASH
:
3891 wxString
CPartFile::GetFeedback()
3894 = CFormat(wxT("Feedback from: %s (%s)\n")) % thePrefs::GetUserNick() % GetFullMuleVersion()
3895 + CFormat(wxT("File name: %s\n")) % GetFileName()
3896 + CFormat(wxT("File size: %s\n")) % CastItoXBytes(GetFileSize());
3898 if (GetStatus() == PS_COMPLETE
) {
3899 retval
+= wxT("Downloaded: Complete\n");
3901 retval
+= CFormat(wxT("Downloaded: %s (%.2f%%)\n")) % CastItoXBytes(GetCompletedSize()) % GetPercentCompleted()
3902 + CFormat(wxT("Transferred: %s (%s)\n")) % CastItoXBytes(statistic
.GetTransferred()) % CastItoXBytes(statistic
.GetAllTimeTransferred())
3903 + CFormat(wxT("Requested: %u (%u)\n")) % statistic
.GetRequests() % statistic
.GetAllTimeRequests()
3904 + CFormat(wxT("Accepted: %d (%d)\n")) % statistic
.GetAccepts() % statistic
.GetAllTimeAccepts()
3905 + CFormat(wxT("Sources: %u\n")) % GetSourceCount();
3908 return retval
+ wxString::Format(wxT("Complete Sources: %u\n"), m_nCompleteSourcesCount
);
sint32 CPartFile::getTimeRemaining() const
	if (GetKBpsDown() < 0.001)

	return((GetFileSize()-GetCompletedSize()) / ((int)(GetKBpsDown()*1024.0)));
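// Worked example (added for illustration): with 90 MiB still missing
// (GetFileSize() - GetCompletedSize() = 94371840 bytes) and a download rate
// of 256 kB/s (GetKBpsDown() = 256, i.e. 262144 bytes/s), the estimate is
// 94371840 / 262144 = 360 seconds remaining.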
3920 bool CPartFile::PreviewAvailable()
3922 FileType type
= GetFiletype(GetFileName());
3924 return (((type
== ftVideo
) || (type
== ftAudio
)) && IsComplete(0, 256*1024));
3927 bool CPartFile::CheckShowItemInGivenCat(int inCategory
)
3929 // easy normal cases
3931 bool IsNotFiltered
= true;
3933 IsInCat
= ((inCategory
==0) || (inCategory
>0 && inCategory
==GetCategory()));
3935 switch (thePrefs::GetAllcatType()) {
3937 IsNotFiltered
= GetCategory() == 0 || inCategory
> 0;
3940 IsNotFiltered
= IsPartFile();
3943 IsNotFiltered
= !IsPartFile();
3947 (GetStatus() == PS_READY
|| GetStatus() == PS_EMPTY
) &&
3948 GetTransferingSrcCount() == 0;
3952 (GetStatus() == PS_READY
|| GetStatus()==PS_EMPTY
) &&
3953 GetTransferingSrcCount() > 0;
3956 IsNotFiltered
= GetStatus() == PS_ERROR
;
3959 IsNotFiltered
= GetStatus() == PS_PAUSED
&& !IsStopped();
3962 IsNotFiltered
= IsStopped();
3965 IsNotFiltered
= GetFiletype(GetFileName()) == ftVideo
;
3968 IsNotFiltered
= GetFiletype(GetFileName()) == ftAudio
;
3971 IsNotFiltered
= GetFiletype(GetFileName()) == ftArchive
;
3974 IsNotFiltered
= GetFiletype(GetFileName()) == ftCDImage
;
3977 IsNotFiltered
= GetFiletype(GetFileName()) == ftPicture
;
3980 IsNotFiltered
= GetFiletype(GetFileName()) == ftText
;
3983 IsNotFiltered
= !IsStopped() && GetStatus() != PS_PAUSED
;
3987 return IsNotFiltered
&& IsInCat
;
3990 bool CPartFile::IsComplete(uint64 start
, uint64 end
)
3992 if (end
>= GetFileSize()) {
3993 end
= GetFileSize()-1;
3996 std::list
<Gap_Struct
*>::iterator it
= m_gaplist
.begin();
3997 for (; it
!= m_gaplist
.end(); ++it
) {
3998 Gap_Struct
* cur_gap
= *it
;
3999 if ((cur_gap
->start
>= start
&& cur_gap
->end
<= end
)||(cur_gap
->start
>= start
4000 && cur_gap
->start
<= end
)||(cur_gap
->end
<= end
&& cur_gap
->end
>= start
)
4001 ||(start
>= cur_gap
->start
&& end
<= cur_gap
->end
)) {
4009 void CPartFile::SetActive(bool bActive
)
4011 time_t tNow
= time(NULL
);
4013 if (theApp
->IsConnected()) {
4014 if (m_tActivated
== 0) {
4015 m_tActivated
= tNow
;
4019 if (m_tActivated
!= 0) {
4020 m_nDlActiveTime
+= tNow
- m_tActivated
;
4027 uint32
CPartFile::GetDlActiveTime() const
4029 uint32 nDlActiveTime
= m_nDlActiveTime
;
4030 if (m_tActivated
!= 0) {
4031 nDlActiveTime
+= time(NULL
) - m_tActivated
;
4033 return nDlActiveTime
;
4039 uint8
CPartFile::GetStatus(bool ignorepause
) const
4041 if ( (!m_paused
&& !m_insufficient
) ||
4042 status
== PS_ERROR
||
4043 status
== PS_COMPLETING
||
4044 status
== PS_COMPLETE
||
4047 } else if ( m_insufficient
) {
4048 return PS_INSUFFICIENT
;
4054 void CPartFile::AddDeadSource(const CUpDownClient
* client
)
4056 m_deadSources
.AddDeadSource( client
);
4060 bool CPartFile::IsDeadSource(const CUpDownClient
* client
)
4062 return m_deadSources
.IsDeadSource( client
);
4065 void CPartFile::SetFileName(const CPath
& fileName
)
4067 CKnownFile
* pFile
= theApp
->sharedfiles
->GetFileByID(GetFileHash());
4069 bool is_shared
= (pFile
&& pFile
== this);
4072 // The file is shared, we must clear the search keywords so we don't
4073 // publish the old name anymore.
4074 theApp
->sharedfiles
->RemoveKeywords(this);
4077 CKnownFile::SetFileName(fileName
);
4080 // And of course, we must advertise the new name if the file is shared.
4081 theApp
->sharedfiles
->AddKeywords(this);
4084 UpdateDisplayedInfo(true);
uint16 CPartFile::GetMaxSources() const
	// Stays like this as long as we don't import the per-file max sources setting
	return thePrefs::GetMaxSourcePerFile();
4095 uint16
CPartFile::GetMaxSourcePerFileSoft() const
4097 unsigned int temp
= ((unsigned int)GetMaxSources() * 9L) / 10;
4098 if (temp
> MAX_SOURCES_FILE_SOFT
) {
4099 return MAX_SOURCES_FILE_SOFT
;
4104 uint16
CPartFile::GetMaxSourcePerFileUDP() const
4106 unsigned int temp
= ((unsigned int)GetMaxSources() * 3L) / 4;
4107 if (temp
> MAX_SOURCES_FILE_UDP
) {
4108 return MAX_SOURCES_FILE_UDP
;
#define DROP_FACTOR 2
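// Note (added): DROP_FACTOR gives the margin used below when looking for a
// source worth dropping. For example, with DROP_FACTOR 2 a source downloading
// at 10 kB/s (10 * 1024 * 2 = 20480 factored bytes/s) is only considered
// slower than the caller if the caller's speed exceeds 20480 bytes/s, i.e. if
// it is more than twice as fast.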
CUpDownClient* CPartFile::GetSlowerDownloadingClient(uint32 speed, CUpDownClient* caller) {
4116 // printf("Start slower source calculation\n");
4117 for( SourceSet::iterator it
= m_SrcList
.begin(); it
!= m_SrcList
.end(); ) {
4118 CUpDownClient
* cur_src
= *it
++;
4119 if ((cur_src
->GetDownloadState() == DS_DOWNLOADING
) && (cur_src
!= caller
)) {
4120 uint32 factored_bytes_per_second
= static_cast<uint32
>(
4121 (cur_src
->GetKBpsDown() * 1024) * DROP_FACTOR
);
4122 if ( factored_bytes_per_second
< speed
) {
4123 // printf("Selecting source %p to drop: %d < %d\n", cur_src, factored_bytes_per_second, speed);
4124 // printf("End slower source calculation\n");
4127 // printf("Not selecting source %p to drop: %d > %d\n", cur_src, factored_bytes_per_second, speed);
4131 // printf("End slower source calculation\n");
4136 // File_checked_for_headers