//
// This file is part of the aMule Project.
//
// Copyright (c) 2003-2008 aMule Team ( admin@amule.org / http://www.amule.org )
// Copyright (c) 2002-2008 Merkur ( devs@emule-project.net / http://www.emule-project.net )
//
// Any parts of this program derived from the xMule, lMule or eMule project,
// or contributed by third-party developers are copyrighted by their
// respective authors.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
//
#include "PartFile.h"           // Interface declarations.

#include "config.h"             // Needed for VERSION

#include <protocol/kad/Constants.h>
#include <protocol/ed2k/Client2Client/TCP.h>
#include <protocol/Protocols.h>
#include <common/DataFileVersion.h>
#include <common/Constants.h>
#include <tags/FileTags.h>

#include <wx/tokenzr.h>         // Needed for wxStringTokenizer

#include "KnownFileList.h"      // Needed for CKnownFileList
#include "UploadQueue.h"        // Needed for CFileHash
#include "IPFilter.h"           // Needed for CIPFilter
#include "Server.h"             // Needed for CServer
#include "ServerConnect.h"      // Needed for CServerConnect
#include "updownclient.h"       // Needed for CUpDownClient
#include "MemFile.h"            // Needed for CMemFile
#include "Preferences.h"        // Needed for CPreferences
#include "DownloadQueue.h"      // Needed for CDownloadQueue
#include "amule.h"              // Needed for theApp
#include "ED2KLink.h"           // Needed for CED2KLink
#include "Packet.h"             // Needed for CTag
#include "SearchList.h"         // Needed for CSearchFile
#include "ClientList.h"         // Needed for clientlist
#include "Statistics.h"         // Needed for theStats

#include <common/Format.h>      // Needed for CFormat
#include <common/FileFunctions.h>  // Needed for GetLastModificationTime
#include "ThreadTasks.h"        // Needed for CHashingTask/CCompletionTask/CAllocateFileTask
#include "GuiEvents.h"          // Needed for Notify_*
#include "DataToText.h"         // Needed for OriginToText()
#include "PlatformSpecific.h"   // Needed for CreateSparseFile()
#include "FileArea.h"           // Needed for CFileArea
#include "ScopedPtr.h"          // Needed for CScopedArray
#include "CorruptionBlackBox.h"

#include "kademlia/kademlia/Kademlia.h"
#include "kademlia/kademlia/Search.h"
SFileRating::SFileRating(const wxString &u, const wxString &f, sint16 r, const wxString &c)

SFileRating::SFileRating(const SFileRating &fr)
    UserName(fr.UserName),
    FileName(fr.FileName),

SFileRating::SFileRating(const CUpDownClient &client)
    UserName(client.GetUserName()),
    FileName(client.GetClientFilename()),
    Rating(client.GetFileRating()),
    Comment(client.GetFileComment())

SFileRating::~SFileRating()
class PartFileBufferedData
    CScopedArray<byte> data;            // This is the data to be written
    uint64 start;                       // This is the start offset of the data
    uint64 end;                         // This is the end offset of the data
    Requested_Block_Struct *block;      // This is the requested block that this data relates to

    PartFileBufferedData(byte *_data, uint64 _start, uint64 _end, Requested_Block_Struct *_block)
        : data(_data), start(_start), end(_end), block(_block)
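    // Illustrative note (not part of the original source): buffered ranges of this type
    // are queued for delayed writing, presumably from CPartFile::WriteToBuffer() (not shown
    // in this excerpt), roughly as
    //     m_BufferedData_list.push_back(new PartFileBufferedData(data, start, end, block));
    //     m_nTotalBufferData += end - start + 1;
    // and later flushed to the part file in one pass, which then fills the matching gaps.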
typedef std::list<Chunk> ChunkList;
CPartFile::CPartFile()

CPartFile::CPartFile(CSearchFile* searchresult)

    m_abyFileHash = searchresult->GetFileHash();
    SetFileName(searchresult->GetFileName());
    SetFileSize(searchresult->GetFileSize());

    for (unsigned int i = 0; i < searchresult->m_taglist.size(); ++i){
        const CTag& pTag = searchresult->m_taglist[i];

        bool bTagAdded = false;
        if (pTag.GetNameID() == 0 && !pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
            static const struct {
                    { wxT(FT_ED2K_MEDIA_ARTIST),  2 },
                    { wxT(FT_ED2K_MEDIA_ALBUM),   2 },
                    { wxT(FT_ED2K_MEDIA_TITLE),   2 },
                    { wxT(FT_ED2K_MEDIA_LENGTH),  2 },
                    { wxT(FT_ED2K_MEDIA_BITRATE), 3 },
                    { wxT(FT_ED2K_MEDIA_CODEC),   2 }

            for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
                if (pTag.GetType() == _aMetaTags[t].nType &&
                        (pTag.GetName() == _aMetaTags[t].pszName)) {
                    // skip string tags with empty string values
                    if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

                    // skip "length" tags with "0: 0" values
                    if (pTag.GetName() == wxT(FT_ED2K_MEDIA_LENGTH)) {
                        if (pTag.GetStr().IsSameAs(wxT("0: 0")) ||
                                pTag.GetStr().IsSameAs(wxT("0:0"))) {

                    // skip "bitrate" tags with '0' values
                    if ((pTag.GetName() == wxT(FT_ED2K_MEDIA_BITRATE)) && !pTag.GetInt()) {

                    AddDebugLogLineM( false, logPartFile,
                        wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
                        pTag.GetFullInfo() );
                    m_taglist.push_back(pTag);

        } else if (pTag.GetNameID() != 0 && pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
            static const struct {

            for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
                if (pTag.GetType() == _aMetaTags[t].nType && pTag.GetNameID() == _aMetaTags[t].nID) {
                    // skip string tags with empty string values
                    if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {

                    AddDebugLogLineM( false, logPartFile,
                        wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
                        pTag.GetFullInfo() );
                    m_taglist.push_back(pTag);

            AddDebugLogLineM( false, logPartFile,
                wxT("CPartFile::CPartFile(CSearchFile*): ignored tag ") +
                pTag.GetFullInfo() );
CPartFile::CPartFile(const CED2KFileLink* fileLink)

    SetFileName(CPath(fileLink->GetName()));
    SetFileSize(fileLink->GetSize());
    m_abyFileHash = fileLink->GetHashKey();

    if (fileLink->m_hashset) {
        if (!LoadHashsetFromFile(fileLink->m_hashset, true)) {
            AddDebugLogLineM(true, logPartFile, wxT("eD2K link contained invalid hashset: ") + fileLink->GetLink());
CPartFile::~CPartFile()

    // if it's not opened, it was completed or deleted
    if (m_hpartfile.IsOpened()) {

    // Update met file (with current directory entry)

    DeleteContents(m_BufferedData_list);
    delete m_CorruptionBlackBox;

    wxASSERT(m_SrcList.empty());
    wxASSERT(m_A4AFsrclist.empty());
void CPartFile::CreatePartFile()

    // use lowest free partfilenumber for free file (InterCeptor)
        m_partmetfilename = CPath(wxString::Format(wxT("%03i.part.met"), i));
        m_fullname = thePrefs::GetTempDir().JoinPaths(m_partmetfilename);
    } while (m_fullname.FileExists());

    wxString strPartName = m_partmetfilename.RemoveExt().GetRaw();
    m_taglist.push_back(CTagString(FT_PARTFILENAME, strPartName));

    m_gaplist.Init(GetFileSize(), true);    // Init empty

    m_PartPath = m_fullname.RemoveExt();

    if (thePrefs::GetAllocFullFile()) {
        fileCreated = m_hpartfile.Create(m_PartPath, true);
        fileCreated = PlatformSpecific::CreateSparseFile(m_PartPath, GetFileSize());
        AddLogLineM(false, _("ERROR: Failed to create partfile"));
        SetPartFileStatus(PS_ERROR);

    SetFilePath(thePrefs::GetTempDir());

    if (thePrefs::GetAllocFullFile()) {
        SetPartFileStatus(PS_ALLOCATING);
        CThreadScheduler::AddTask(new CAllocateFileTask(this, thePrefs::AddNewFilesPaused()));
        AllocationFinished();

    m_hashsetneeded = (GetED2KPartHashCount() > 0);

    SetActive(theApp->IsConnected());
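// Illustrative example (not in the original source): the do/while loop above picks the
// lowest unused three-digit slot in the temp dir, so a fresh download typically becomes
// "001.part.met", with its data stored alongside as "001.part" (m_PartPath).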
uint8 CPartFile::LoadPartFile(const CPath& in_directory, const CPath& filename, bool from_backup, bool getsizeonly)

    bool isnewstyle = false;
    uint8 version, partmettype = PMT_UNKNOWN;

    std::map<uint16, Gap_Struct*> gap_map; // Slugfiller

    m_partmetfilename = filename;
    m_filePath = in_directory;
    m_fullname = m_filePath.JoinPaths(m_partmetfilename);
    m_PartPath = m_fullname.RemoveExt();

    // read file data from part.met file
    CPath curMetFilename = m_fullname;
        curMetFilename = curMetFilename.AppendExt(PARTMET_BAK_EXT);
        AddLogLineM(false, CFormat( _("Trying to load backup of met-file from %s") )

        CFile metFile(curMetFilename, CFile::read);
        if (!metFile.IsOpened()) {
            AddLogLineM(false, CFormat( _("ERROR: Failed to open part.met file: %s ==> %s") )
        } else if (metFile.GetLength() == 0) {
            AddLogLineM(false, CFormat( _("ERROR: part.met file is 0 size: %s ==> %s") )

        version = metFile.ReadUInt8();
        if (version != PARTFILE_VERSION && version != PARTFILE_SPLITTEDVERSION && version != PARTFILE_VERSION_LARGEFILE){
            //if (version == 83) return ImportShareazaTempFile(...)
            AddLogLineM(false, CFormat( _("ERROR: Invalid part.met file version: %s ==> %s") )

        isnewstyle = (version == PARTFILE_SPLITTEDVERSION);
        partmettype = isnewstyle ? PMT_SPLITTED : PMT_DEFAULTOLD;

        if (version == PARTFILE_VERSION) {  // Do we still need this check?
            uint8 test[4];                  // It will fail for certain files.
            metFile.Seek(24, wxFromStart);
            metFile.Read(test, 4);

            metFile.Seek(1, wxFromStart);
            if (test[0]==0 && test[1]==0 && test[2]==2 && test[3]==1) {
                isnewstyle = true;          // edonkey's so-called "old part style"
                partmettype = PMT_NEWOLD;

            uint32 temp = metFile.ReadUInt32();

            if (temp==0) {  // 0.48 partmets - different again
                LoadHashsetFromFile(&metFile, false);
                metFile.Seek(2, wxFromStart);
                LoadDateFromFile(&metFile);
                m_abyFileHash = metFile.ReadHash();

            LoadDateFromFile(&metFile);
            LoadHashsetFromFile(&metFile, false);

        uint32 tagcount = metFile.ReadUInt32();

        for (uint32 j = 0; j < tagcount; ++j) {
            CTag newtag(metFile, true);
                    (newtag.GetNameID() == FT_FILESIZE ||
                     newtag.GetNameID() == FT_FILENAME))) {
                switch(newtag.GetNameID()) {
                        if (!GetFileName().IsOk()) {
                            // If it's not empty, we already loaded the unicoded one
                            SetFileName(CPath(newtag.GetStr()));
                    case FT_LASTSEENCOMPLETE: {
                        lastseencomplete = newtag.GetInt();
                        SetFileSize(newtag.GetInt());
                    case FT_TRANSFERRED: {
                        transferred = newtag.GetInt();
                        //#warning needs setfiletype string
                        //SetFileType(newtag.GetStr());
                        m_category = newtag.GetInt();
                        if (m_category > theApp->glob_prefs->GetCatCount() - 1 ) {
                    case FT_OLDDLPRIORITY:
                    case FT_DLPRIORITY: {
                            m_iDownPriority = newtag.GetInt();
                            if( m_iDownPriority == PR_AUTO ){
                                m_iDownPriority = PR_HIGH;
                                SetAutoDownPriority(true);
                                if ( m_iDownPriority != PR_LOW &&
                                     m_iDownPriority != PR_NORMAL &&
                                     m_iDownPriority != PR_HIGH )
                                    m_iDownPriority = PR_NORMAL;
                                SetAutoDownPriority(false);
                        m_paused = (newtag.GetInt() == 1);
                        m_stopped = m_paused;
                    case FT_OLDULPRIORITY:
                    case FT_ULPRIORITY: {
                            SetUpPriority(newtag.GetInt(), false);
                            if( GetUpPriority() == PR_AUTO ){
                                SetUpPriority(PR_HIGH, false);
                                SetAutoUpPriority(true);
                                SetAutoUpPriority(false);
                    case FT_KADLASTPUBLISHSRC: {
                        SetLastPublishTimeKadSrc(newtag.GetInt(), 0);
                        if(GetLastPublishTimeKadSrc() > (uint32)time(NULL)+KADEMLIAREPUBLISHTIMES) {
                            // There may be a possibility of an older client that saved a random number here.. This will check for that..
                            SetLastPublishTimeKadSrc(0,0);
                    case FT_KADLASTPUBLISHNOTES: {
                        SetLastPublishTimeKadNotes(newtag.GetInt());
                    // old tags: as long as they are not needed, take the chance to purge them
                    case FT_KADLASTPUBLISHKEY:
                    case FT_DL_ACTIVE_TIME:
                        if (newtag.IsInt()) {
                            m_nDlActiveTime = newtag.GetInt();
                    case FT_CORRUPTEDPARTS: {
                        wxASSERT(m_corrupted_list.empty());
                        wxString strCorruptedParts(newtag.GetStr());
                        wxStringTokenizer tokenizer(strCorruptedParts, wxT(","));
                        while ( tokenizer.HasMoreTokens() ) {
                            wxString token = tokenizer.GetNextToken();
                            if (token.ToULong(&uPart)) {
                                if (uPart < GetPartCount() && !IsCorruptedPart(uPart)) {
                                    m_corrupted_list.push_back(uPart);
                            hash.DecodeBase32(newtag.GetStr()) == CAICHHash::GetHashSize();
                        wxASSERT(hashSizeOk);
                            m_pAICHHashSet->SetMasterHash(hash, AICH_VERIFIED);
                    case FT_ATTRANSFERRED: {
                        statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (uint64)newtag.GetInt());
                    case FT_ATTRANSFERREDHI: {
                        statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (((uint64)newtag.GetInt()) << 32));
                    case FT_ATREQUESTED: {
                        statistic.SetAllTimeRequests(newtag.GetInt());
                        statistic.SetAllTimeAccepts(newtag.GetInt());
                        // Start Changes by Slugfiller for better exception handling

                        wxCharBuffer tag_ansi_name = newtag.GetName().ToAscii();
                        char gap_mark = tag_ansi_name ? tag_ansi_name[0u] : 0;
                        if ( newtag.IsInt() && (newtag.GetName().Length() > 1) &&
                                ((gap_mark == FT_GAPSTART) ||
                                 (gap_mark == FT_GAPEND))) {
                            Gap_Struct *gap = NULL;
                            unsigned long int gapkey;
                            if (newtag.GetName().Mid(1).ToULong(&gapkey)) {
                                if ( gap_map.find( gapkey ) == gap_map.end() ) {
                                    gap = new Gap_Struct;
                                    gap_map[gapkey] = gap;
                                    gap->start = (uint64)-1;
                                    gap->end = (uint64)-1;
                                    gap = gap_map[ gapkey ];
                                if (gap_mark == FT_GAPSTART) {
                                    gap->start = newtag.GetInt();
                                if (gap_mark == FT_GAPEND) {
                                    gap->end = newtag.GetInt()-1;
                                AddDebugLogLineN(logPartFile, wxT("Wrong gap map key while reading met file!"));
                        // End Changes by Slugfiller for better exception handling
                            m_taglist.push_back(newtag);

        // Nothing. Else, nothing.

        // load the hashsets from the hybridstylepartmet
        if (isnewstyle && !getsizeonly && (metFile.GetPosition() < metFile.GetLength()) ) {
            metFile.Seek(1, wxFromCurrent);

            uint16 parts = GetPartCount();  // assuming we will get all hashsets

            for (uint16 i = 0; i < parts && (metFile.GetPosition()+16 < metFile.GetLength()); ++i){
                CMD4Hash cur_hash = metFile.ReadHash();
                m_hashlist.push_back(cur_hash);

            if (!m_hashlist.empty()) {
                CreateHashFromHashlist(m_hashlist, &checkhash);
                if (m_abyFileHash == checkhash) {

    } catch (const CInvalidPacket& e) {
        AddLogLineM(true, CFormat(wxT("Error: %s (%s) is corrupt (bad tags: %s), unable to load file."))
    } catch (const CIOFailureException& e) {
        AddDebugLogLineM(true, logPartFile, CFormat( wxT("IO failure while loading '%s': %s") )
    } catch (const CEOFException& WXUNUSED(e)) {
        AddLogLineM(true, CFormat( _("ERROR: %s (%s) is corrupt (wrong tagcount), unable to load file.") )
        AddLogLineM(true, _("Trying to recover file info..."));

        // A file is safe to recover when it has
        // We have filesize, try other needed info

        // Do we need to check gaps? I think not,
        // because they are checked below. Worst
        // scenario will only mark file as 0 bytes downloaded.

        if (!GetFileName().IsOk()) {
            // Not critical, let's put a random filename.
                "Recovering no-named file - will try to recover it as RecoveredFile.dat"));
            SetFileName(CPath(wxT("RecoveredFile.dat")));

            _("Recovered all available file info :D - Trying to use it..."));
            AddLogLineM(true, _("Unable to recover file info :("));

    m_gaplist.Init(GetFileSize(), false);   // Init full, then add gaps
    // Now to flush the map into the list (Slugfiller)
    std::map<uint16, Gap_Struct*>::iterator it = gap_map.begin();
    for ( ; it != gap_map.end(); ++it ) {
        Gap_Struct* gap = it->second;
        // SLUGFILLER: SafeHash - revised code, and extra safety
        if ( (gap->start != (uint64)-1) &&
             (gap->end != (uint64)-1) &&
             gap->start <= gap->end &&
             gap->start < GetFileSize()) {
            if (gap->end >= GetFileSize()) {
                gap->end = GetFileSize()-1; // Clipping
            m_gaplist.AddGap(gap->start, gap->end); // All tags accounted for, use safe adding
        // SLUGFILLER: SafeHash

    // check if this is a backup
    if ( m_fullname.GetExt().MakeLower() == wxT("backup") ) {
        m_fullname = m_fullname.RemoveExt();

    // open permanent handle
    if ( !m_hpartfile.Open(m_PartPath, CFile::read_write)) {
        AddLogLineM(false, CFormat( _("Failed to open %s (%s)") )

    SetPartFileStatus(PS_EMPTY);

        // SLUGFILLER: SafeHash - final safety, make sure any missing part of the file is a gap
        if (m_hpartfile.GetLength() < GetFileSize())
            AddGap(m_hpartfile.GetLength(), GetFileSize()-1);
        // Goes both ways - Partfile should never be too large
        if (m_hpartfile.GetLength() > GetFileSize()) {
            AddDebugLogLineM( true, logPartFile, CFormat( wxT("Partfile \"%s\" is too large! Truncating %llu bytes.") ) % GetFileName() % (m_hpartfile.GetLength() - GetFileSize()));
            m_hpartfile.SetLength(GetFileSize());
        // SLUGFILLER: SafeHash
    } catch (const CIOFailureException& e) {
        AddDebugLogLineM( true, logPartFile, CFormat( wxT("Error while accessing partfile \"%s\": %s") ) % GetFileName() % e.what());
        SetPartFileStatus(PS_ERROR);

    // now close the file again until needed
    m_hpartfile.Release(true);

    // check hashcount, file status etc
    if (GetHashCount() != GetED2KPartHashCount()){
        m_hashsetneeded = true;

        m_hashsetneeded = false;
        for (size_t i = 0; i < m_hashlist.size(); ++i) {

    SetPartFileStatus(PS_READY);

    if (m_gaplist.IsComplete()) {   // is this file complete already?

    if (!isnewstyle) {  // not for importing
        const time_t file_date = CPath::GetModificationTime(m_PartPath);
        if (m_lastDateChanged != file_date) {
            // It's pointless to rehash an empty file, since the case
            // where a user has zero'd a file is handled above ...
            if (m_hpartfile.GetLength()) {
                AddLogLineM(false, CFormat( _("WARNING: %s might be corrupted (%i)") )
                    % (m_lastDateChanged - file_date) );

                SetPartFileStatus(PS_WAITINGFORHASH);

                CPath partFileName = m_partmetfilename.RemoveExt();
                CThreadScheduler::AddTask(new CHashingTask(m_filePath, partFileName, this));

    UpdateCompletedInfos();
    if (completedsize > transferred) {
        m_iGainDueToCompression = completedsize - transferred;
    } else if (completedsize != transferred) {
        m_iLostDueToCorruption = transferred - completedsize;
bool CPartFile::SavePartFile(bool Initial)

        case PS_WAITINGFORHASH:

    /* Don't write anything to disk if less than 100 KB of free space is left. */
    sint64 free = CPath::GetFreeSpaceAt(GetFilePath());
    if ((free != wxInvalidOffset) && (free < (100 * 1024))) {

        if (!m_PartPath.FileExists()) {
            throw wxString(wxT(".part file not found"));

        uint32 lsc = lastseencomplete;

        CPath::BackupFile(m_fullname, wxT(".backup"));
        CPath::RemoveFile(m_fullname);

        file.Open(m_fullname, CFile::write);
        if (!file.IsOpened()) {
            throw wxString(wxT("Failed to open part.met file"));

        file.WriteUInt8(IsLargeFile() ? PARTFILE_VERSION_LARGEFILE : PARTFILE_VERSION);

        file.WriteUInt32(CPath::GetModificationTime(m_PartPath));

        file.WriteHash(m_abyFileHash);
        uint16 parts = m_hashlist.size();
        file.WriteUInt16(parts);
        for (int x = 0; x < parts; ++x) {
            file.WriteHash(m_hashlist[x]);

        #define FIXED_TAGS 15
        uint32 tagcount = m_taglist.size() + FIXED_TAGS + (m_gaplist.size()*2);
        if (!m_corrupted_list.empty()) {

        if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){

        if (GetLastPublishTimeKadSrc()){

        if (GetLastPublishTimeKadNotes()){

        if (GetDlActiveTime()){

        file.WriteUInt32(tagcount);
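        // Illustrative example (not in the original source): with 3 entries in m_taglist,
        // 2 gaps (two tags each), a verified AICH master hash and none of the optional
        // corrupted-parts/Kad/active-time tags, the value written above is
        // 3 + 15 (FIXED_TAGS) + 4 + 1 = 23.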
        //#warning Kry - Where are lost by corruption and gained by compression?

        // 0 (unicoded part file name)
        // We write it with BOM to keep eMule compatibility. Note that the 'printable' filename is saved,
        // as presently the filename does not represent an actual file.
        CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file, utf8strOptBOM );
        CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file );                 // 1

        CTagIntSized( FT_FILESIZE, GetFileSize(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );     // 2
        CTagIntSized( FT_TRANSFERRED, transferred, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );    // 3
        CTagInt32( FT_STATUS, (m_paused ? 1 : 0)).WriteTagToFile( &file );                              // 4

        if ( IsAutoDownPriority() ) {
            CTagInt32( FT_DLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );       // 5
            CTagInt32( FT_OLDDLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );    // 6
            CTagInt32( FT_DLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );      // 5
            CTagInt32( FT_OLDDLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );   // 6

        CTagInt32( FT_LASTSEENCOMPLETE, lsc ).WriteTagToFile( &file );                // 7

        if ( IsAutoUpPriority() ) {
            CTagInt32( FT_ULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );       // 8
            CTagInt32( FT_OLDULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );    // 9
            CTagInt32( FT_ULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );      // 8
            CTagInt32( FT_OLDULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );   // 9

        CTagInt32(FT_CATEGORY, m_category).WriteTagToFile( &file );                                       // 10
        CTagInt32(FT_ATTRANSFERRED, statistic.GetAllTimeTransferred() & 0xFFFFFFFF).WriteTagToFile( &file ); // 11
        CTagInt32(FT_ATTRANSFERREDHI, statistic.GetAllTimeTransferred() >> 32).WriteTagToFile( &file );   // 12
        CTagInt32(FT_ATREQUESTED, statistic.GetAllTimeRequests()).WriteTagToFile( &file );                // 13
        CTagInt32(FT_ATACCEPTED, statistic.GetAllTimeAccepts()).WriteTagToFile( &file );                  // 14

        // corrupt part infos
        if (!m_corrupted_list.empty()) {
            wxString strCorruptedParts;
            std::list<uint16>::iterator it = m_corrupted_list.begin();
            for (; it != m_corrupted_list.end(); ++it) {
                uint16 uCorruptedPart = *it;
                if (!strCorruptedParts.IsEmpty()) {
                    strCorruptedParts += wxT(",");
                strCorruptedParts += wxString::Format(wxT("%u"), (unsigned)uCorruptedPart);
            wxASSERT( !strCorruptedParts.IsEmpty() );

            CTagString( FT_CORRUPTEDPARTS, strCorruptedParts ).WriteTagToFile( &file ); // 11?

        if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
            CTagString aichtag(FT_AICH_HASH, m_pAICHHashSet->GetMasterHash().GetString() );
            aichtag.WriteTagToFile(&file); // 12?

        if (GetLastPublishTimeKadSrc()){
            CTagInt32(FT_KADLASTPUBLISHSRC, GetLastPublishTimeKadSrc()).WriteTagToFile(&file); // 15?

        if (GetLastPublishTimeKadNotes()){
            CTagInt32(FT_KADLASTPUBLISHNOTES, GetLastPublishTimeKadNotes()).WriteTagToFile(&file); // 16?

        if (GetDlActiveTime()){
            CTagInt32(FT_DL_ACTIVE_TIME, GetDlActiveTime()).WriteTagToFile(&file); // 17

        for (uint32 j = 0; j < (uint32)m_taglist.size(); ++j) {
            m_taglist[j].WriteTagToFile(&file);

        for (CGapList::const_iterator it = m_gaplist.begin(); it != m_gaplist.end(); ++it) {
            wxString tagName = wxString::Format(wxT(" %u"), i_pos);

            // gap start = first missing byte but gap ends = first non-missing byte
            // in edonkey, but I think it's easier to use the real limits
            tagName[0] = FT_GAPSTART;
            CTagIntSized(tagName, it.start(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

            tagName[0] = FT_GAPEND;
            CTagIntSized(tagName, it.end() + 1, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

    } catch (const wxString& error) {
        AddLogLineNS(CFormat( _("ERROR while saving partfile: %s (%s ==> %s)") )
    } catch (const CIOFailureException& e) {
        AddLogLineCS(_("IO failure while saving partfile: ") + e.what());

        CPath::RemoveFile(m_fullname.AppendExt(wxT(".backup")));

        sint64 metLength = m_fullname.GetFileSize();
        if (metLength == wxInvalidOffset) {
            theApp->ShowAlert( CFormat( _("Could not retrieve length of '%s' - using %s file.") )
            CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
        } else if (metLength == 0) {
            // Don't backup if it's 0 size but raise a warning!!!
            theApp->ShowAlert( CFormat( _("'%s' is 0 size somehow - using %s file.") )
            CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
            // no error, just backup
            CPath::BackupFile(m_fullname, PARTMET_BAK_EXT);
void CPartFile::SaveSourceSeeds()

    #define MAX_SAVED_SOURCES 10

    // Kry - Sources seeds
    // Based on a Feature request, this saves the last MAX_SAVED_SOURCES
    // sources of the file, giving a 'seed' for the next run.
    // We save the last sources because:
    // 1 - They could be the hardest to get
    // 2 - They will more probably be available
    // However, if we have downloading sources, they have preference because
    // we probably have more credits on them.
    // Anyway, source exchange will get us the rest of the sources
    // This feature is currently used only on rare files (< 20 sources)
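    // Illustrative sketch (not part of the original source): the ".seeds" file written
    // below is a flat record list; expressed as a hypothetical struct it would look like
    //     struct SeedRecord {
    //         uint32   userIDHybrid;   // CUpDownClient::GetUserIDHybrid()
    //         uint16   userPort;       // CUpDownClient::GetUserPort()
    //         CMD4Hash userHash;       // 16 bytes
    //         uint8    cryptOptions;   // bit 0 supports, bit 1 requests, bit 2 requires crypt layer
    //     };
    // preceded by a version byte (0 marks the v3 layout) and a record count, and followed
    // by a uint32 timestamp that LoadSourceSeeds() uses to discard seeds older than two hours.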
    if (GetSourceCount() > 20) {

    CClientPtrList source_seeds;

    CClientPtrList::iterator it = m_downloadingSourcesList.begin();
    for ( ; it != m_downloadingSourcesList.end() && n_sources < MAX_SAVED_SOURCES; ++it) {
        CUpDownClient *cur_src = *it;
        if (!cur_src->HasLowID()) {
            source_seeds.push_back(cur_src);

    if (n_sources < MAX_SAVED_SOURCES) {
        // Not enough downloading sources to fill the list, going to sources list
        if (GetSourceCount() > 0) {
            SourceSet::reverse_iterator rit = m_SrcList.rbegin();
            for ( ; ((rit != m_SrcList.rend()) && (n_sources < MAX_SAVED_SOURCES)); ++rit) {
                CUpDownClient* cur_src = *rit;
                if (!cur_src->HasLowID()) {
                    source_seeds.push_back(cur_src);

    const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));

    file.Create(seedsPath, true);
    if (!file.IsOpened()) {
        AddLogLineM(false, CFormat( _("Failed to save part.met.seeds file for %s") )

        file.WriteUInt8(0); // v3, to avoid v2 clients choking on it.
        file.WriteUInt8(source_seeds.size());

        CClientPtrList::iterator it2 = source_seeds.begin();
        for (; it2 != source_seeds.end(); ++it2) {
            CUpDownClient* cur_src = *it2;
            file.WriteUInt32(cur_src->GetUserIDHybrid());
            file.WriteUInt16(cur_src->GetUserPort());
            file.WriteHash(cur_src->GetUserHash());
            // CryptSettings - See SourceExchange V4
            const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
            const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
            const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
            const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
            file.WriteUInt8(byCryptOptions);

        /* v2: Added to keep track of too old seeds */
        file.WriteUInt32(wxDateTime::Now().GetTicks());

        AddLogLineM(false, CFormat( wxPLURAL("Saved %i source seed for partfile: %s (%s)", "Saved %i source seeds for partfile: %s (%s)", n_sources) )
    } catch (const CIOFailureException& e) {
        AddDebugLogLineM(true, logPartFile, CFormat( wxT("Error saving partfile's seeds file (%s - %s): %s") )

        CPath::RemoveFile(seedsPath);
void CPartFile::LoadSourceSeeds()

    CMemFile sources_data;

    bool valid_sources = false;

    const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));
    if (!seedsPath.FileExists()) {

    CFile file(seedsPath, CFile::read);
    if (!file.IsOpened()) {
        AddLogLineM(false, CFormat( _("Partfile %s (%s) has no seeds file") )

        if (file.GetLength() <= 1) {
            AddLogLineM(false, CFormat( _("Partfile %s (%s) has a void seeds file") )

        uint8 src_count = file.ReadUInt8();

        bool bUseSX2Format = (src_count == 0);

        if (bUseSX2Format) {
            src_count = file.ReadUInt8();

        sources_data.WriteUInt16(src_count);

        for (int i = 0; i < src_count; ++i) {
            uint32 dwID = file.ReadUInt32();
            uint16 nPort = file.ReadUInt16();

            sources_data.WriteUInt32(bUseSX2Format ? dwID : wxUINT32_SWAP_ALWAYS(dwID));
            sources_data.WriteUInt16(nPort);
            sources_data.WriteUInt32(0);
            sources_data.WriteUInt16(0);

            if (bUseSX2Format) {
                sources_data.WriteHash(file.ReadHash());
                sources_data.WriteUInt8(file.ReadUInt8());

        // v2: Added to keep track of too old seeds
        time_t time = (time_t)file.ReadUInt32();

        // Time frame is 2 hours. More than enough to compile
        // your new aMule version!
        if ((time + MIN2S(120)) >= wxDateTime::Now().GetTicks()) {
            valid_sources = true;

            // v1 has no time data. We can safely use
            // the sources; they will be saved next time.
            valid_sources = true;

    if (valid_sources) {
        sources_data.Seek(0);
        AddClientSources(&sources_data, SF_SOURCE_SEEDS, bUseSX2Format ? 4 : 1, bUseSX2Format);

    } catch (const CSafeIOException& e) {
        AddLogLineM(false, CFormat( _("Error reading partfile's seeds file (%s - %s): %s") )
void CPartFile::PartFileHashFinished(CKnownFile* result)

    m_lastDateChanged = result->m_lastDateChanged;
    bool errorfound = false;
    if (GetED2KPartHashCount() == 0){
        if (IsComplete(0, GetFileSize()-1)){
            if (result->GetFileHash() != GetFileHash()){
                    "Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
                    "Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
                    % result->GetFileHash().Encode()
                    % GetFileHash().Encode() );
                AddGap(0, GetFileSize()-1);

        for (size_t i = 0; i < m_hashlist.size(); ++i){
            // Kry - trel_ar's completed parts check on rehashing.
            // Very nice feature, if a file is completed but .part.met doesn't believe it,
            uint64 partStart = i * PARTSIZE;
            uint64 partEnd   = partStart + GetPartSize(i) - 1;
            if (!( i < result->GetHashCount() && (result->GetPartHash(i) == GetPartHash(i)))){
                if (IsComplete(i)) {
                    if ( i < result->GetHashCount() )
                        wronghash = result->GetPartHash(i);

                            "Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
                            "Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
                            GetED2KPartHashCount())
                        % GetED2KPartHashCount()
                        % wronghash.Encode()
                        % GetPartHash(i).Encode() );

                if (!IsComplete(i)){
                    AddLogLineM(false, CFormat( _("Found completed part (%i) in %s") )

                    RemoveBlockFromList(partStart, partEnd);

            result->GetAICHHashset()->GetStatus() == AICH_HASHSETCOMPLETE &&
            status == PS_COMPLETING) {
        delete m_pAICHHashSet;
        m_pAICHHashSet = result->GetAICHHashset();
        result->SetAICHHashset(NULL);
        m_pAICHHashSet->SetOwner(this);
    else if (status == PS_COMPLETING) {
        AddDebugLogLineM(false, logPartFile,
            CFormat(wxT("Failed to store new AICH Hashset for completed file: %s"))

    if (status == PS_COMPLETING){

        AddLogLineM(false, CFormat( _("Finished rehashing %s") ) % GetFileName());

        SetStatus(PS_READY);

    SetStatus(PS_READY);
    theApp->sharedfiles->SafeAddKFile(this);
void CPartFile::AddGap(uint64 start, uint64 end)

    m_gaplist.AddGap(start, end);
    UpdateDisplayedInfo();

void CPartFile::AddGap(uint16 part)

    m_gaplist.AddGap(part);
    UpdateDisplayedInfo();
bool CPartFile::IsAlreadyRequested(uint64 start, uint64 end)

    std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
    for (; it != m_requestedblocks_list.end(); ++it) {
        Requested_Block_Struct* cur_block = *it;

        if ((start <= cur_block->EndOffset) && (end >= cur_block->StartOffset)) {
bool CPartFile::GetNextEmptyBlockInPart(uint16 partNumber, Requested_Block_Struct *result)

    // Find start of this part
    uint64 partStart = (PARTSIZE * partNumber);
    uint64 start = partStart;

    // What is the end limit of this block, i.e. can't go outside part (or filesize)
    uint64 partEnd = partStart + GetPartSize(partNumber) - 1;
    // Loop until we find a suitable gap and return true, or there are no more gaps and return false
    CGapList::const_iterator it = m_gaplist.begin();

        uint64 gapStart, end;

        // Find the first gap from the start position
        for (; it != m_gaplist.end(); ++it) {
            gapStart = it.start();

            // Want gaps that overlap start<->partEnd
            if (gapStart <= partEnd && end >= start) {
            } else if (gapStart > partEnd) {

        // If no gaps after start, exit

        // Update start position if gap starts after current pos
        if (start < gapStart) {

        // Find end, keeping within the max block size and the part limit
        uint64 blockLimit = partStart + (BLOCKSIZE * (((start - partStart) / BLOCKSIZE) + 1)) - 1;
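        // Illustrative example (not in the original source): assuming the usual eD2K block
        // size of 180 kB (BLOCKSIZE = 184320), a start offset 190000 bytes into the part
        // gives (190000 / 184320) = 1, so blockLimit is the last byte of the part's second
        // 180 kB block; a requested range therefore never crosses a block boundary.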
        if (end > blockLimit) {
        if (end > partEnd) {
        // If this gap has not already been requested, we have found a valid entry
        if (!IsAlreadyRequested(start, end)) {
            // Was this block to be returned
            if (result != NULL) {
                result->StartOffset = start;
                result->EndOffset = end;
                md4cpy(result->FileID, GetFileHash().GetHash());
                result->transferred = 0;

        // Reposition to end of that gap

        // If tried all gaps then break out of the loop
        if (end == partEnd) {

    // No suitable gap found
void CPartFile::FillGap(uint64 start, uint64 end)

    m_gaplist.FillGap(start, end);
    UpdateCompletedInfos();
    UpdateDisplayedInfo();

void CPartFile::FillGap(uint16 part)

    m_gaplist.FillGap(part);
    UpdateCompletedInfos();
    UpdateDisplayedInfo();
void CPartFile::UpdateCompletedInfos()

    uint64 allgaps = m_gaplist.GetGapSize();

    percentcompleted = (1.0 - (double)allgaps/GetFileSize()) * 100.0;
    completedsize = GetFileSize() - allgaps;
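// Illustrative example (not in the original source): for a 100 MB file with 25 MB still
// missing in gaps, the two assignments above yield percentcompleted = 75.0 and
// completedsize = 75 MB.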
void CPartFile::WritePartStatus(CMemFile* file)

    uint16 parts = GetED2KPartCount();
    file->WriteUInt16(parts);

    while (done != parts){
        for (uint32 i = 0; i != 8; ++i) {
            if (IsComplete(i)) {
            if (done == parts) {
        file->WriteUInt8(towrite);
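// Illustrative example (not in the original source), assuming the usual eD2K part-status
// encoding in which each group of eight parts is packed LSB-first into one byte: a file
// with 10 parts of which parts 0, 1 and 9 are complete would be announced as the count
// 0x000A followed by the bytes 0x03 and 0x02.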
void CPartFile::WriteCompleteSourcesCount(CMemFile* file)

    file->WriteUInt16(m_nCompleteSourcesCount);
uint32 CPartFile::Process(uint32 reducedownload /*in percent*/, uint8 m_icounter)

    uint32 dwCurTick = ::GetTickCount();

    // If buffer size exceeds limit, or if not written within time limit, flush data
    if ( (m_nTotalBufferData > thePrefs::GetFileBufferSize()) ||
         (dwCurTick > (m_nLastBufferFlushTime + BUFFER_TIME_LIMIT))) {
        // Avoid flushing while copying preview file
        if (!m_bPreviewing) {

    // check if we want new sources from server --> MOVED for 16.40 version
    old_trans = transferingsrc;

    if (m_icounter < 10) {
        // Update only downloading sources.
        CClientPtrList::iterator it = m_downloadingSourcesList.begin();
        for ( ; it != m_downloadingSourcesList.end(); ) {
            CUpDownClient *cur_src = *it++;
            if (cur_src->GetDownloadState() == DS_DOWNLOADING) {
                kBpsDown += cur_src->SetDownloadLimit(reducedownload);

        // Update all sources (including downloading sources)
        for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
            CUpDownClient* cur_src = *it++;
            switch (cur_src->GetDownloadState()) {
                case DS_DOWNLOADING: {
                    kBpsDown += cur_src->SetDownloadLimit(reducedownload);

                case DS_LOWTOLOWIP: {
                    if ( cur_src->HasLowID() && !theApp->DoCallback( cur_src ) ) {
                        // If we are almost maxed on sources,
                        // slowly remove these clients to see
                        // if we can find a better source.
                        if ( ((dwCurTick - lastpurgetime) > 30000) &&
                             (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8))) {
                            RemoveSource( cur_src );
                            lastpurgetime = dwCurTick;

                        cur_src->SetDownloadState(DS_ONQUEUE);

                case DS_NONEEDEDPARTS: {
                    // we try to purge no-needed sources, even without reaching the limit
                    if ((dwCurTick - lastpurgetime) > 40000) {
                        if (!cur_src->SwapToAnotherFile(false, false, false, NULL)) {
                            // however we only delete them if reaching the limit
                            if (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) {
                                RemoveSource(cur_src);
                                lastpurgetime = dwCurTick;
                                break; //Johnny-B - nothing more to do here (good eye!)
                            lastpurgetime = dwCurTick;

                    // doubled reasktime for no needed parts - save connections and traffic
                    if ( !((!cur_src->GetLastAskedTime()) ||
                           (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME*2)) {

                    // Recheck this client to see if still NNP..
                    // Set to DS_NONE so that we force a TCP reask next time..
                    cur_src->SetDownloadState(DS_NONE);

                    if ( cur_src->IsRemoteQueueFull()) {
                        if ( ((dwCurTick - lastpurgetime) > 60000) &&
                             (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) ) {
                            RemoveSource( cur_src );
                            lastpurgetime = dwCurTick;
                            break; //Johnny-B - nothing more to do here (good eye!)

                    // Give up to 1 min for UDP to respond..
                    // If we are within one min on TCP, do not try..
                    if ( theApp->IsConnected() &&
                         ( (!cur_src->GetLastAskedTime()) ||
                           (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME-20000)) {
                        cur_src->UDPReaskForDownload();

                // No break here, since the next case takes care of asking for downloads.
                case DS_TOOMANYCONNS:
                case DS_WAITCALLBACK:
                case DS_WAITCALLBACKKAD: {
                    if ( theApp->IsConnected() &&
                         ( (!cur_src->GetLastAskedTime()) ||
                           (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME)) {
                        if (!cur_src->AskForDownload()) {
                            // I left this break here just as a reminder
                            // just in case we rearrange things..

        /* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
        if (IsA4AFAuto() && ((!m_LastNoNeededCheck) || (dwCurTick - m_LastNoNeededCheck > 900000))) {
            m_LastNoNeededCheck = dwCurTick;
            for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
                CUpDownClient *cur_source = *it++;
                uint8 download_state = cur_source->GetDownloadState();
                if ( download_state != DS_DOWNLOADING
                     && cur_source->GetRequestFile()
                     && ((!cur_source->GetRequestFile()->IsA4AFAuto()) || download_state == DS_NONEEDEDPARTS))
                    cur_source->SwapToAnotherFile(false, false, false, this);
        /* eMule 0.30c implementation, i give it a try (Creteil) END ... */

        // swap No needed partfiles if possible

        if (((old_trans==0) && (transferingsrc>0)) || ((old_trans>0) && (transferingsrc==0))) {
            SetPartFileStatus(status);

        // Kad source search
        if ( GetMaxSourcePerFileUDP() > GetSourceCount()){
            // Once we can handle lowID users in Kad, we remove the second IsConnected
            if (theApp->downloadqueue->DoKademliaFileRequest() && (Kademlia::CKademlia::GetTotalFile() < KADEMLIATOTALFILE) && (dwCurTick > m_LastSearchTimeKad) && Kademlia::CKademlia::IsConnected() && theApp->IsConnected() && !IsStopped()){
                theApp->downloadqueue->SetLastKademliaFileRequest();

                if (GetKadFileSearchID()) {
                    /* This will never happen anyway. We're talking a
                       1h timespan and searches are at max 45secs */
                    Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);

                Kademlia::CUInt128 kadFileID(GetFileHash().GetHash());
                Kademlia::CSearch* pSearch = Kademlia::CSearchManager::PrepareLookup(Kademlia::CSearch::FILE, true, kadFileID);
                AddDebugLogLineM(false, logKadSearch, CFormat(wxT("Preparing a Kad Search for '%s'")) % GetFileName());
                    AddDebugLogLineM(false, logKadSearch, CFormat(wxT("Kad lookup started for '%s'")) % GetFileName());
                    if (m_TotalSearchesKad < 7) {
                        m_TotalSearchesKad++;
                    m_LastSearchTimeKad = dwCurTick + (KADEMLIAREASKTIME*m_TotalSearchesKad);
                    SetKadFileSearchID(pSearch->GetSearchID());

            if (GetKadFileSearchID()) {
                Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);

        // check if we want new sources from server
        if ( !m_localSrcReqQueued &&
             ( (!m_lastsearchtime) ||
               (dwCurTick - m_lastsearchtime) > SERVERREASKTIME) &&
             theApp->IsConnectedED2K() &&
             thePrefs::GetMaxSourcePerFileSoft() > GetSourceCount() &&
            m_localSrcReqQueued = true;
            theApp->downloadqueue->SendLocalSrcRequest(this);

    // calculate datarate, set limit etc.

    // Kry - does the 3 / 30 difference produce too much flickering or CPU?
    if (m_count >= 30) {
        UpdateAutoDownPriority();
        UpdateDisplayedInfo();
        if (m_bPercentUpdated == false) {
            UpdateCompletedInfos();
        m_bPercentUpdated = false;
        if (thePrefs::ShowCatTabInfos()) {
            Notify_ShowUpdateCatTabTitles();

    // release file handle if unused for some time
    m_hpartfile.Release();

    return (uint32)(kBpsDown*1024.0);
bool CPartFile::CanAddSource(uint32 userid, uint16 port, uint32 serverip, uint16 serverport, uint8* pdebug_lowiddropped, bool ed2kID)

    // The incoming ID could have the userid in the Hybrid format..
    uint32 hybridID = 0;

    if (IsLowID(userid)) {
        hybridID = wxUINT32_SWAP_ALWAYS(userid);
        if (!IsLowID(userid)) {
            userid = wxUINT32_SWAP_ALWAYS(userid);

    // MOD Note: Do not change this part - Merkur
    if (theApp->IsConnectedED2K()) {
        if (::IsLowID(theApp->GetED2KID())) {
            if (theApp->GetED2KID() == userid && theApp->serverconnect->GetCurrentServer()->GetIP() == serverip && theApp->serverconnect->GetCurrentServer()->GetPort() == serverport) {
            if (theApp->GetPublicIP() == userid) {
            if (theApp->GetED2KID() == userid && thePrefs::GetPort() == port) {

    if (Kademlia::CKademlia::IsConnected()) {
        if (!Kademlia::CKademlia::IsFirewalled()) {
            if (Kademlia::CKademlia::GetIPAddress() == hybridID && thePrefs::GetPort() == port) {

    // This allows *.*.*.0 clients to not be removed if Ed2kID == false
    if ( IsLowID(hybridID) && theApp->IsFirewalled()) {
        if (pdebug_lowiddropped) {
            (*pdebug_lowiddropped)++;
void CPartFile::AddSources(CMemFile& sources, uint32 serverip, uint16 serverport, unsigned origin, bool bWithObfuscationAndHash)

    uint8 count = sources.ReadUInt8();
    uint8 debug_lowiddropped = 0;
    uint8 debug_possiblesources = 0;
    CMD4Hash achUserHash;

        // since we may receive multiple search source UDP results we have to "consume" all data of that packet
        AddDebugLogLineM(false, logPartFile, wxT("Trying to add sources for a stopped file"));
        sources.Seek(count*(4+2), wxFromCurrent);

    for (int i = 0; i != count; ++i) {
        uint32 userid = sources.ReadUInt32();
        uint16 port   = sources.ReadUInt16();

        uint8 byCryptOptions = 0;
        if (bWithObfuscationAndHash){
            byCryptOptions = sources.ReadUInt8();
            if ((byCryptOptions & 0x80) > 0) {
                achUserHash = sources.ReadHash();

            if ((thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x01/*supported*/) > 0 && (byCryptOptions & 0x80) == 0)
                || (thePrefs::IsClientCryptLayerSupported() && (byCryptOptions & 0x02/*requested*/) > 0 && (byCryptOptions & 0x80) == 0)) {
                AddDebugLogLineM(false, logPartFile, wxString::Format(wxT("Server didn't provide UserHash for source %u, even though it was expected to (or local obfuscation settings changed during serverconnect)"), userid));
            } else if (!thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x02/*requested*/) == 0 && (byCryptOptions & 0x80) != 0) {
                AddDebugLogLineM(false, logPartFile, wxString::Format(wxT("Server provided UserHash for source %u, even though it wasn't expected to (or local obfuscation settings changed during serverconnect)"), userid));

        // "Filter LAN IPs" and "IPfilter" the received sources IP addresses
        if (!IsLowID(userid)) {
            // check for 0-IP, localhost and optionally for LAN addresses
            if ( !IsGoodIP(userid, thePrefs::FilterLanIPs()) ) {
            if (theApp->ipfilter->IsFiltered(userid)) {

        if (!CanAddSource(userid, port, serverip, serverport, &debug_lowiddropped)) {

        if (thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
            ++debug_possiblesources;
            CUpDownClient* newsource = new CUpDownClient(port, userid, serverip, serverport, this, true, true);

            newsource->SetSourceFrom((ESourceFrom)origin);
            newsource->SetConnectOptions(byCryptOptions, true, false);

            if ((byCryptOptions & 0x80) != 0) {
                newsource->SetUserHash(achUserHash);

            theApp->downloadqueue->CheckAndAddSource(this, newsource);
            AddDebugLogLineM(false, logPartFile, wxT("Consuming a packet because of max sources reached"));
            // Since we may receive multiple search source UDP results we have to "consume" all data of that packet
            // This '+1' is added because 'i' counts from 0.
            sources.Seek((count-(i+1))*(4+2), wxFromCurrent);
            if (GetKadFileSearchID()) {
                Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
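            // Illustrative example (not in the original source): the Seek() call above skips
            // the unread 6-byte (IP + port) records so the packet stays consistent; if the
            // server announced count = 10 sources and the limit is hit at i == 3, the
            // remaining (10 - 4) * 6 = 36 bytes are consumed without creating clients.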
void CPartFile::UpdatePartsInfo()

    if ( !IsPartFile() ) {
        CKnownFile::UpdatePartsInfo();

    uint16 partcount = GetPartCount();
    bool flag = (time(NULL) - m_nCompleteSourcesTime > 0);

    // Ensure the frequency-list is ready
    if ( m_SrcpartFrequency.size() != GetPartCount() ) {
        m_SrcpartFrequency.clear();
        m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

    // Find number of available parts
    uint16 availablecounter = 0;
    for ( uint16 i = 0; i < partcount; ++i ) {
        if ( m_SrcpartFrequency[i] )

    if ( ( availablecounter == partcount ) && ( m_availablePartsCount < partcount ) ) {
        lastseencomplete = time(NULL);

    m_availablePartsCount = availablecounter;

        ArrayOfUInts16 count;

        count.reserve(GetSourceCount());

        for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it ) {
            if ( !(*it)->GetUpPartStatus().empty() && (*it)->GetUpPartCount() == partcount ) {
                count.push_back((*it)->GetUpCompleteSourcesCount());

        m_nCompleteSourcesCount = m_nCompleteSourcesCountLo = m_nCompleteSourcesCountHi = 0;

        for (uint16 i = 0; i < partcount; ++i) {
                m_nCompleteSourcesCount = m_SrcpartFrequency[i];
            else if ( m_nCompleteSourcesCount > m_SrcpartFrequency[i]) {
                m_nCompleteSourcesCount = m_SrcpartFrequency[i];
        count.push_back(m_nCompleteSourcesCount);

        int32 n = count.size();

        std::sort(count.begin(), count.end(), std::less<uint16>());

        int32 i = n >> 1;       // (n / 2)
        int32 j = (n * 3) >> 2; // (n * 3) / 4
        int32 k = (n * 7) >> 3; // (n * 7) / 8
        // When still a part file, adjust your guesses by 20% to what you see..

            // Not many sources, so just use what you see..
            // welcome to 'plain stupid code'
            // m_nCompleteSourcesCount;
            m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
            m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
        } else if (n < 20) {
            // For low guess and normal guess count
            //   If we see more sources than the guessed low and normal, use what we see.
            //   If we see fewer sources than the guessed low, the network accounts for 80%,
            //   we account for 20% with what we see and make sure we are still above the normal.

            // Adjust 80% network and 20% what we see.
            if ( count[i] < m_nCompleteSourcesCount ) {
                m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
                m_nCompleteSourcesCountLo =
                    (uint16)((float)(count[i]*.8) +
                             (float)(m_nCompleteSourcesCount*.2));
            m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
            m_nCompleteSourcesCountHi =
                (uint16)((float)(count[j]*.8) +
                         (float)(m_nCompleteSourcesCount*.2));
            if ( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
                m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

            //   The network accounts for 80%, we account for 20% with what
            //   we see and make sure we are still above the low.

            //   The network accounts for 80%, we account for 20% with what
            //   we see and make sure we are still above the normal.

            m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
            m_nCompleteSourcesCount = (uint16)((float)(count[j]*.8)+(float)(m_nCompleteSourcesCount*.2));
            if ( m_nCompleteSourcesCount < m_nCompleteSourcesCountLo ) {
                m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
            m_nCompleteSourcesCountHi = (uint16)((float)(count[k]*.8)+(float)(m_nCompleteSourcesCount*.2));
            if ( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
                m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

        m_nCompleteSourcesTime = time(NULL) + (60);

    UpdateDisplayedInfo();
// [Maella -Enhanced Chunk Selection- (based on jicxicmic)]
bool CPartFile::GetNextRequestedBlock(CUpDownClient* sender,
                                      std::vector<Requested_Block_Struct*>& toadd, uint16& count)

    // The purpose of this function is to return a list of blocks (~180KB) to
    // download. To avoid a premature stop of the downloading, all blocks that
    // are requested from the same source must be located within the same
    // chunk (=> part ~9MB).
    //
    // The selection of the chunk to download is one of the CRITICAL parts of the
    // edonkey network. The selection algorithm must ensure the best spreading
    //
    // The selection is based on 4 criteria:
    //  1. Frequency of the chunk (availability): very rare chunks must be downloaded
    //     as quickly as possible to become a new available source.
    //  2. Parts used for preview (first + last chunk), to preview or check a
    //     file (e.g. movie, mp3)
    //  3. Request state (downloading in progress): try to ask each source for another
    //     chunk. Spread the requests between all sources.
    //  4. Completion (shortest-to-complete): partially retrieved chunks should be
    //     completed before starting to download another one.
    //
    // The frequency criterion defines three zones: very rare (<10%), rare (<50%)
    // and common (>50%). Inside each zone, the criteria have a specific weight, used
    // to calculate the priority of chunks. The chunk(s) with the highest
    // priority (highest=0, lowest=0xffff) is/are selected first.
    //
    //          very rare  (preview)           rare                       common
    //    0% <---- +0 pt ----> 10% <----- +10000 pt -----> 50% <---- +20000 pt ----> 100%
    // 1.  <------- frequency: +25*frequency pt ----------->
    // 2.  <- preview: +1 pt --><------------- preview: set to 10000 pt -------------->
    // 3.  <------ request: download in progress +20000 pt ------>
    // 4a. <- completion: 0% +100, 25% +75 .. 100% +0 pt --><-- !req => completion --->
    // 4b. <--- req => !completion -->
    //
    // Unrolled, the priority scale is:
    //
    //     0..xxxx   unrequested and requested very rare chunks
    // 10000..1xxxx  unrequested rare chunks + unrequested preview chunks
    // 20000..2xxxx  unrequested common chunks (priority to the most complete)
    // 30000..3xxxx  requested rare chunks + requested preview chunks
    // 40000..4xxxx  requested common chunks (priority to the least complete)
    //
    // This algorithm usually selects the rarest chunk(s) first. However, partially
    // complete chunk(s) that is/are close to completion may overtake the priority
    // (priority inversion).
    // For the common chunks, the algorithm tries to spread the download between
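    // Illustrative worked example (not in the original source): an unrequested chunk in
    // the very-rare zone seen at 3 sources, not used for preview and 40% complete, gets
    // rank = 25*3 + 1 + (100 - 40) = 136, so it is picked well before any chunk in the
    // 10000+ (rare/preview) or 20000+ (common) bands.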
    // Check input parameters
    if ( sender->GetPartStatus().empty() ) {

    // Define and create the list of the chunks to download
    const uint16 partCount = GetPartCount();
    ChunkList chunksList;

    uint16 newBlockCount = 0;
    while (newBlockCount != count) {
        // Create a request block structure if a chunk has been previously selected
        if (sender->GetLastPartAsked() != 0xffff) {
            Requested_Block_Struct* pBlock = new Requested_Block_Struct;
            if (GetNextEmptyBlockInPart(sender->GetLastPartAsked(), pBlock) == true) {
                // Keep a track of all pending requested blocks
                m_requestedblocks_list.push_back(pBlock);
                // Update list of blocks to return
                toadd.push_back(pBlock);
                // Skip end of loop (=> CPU load)
                // All blocks for this chunk have already been requested
                // => Try to select another chunk
                sender->SetLastPartAsked(0xffff);

        // Check if a new chunk must be selected (e.g. download starting, previous chunk complete)
        if (sender->GetLastPartAsked() == 0xffff) {
            // Quantify all chunks (create list of chunks to download)
            // This is done only one time and only if it is necessary (=> CPU load)
            if (chunksList.empty()) {
                // Identify the locally missing part(s) that this source has
                for (uint16 i = 0; i < partCount; ++i) {
                    if (sender->IsPartAvailable(i) == true && GetNextEmptyBlockInPart(i, NULL) == true) {
                        // Create a new entry for this chunk and add it to the list
                        newEntry.frequency = m_SrcpartFrequency[i];
                        chunksList.push_back(newEntry);

                // Check if any block(s) could be downloaded
                if (chunksList.empty()) {
                    break; // Exit main loop while()

                // Define the bounds of the three zones (very rare, rare)
                // more depending on available sources
                if (GetSourceCount() > 800) {
                } else if (GetSourceCount() > 200) {
                uint16 limit = modif*GetSourceCount()/100;

                const uint16 veryRareBound = limit;
                const uint16 rareBound = 2*limit;

                // Cache Preview state (Criterion 2)
                FileType type = GetFiletype(GetFileName());
                const bool isPreviewEnable =
                    thePrefs::GetPreviewPrio() &&
                    (type == ftArchive || type == ftVideo);

                // Collect and calculate criteria for all chunks
                for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
                    Chunk& cur_chunk = *it;

                    const uint64 uStart = cur_chunk.part * PARTSIZE;
                    const uint64 uEnd   = uStart + GetPartSize(cur_chunk.part) - 1;
                    // Criterion 2. Parts used for preview
                    // Remark: - We need to download the first part and the last part(s).
                    //         - When the last part is very small, it's necessary to
                    //           download the two last parts.
                    bool critPreview = false;
                    if (isPreviewEnable == true) {
                        if (cur_chunk.part == 0) {
                            critPreview = true;    // First chunk
                        } else if (cur_chunk.part == partCount-1) {
                            critPreview = true;    // Last chunk
                        } else if (cur_chunk.part == partCount-2) {
                            // Last chunk - 1 (only if last chunk is too small)
                            const uint32 sizeOfLastChunk = GetFileSize() - uEnd;
                            if (sizeOfLastChunk < PARTSIZE/3) {
                                critPreview = true;    // Last chunk - 1

                    // Criterion 3. Request state (downloading in progress from other source(s))
                    const bool critRequested =
                        cur_chunk.frequency > veryRareBound &&
                        IsAlreadyRequested(uStart, uEnd);

                    // Criterion 4. Completion
                    // PARTSIZE instead of GetPartSize() favours the last chunk - but that may be intentional
                    uint32 partSize = PARTSIZE - m_gaplist.GetGapSize(cur_chunk.part);
                    const uint16 critCompletion = (uint16)(partSize/(PARTSIZE/100)); // in [%]

                    // Calculate priority with all criteria
                    if (cur_chunk.frequency <= veryRareBound) {
                        // 0..xxxx unrequested + requested very rare chunks
                        cur_chunk.rank = (25 * cur_chunk.frequency) +            // Criterion 1
                            ((critPreview == true) ? 0 : 1) +                    // Criterion 2
                            (100 - critCompletion);                              // Criterion 4
                    } else if (critPreview == true) {
                        // 10000..10100  unrequested preview chunks
                        // 30000..30100  requested preview chunks
                        cur_chunk.rank = ((critRequested == false) ? 10000 : 30000) + // Criterion 3
                            (100 - critCompletion);                              // Criterion 4
                    } else if (cur_chunk.frequency <= rareBound) {
                        // 10101..1xxxx  unrequested rare chunks
                        // 30101..3xxxx  requested rare chunks
                        cur_chunk.rank = (25 * cur_chunk.frequency) +            // Criterion 1
                            ((critRequested == false) ? 10101 : 30101) +         // Criterion 3
                            (100 - critCompletion);                              // Criterion 4
                        if (critRequested == false) {                            // Criterion 3
                            // 20000..2xxxx  unrequested common chunks
                            cur_chunk.rank = 20000 +                             // Criterion 3
                                (100 - critCompletion);                          // Criterion 4
                            // 40000..4xxxx  requested common chunks
                            // Remark: The weight of the completion criterion is inverted
                            //         to spread the requests over the completing chunks.
                            //         Without this, the chunk closest to completion would
                            //         receive every new source.
                            cur_chunk.rank = 40000 +                             // Criterion 3
                                (critCompletion);                                // Criterion 4

            // Select the next chunk to download
            if (!chunksList.empty()) {
                // Find and count the chunk(s) with the highest priority
                uint16 chunkCount = 0;  // Number of found chunks with same priority
                uint16 rank = 0xffff;   // Highest priority found

                // Collect and calculate criteria for all chunks
                for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
                    const Chunk& cur_chunk = *it;
                    if (cur_chunk.rank < rank) {
                        rank = cur_chunk.rank;
                    } else if (cur_chunk.rank == rank) {

                // Use a random access to avoid that everybody tries to download the
                // same chunks at the same time (=> spread the selected chunk among clients)
                uint16 randomness = 1 + (int) (((float)(chunkCount-1))*rand()/(RAND_MAX+1.0));

                for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
                    const Chunk& cur_chunk = *it;
                    if (cur_chunk.rank == rank) {
                        if (randomness == 0) {
                            // Selection process is over
                            sender->SetLastPartAsked(cur_chunk.part);
                            // Remark: this list might be reused up to *count times
                            chunksList.erase(it);
                            break; // exit loop for()

                // There is no remaining chunk to download
                break; // Exit main loop while()

    // Return the number of the blocks
    count = newBlockCount;

    return (newBlockCount > 0);
void CPartFile::RemoveBlockFromList(uint64 start, uint64 end)
{
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	while (it != m_requestedblocks_list.end()) {
		std::list<Requested_Block_Struct*>::iterator it2 = it++;

		if ((*it2)->StartOffset <= start && (*it2)->EndOffset >= end) {
			m_requestedblocks_list.erase(it2);
		}
	}
}
void CPartFile::RemoveAllRequestedBlocks(void)
{
	m_requestedblocks_list.clear();
}
void CPartFile::CompleteFile(bool bIsHashingDone)
{
	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
	}

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	AddDebugLogLineM( false, logPartFile, wxString( wxT("CPartFile::CompleteFile: Hash ") ) + ( bIsHashingDone ? wxT("done") : wxT("not done") ) );

	if (!bIsHashingDone) {
		SetPartFileStatus(PS_COMPLETING);

		CPath partFile = m_partmetfilename.RemoveExt();
		CThreadScheduler::AddTask(new CHashingTask(GetFilePath(), partFile, this));
	} else {
		m_is_A4AF_auto = false;
		SetPartFileStatus(PS_COMPLETING);
		// guess I was wrong about not needing to spawn a thread ...
		// It is needed if the temp and incoming dirs are on different
		// partitions/drives and the file is large... [oz]
		PerformFileComplete();
	}

	if (thePrefs::ShowCatTabInfos()) {
		Notify_ShowUpdateCatTabTitles();
	}
	UpdateDisplayedInfo(true);
}
void CPartFile::CompleteFileEnded(bool errorOccured, const CPath& newname)
{
	if (errorOccured) {
		SetPartFileStatus(PS_ERROR);
		AddLogLineM(true, CFormat( _("Unexpected error while completing %s. File paused") ) % GetFileName() );
	} else {
		m_fullname = newname;

		SetFilePath(m_fullname.GetPath());
		SetFileName(m_fullname.GetFullName());
		m_lastDateChanged = CPath::GetModificationTime(m_fullname);

		SetPartFileStatus(PS_COMPLETE);

		// TODO: What the f*** if it is already known?
		theApp->knownfiles->SafeAddKFile(this);

		// remove the file from the suspended uploads list
		theApp->uploadqueue->ResumeUpload(GetFileHash());
		theApp->downloadqueue->RemoveFile(this);
		theApp->sharedfiles->SafeAddKFile(this);
		UpdateDisplayedInfo(true);

		// republish the file to the ed2k-server to update the 'FT_COMPLETE_SOURCES' counter on the server.
		theApp->sharedfiles->RepublishFile(this);

		// Ensure that 'completed' shows the correct value
		completedsize = GetFileSize();

		// clear the blackbox to free up memory
		m_CorruptionBlackBox->Free();

		AddLogLineM(true, CFormat( _("Finished downloading: %s") ) % GetFileName() );
	}

	theApp->downloadqueue->StartNextFile(this);
}
void CPartFile::PerformFileComplete()
{
	// add this file to the suspended uploads list
	theApp->uploadqueue->SuspendUpload(GetFileHash());

	// close permanent handle
	if (m_hpartfile.IsOpened()) {
		m_hpartfile.Close();
	}

	// Schedule task for completion of the file
	CThreadScheduler::AddTask(new CCompletionTask(this));
}
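// Note: the actual completion work (moving the finished file out of the temp dir,
// which can be a slow cross-partition copy as the comment in CompleteFile() points
// out) is done asynchronously by the scheduled CCompletionTask; the outcome is then
// reported back through CompleteFileEnded() above.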
void CPartFile::RemoveAllSources(bool bTryToSwap)
{
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = *it++;
		if (bTryToSwap) {
			if (!cur_src->SwapToAnotherFile(true, true, true, NULL)) {
				RemoveSource(cur_src, true, false);
				// If it was not swapped, it's not on any file anymore, and should die
			}
		} else {
			RemoveSource(cur_src, true, false);
		}
	}

	/* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
	// remove all A4AF links from sources to this file
	if (!m_A4AFsrclist.empty()) {
		for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
			CUpDownClient* cur_src = *it++;
			if ( cur_src->DeleteFileRequest( this ) ) {
				Notify_DownloadCtrlRemoveSource(cur_src, this);
			}
		}
		m_A4AFsrclist.clear();
	}
	/* eMule 0.30c implementation, i give it a try (Creteil) END ... */

	UpdateFileRatingCommentAvail();
}
void CPartFile::Delete()
{
	AddLogLineM(false, CFormat(_("Deleting file: %s")) % GetFileName());
	// Barry - Need to tell any connected clients to stop sending the file
	StopFile(true);
	AddDebugLogLineM(false, logPartFile, wxT("\tStopped"));

	theApp->sharedfiles->RemoveFile(this);
	AddDebugLogLineM(false, logPartFile, wxT("\tRemoved from shared"));
	theApp->downloadqueue->RemoveFile(this);
	AddDebugLogLineM(false, logPartFile, wxT("\tRemoved from download queue"));
	Notify_DownloadCtrlRemoveFile(this);
	AddDebugLogLineM(false, logPartFile, wxT("\tRemoved from transferwnd"));

	if (m_hpartfile.IsOpened()) {
		m_hpartfile.Close();
	}
	AddDebugLogLineM(false, logPartFile, wxT("\tClosed"));

	if (!CPath::RemoveFile(m_fullname)) {
		AddDebugLogLineM(true, logPartFile, CFormat(wxT("\tFailed to delete '%s'")) % m_fullname);
	} else {
		AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .part.met"));
	}

	if (!CPath::RemoveFile(m_PartPath)) {
		AddDebugLogLineM(true, logPartFile, CFormat(wxT("Failed to delete '%s'")) % m_PartPath);
	} else {
		AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .part"));
	}

	CPath BAKName = m_fullname.AppendExt(PARTMET_BAK_EXT);
	if (!CPath::RemoveFile(BAKName)) {
		AddDebugLogLineM(true, logPartFile, CFormat(wxT("Failed to delete '%s'")) % BAKName);
	} else {
		AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .BAK"));
	}

	CPath SEEDSName = m_fullname.AppendExt(wxT(".seeds"));
	if (SEEDSName.FileExists()) {
		if (CPath::RemoveFile(SEEDSName)) {
			AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .seeds"));
		} else {
			AddDebugLogLineM(true, logPartFile, CFormat(wxT("Failed to delete '%s'")) % SEEDSName);
		}
	}

	AddDebugLogLineM(false, logPartFile, wxT("Done"));
}
bool CPartFile::HashSinglePart(uint16 partnumber)
{
	if ((GetHashCount() <= partnumber) && (GetPartCount() > 1)) {
		AddLogLineM(true,
			CFormat( _("WARNING: Unable to hash downloaded part - hashset incomplete for '%s'") )
				% GetFileName() );
		m_hashsetneeded = true;
		return true;
	} else if ((GetHashCount() <= partnumber) && GetPartCount() != 1) {
		AddLogLineM(true, CFormat( _("ERROR: Unable to hash downloaded part - hashset incomplete (%s). This should never happen")) % GetFileName() );
		m_hashsetneeded = true;
		return true;
	} else {
		CMD4Hash hashresult;
		uint64 offset = PARTSIZE * partnumber;
		uint32 length = GetPartSize(partnumber);
		try {
			m_hpartfile.Seek(offset, wxFromStart);
			CreateHashFromFile(m_hpartfile, length, &hashresult, NULL);
		} catch (const CIOFailureException& e) {
			AddLogLineM(true, CFormat( wxT("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
				% partnumber % length % (offset + length) % GetFileName() % GetFileSize() % e.what());
			SetPartFileStatus(PS_ERROR);
			return false;
		} catch (const CEOFException& e) {
			AddLogLineM(true, CFormat( wxT("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
				% partnumber % length % (offset + length) % GetFileName() % GetFileSize() % e.what());
			return false;
		}

		if (GetPartCount() > 1) {
			if (hashresult != GetPartHash(partnumber)) {
				AddDebugLogLineM(false, logPartFile, CFormat( wxT("%s: Expected hash of part %d: %s")) % GetFileName() % partnumber % GetPartHash(partnumber).Encode() );
				AddDebugLogLineM(false, logPartFile, CFormat( wxT("%s: Actual hash of part %d: %s")) % GetFileName() % partnumber % hashresult.Encode() );
				return false;
			} else {
				return true;
			}
		} else {
			if (hashresult != m_abyFileHash) {
				return false;
			} else {
				return true;
			}
		}
	}
}
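// Note on HashSinglePart() above: a "part" is an ED2K part of PARTSIZE (9,728,000)
// bytes, except for the last part of a file, which may be shorter (GetPartSize()
// accounts for that). Multi-part files are checked against the per-part MD4 from the
// hashset, single-part files directly against the file hash.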
bool CPartFile::IsCorruptedPart(uint16 partnumber)
{
	return std::find(m_corrupted_list.begin(), m_corrupted_list.end(), partnumber)
		!= m_corrupted_list.end();
}
void CPartFile::SetDownPriority(uint8 np, bool bSave, bool bRefresh)
{
	if ( m_iDownPriority != np ) {
		m_iDownPriority = np;
		if ( bRefresh ) {
			UpdateDisplayedInfo(true);
		}
		if ( bSave ) {
			SavePartFile();
		}
	}
}
void CPartFile::StopFile(bool bCancel)
{
	// Kry - Need to set it here to get into SetPartFileStatus(status) correctly
	m_stopped = true;

	// Barry - Need to tell any connected clients to stop sending the file
	PauseFile();

	m_LastSearchTimeKad = 0;
	m_TotalSearchesKad = 0;

	RemoveAllSources(true);
	memset(m_anStates, 0, sizeof(m_anStates));

	UpdateDisplayedInfo(true);
}
void CPartFile::StopPausedFile()
{
	// Once an hour, remove any sources for files which are no longer active downloads
	switch (GetStatus()) {
		case PS_PAUSED:
		case PS_INSUFFICIENT:
			if (time(NULL) - m_iLastPausePurge > (60 * 60)) {
				m_iLastPausePurge = time(NULL);
				StopFile();
			}
			break;
	}

	// release the file handle if it has been unused for some time
	m_hpartfile.Release();
}
void CPartFile::PauseFile(bool bInsufficient)
{
	if ( status == PS_COMPLETE || status == PS_COMPLETING ) {
		return;
	}

	if (GetKadFileSearchID()) {
		Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);
		// If we were in the middle of searching, reset timer so they can resume searching.
		m_LastSearchTimeKad = 0;
	}

	m_iLastPausePurge = time(NULL);

	theApp->downloadqueue->RemoveLocalServerRequest(this);

	CPacket packet( OP_CANCELTRANSFER, 0, OP_EDONKEYPROT );
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
		CUpDownClient* cur_src = *it++;
		if (cur_src->GetDownloadState() == DS_DOWNLOADING) {
			if (!cur_src->GetSentCancelTransfer()) {
				theStats::AddUpOverheadOther( packet.GetPacketSize() );
				AddDebugLogLineM( false, logLocalClient, wxT("Local Client: OP_CANCELTRANSFER to ") + cur_src->GetFullIP() );
				cur_src->SendPacket( &packet, false, true );
				cur_src->SetSentCancelTransfer( true );
			}
			cur_src->SetDownloadState(DS_ONQUEUE);
		}
	}

	m_insufficient = bInsufficient;
	m_paused = true;

	m_anStates[DS_DOWNLOADING] = 0;
}
void CPartFile::ResumeFile()
{
	if ( status == PS_COMPLETE || status == PS_COMPLETING ) {
		return;
	}

	if ( m_insufficient && !CheckFreeDiskSpace() ) {
		// Still not enough free disk space
		return;
	}

	m_paused = false;
	m_stopped = false;
	m_insufficient = false;

	m_lastsearchtime = 0;

	SetActive(theApp->IsConnected());

	if (m_gaplist.IsComplete() && (GetStatus() == PS_ERROR)) {
		// The file has already been hashed at this point
		CompleteFile(true);
	}

	UpdateDisplayedInfo(true);
}
bool CPartFile::CheckFreeDiskSpace( uint64 neededSpace )
{
	uint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if (free == static_cast<uint64>(wxInvalidOffset)) {
		// If GetFreeSpaceAt() fails, then the path probably does not exist.
		return false;
	}

	// The very least acceptable amount of free disk space is a single PART
	if ( free < PARTSIZE ) {
		// Always fail in this case, since we risk losing data if we try to
		// write on a full partition.
		return false;
	}

	// All other checks are only made if the user has enabled them
	if ( thePrefs::IsCheckDiskspaceEnabled() ) {
		neededSpace += thePrefs::GetMinFreeDiskSpace();
	}

	// Due to the existence of sparse files, we cannot assume that writes
	// within the file don't cause new blocks to be allocated. Therefore,
	// we simply have to stop writing the moment the limit has been reached.
	return free >= neededSpace;
}
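// Illustrative example (numbers are hypothetical): with the disk-space check enabled,
// a minimum-free setting of 50 MB and a pending flush of neededSpace = 3 MB, the check
// only succeeds while at least ~53 MB remain free on the temp volume; below a single
// PARTSIZE of free space it always fails, regardless of the preference.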
void CPartFile::SetLastAnsweredTime()
{
	m_ClientSrcAnswered = ::GetTickCount();
}

void CPartFile::SetLastAnsweredTimeTimeout()
{
	m_ClientSrcAnswered = 2 * CONNECTION_LATENCY + ::GetTickCount() - SOURCECLIENTREASKS;
}
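// The timeout variant above backdates the "last answered" timestamp: assuming the
// caller re-asks a source every SOURCECLIENTREASKS milliseconds, the next re-ask
// becomes due after only about 2 * CONNECTION_LATENCY instead of a full period.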
CPacket* CPartFile::CreateSrcInfoPacket(const CUpDownClient* forClient, uint8 byRequestedVersion, uint16 nRequestedOptions)
{
	if ( m_SrcList.empty() ) {
		return NULL;
	}

	if (!IsPartFile()) {
		return CKnownFile::CreateSrcInfoPacket(forClient, byRequestedVersion, nRequestedOptions);
	}

	if (((forClient->GetRequestFile() != this)
		&& (forClient->GetUploadFile() != this)) || forClient->GetUploadFileID() != GetFileHash()) {
		wxString file1 = _("Unknown");
		if (forClient->GetRequestFile() && forClient->GetRequestFile()->GetFileName().IsOk()) {
			file1 = forClient->GetRequestFile()->GetFileName().GetPrintable();
		} else if (forClient->GetUploadFile() && forClient->GetUploadFile()->GetFileName().IsOk()) {
			file1 = forClient->GetUploadFile()->GetFileName().GetPrintable();
		}
		wxString file2 = _("Unknown");
		if (GetFileName().IsOk()) {
			file2 = GetFileName().GetPrintable();
		}
		AddDebugLogLineM(false, logPartFile, wxT("File mismatch on source packet (P) Sending: ") + file1 + wxT(" From: ") + file2);
		return NULL;
	}

	if ( !(GetStatus() == PS_READY || GetStatus() == PS_EMPTY) ) {
		return NULL;
	}

	const BitVector& reqstatus = forClient->GetPartStatus();
	bool KnowNeededParts = !reqstatus.empty();
	//wxASSERT(reqstatus.size() == GetPartCount()); // Obviously!
	if (KnowNeededParts && (reqstatus.size() != GetPartCount())) {
		// Yuck. Same file but different part count? Seriously fucked up.
		// This happens rather often with reqstatus.size() == 0. Don't log then.
		if (reqstatus.size()) {
			AddDebugLogLineM(false, logKnownFiles, CFormat(wxT("Impossible situation: different partcounts: %i (client) and %i (file) for %s")) % reqstatus.size() % GetPartCount() % GetFileName());
		}
		return NULL;
	}

	CMemFile data(1024);

	uint8 byUsedVersion;
	bool bIsSX2Packet;
	if (forClient->SupportsSourceExchange2() && byRequestedVersion > 0) {
		// the client uses SourceExchange2 and requested the highest version it knows,
		// and we send the highest version we know, but of course not higher than its request
		byUsedVersion = std::min(byRequestedVersion, (uint8)SOURCEEXCHANGE2_VERSION);
		bIsSX2Packet = true;
		data.WriteUInt8(byUsedVersion);

		// we don't support any special SX2 options yet, reserved for later use
		if (nRequestedOptions != 0) {
			AddDebugLogLineM(false, logKnownFiles, CFormat(wxT("Client requested unknown options for SourceExchange2: %u")) % nRequestedOptions);
		}
	} else {
		byUsedVersion = forClient->GetSourceExchange1Version();
		bIsSX2Packet = false;
		if (forClient->SupportsSourceExchange2()) {
			AddDebugLogLineM(false, logKnownFiles, wxT("Client which announced to support SX2 sent SX1 packet instead"));
		}
	}

	uint16 nCount = 0;
	data.WriteHash(m_abyFileHash);
	data.WriteUInt16(nCount);

	for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		bool bNeeded = false;
		CUpDownClient* cur_src = *it;

		int state = cur_src->GetDownloadState();
		int valid = ( state == DS_DOWNLOADING ) || ( state == DS_ONQUEUE && !cur_src->IsRemoteQueueFull() );

		if ( cur_src->HasLowID() || !valid ) {
			continue;
		}

		// only send sources which have parts needed by this client, if possible
		const BitVector& srcstatus = cur_src->GetPartStatus();
		if ( !srcstatus.empty() ) {
			//wxASSERT(srcstatus.size() == GetPartCount()); // Obviously!
			if (srcstatus.size() != GetPartCount()) {
				continue;
			}
			if ( KnowNeededParts ) {
				// only send sources which have parts needed by this client
				for (int x = 0; x < GetPartCount(); ++x) {
					if (srcstatus[x] && !reqstatus[x]) {
						bNeeded = true;
						break;
					}
				}
			} else {
				// If we don't know the needed parts for this client, return any source;
				// currently a client sends its file status only after it has at least
				// one complete part.
				if (srcstatus.size() != GetPartCount()) {
					continue;
				}
				for (int x = 0; x < GetPartCount(); ++x) {
					if (srcstatus[x]) {
						bNeeded = true;
						break;
					}
				}
			}
		}

		if (bNeeded) {
			++nCount;

			uint32 dwID;
			if (forClient->GetSourceExchange1Version() > 2) {
				dwID = cur_src->GetUserIDHybrid();
			} else {
				dwID = wxUINT32_SWAP_ALWAYS(cur_src->GetUserIDHybrid());
			}
			data.WriteUInt32(dwID);
			data.WriteUInt16(cur_src->GetUserPort());
			data.WriteUInt32(cur_src->GetServerIP());
			data.WriteUInt16(cur_src->GetServerPort());

			if (byUsedVersion >= 2) {
				data.WriteHash(cur_src->GetUserHash());
			}

			if (byUsedVersion >= 4) {
				// CryptSettings - SourceExchange V4
				// 1 CryptLayer Required
				// 1 CryptLayer Requested
				// 1 CryptLayer Supported
				const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
				const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
				const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
				const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
				data.WriteUInt8(byCryptOptions);
			}

			if (nCount > 500) {
				break;
			}
		}
	}

	if (!nCount) {
		return NULL;
	}

	data.Seek(bIsSX2Packet ? 17 : 16, wxFromStart);
	data.WriteUInt16(nCount);

	CPacket* result = new CPacket(data, OP_EMULEPROT, bIsSX2Packet ? OP_ANSWERSOURCES2 : OP_ANSWERSOURCES);

	// 16+2+501*(4+2+4+2+16) = 14046 bytes max.
	if (result->GetPacketSize() > 354) {
		result->PackPacket();
	}

	return result;
}
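// Resulting packet layout (as written above):
//   [SX2 only]   <version 1>
//                <filehash 16> <count 2>
//   per source   <id 4> <port 2> <server ip 4> <server port 2>
//                [<userhash 16>     if version >= 2]
//                [<cryptoptions 1>  if version >= 4]
// The count field is patched in at offset 16 (17 for SX2) once the real number of
// written sources is known; packets larger than 354 bytes are compressed via PackPacket().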
void CPartFile::AddClientSources(CMemFile* sources, unsigned nSourceFrom, uint8 uClientSXVersion, bool bSourceExchange2, const CUpDownClient* /*pClient*/)
{
	uint16 nCount = 0;

	uint8 uPacketSXVersion = 0;
	if (!bSourceExchange2) {
		nCount = sources->ReadUInt16();

		// Check if the data size matches the 'nCount' for v1 or v2 and eventually correct the source
		// exchange version while reading the packet data. Otherwise we could experience a higher
		// chance of dealing with wrong source data, userhashes and finally duplicate sources.
		uint32 uDataSize = sources->GetLength() - sources->GetPosition();

		if ((uint32)(nCount*(4+2+4+2)) == uDataSize) {	// Checks if a version 1 packet has the correct size
			if (uClientSXVersion != 1) {
				return;
			}
			uPacketSXVersion = 1;
		} else if ((uint32)(nCount*(4+2+4+2+16)) == uDataSize) {	// Checks if a version 2/3 packet has the correct size
			if (uClientSXVersion == 2) {
				uPacketSXVersion = 2;
			} else if (uClientSXVersion > 2) {
				uPacketSXVersion = 3;
			} else {
				return;
			}
		} else if (nCount*(4+2+4+2+16+1) == uDataSize) {
			if (uClientSXVersion != 4) {
				return;
			}
			uPacketSXVersion = 4;
		} else {
			// If v5 inserts additional data (like v2), the above code will correctly filter those packets.
			// If v5 appends additional data after <count>(<Sources>)[count], we are in trouble with the
			// above code. Though a client which does not understand v5+ should never receive such a packet.
			AddDebugLogLineM(false, logClient, CFormat(wxT("Received invalid source exchange packet (v%u) of data size %u for %s")) % uClientSXVersion % uDataSize % GetFileName());
			return;
		}
	} else {
		// We only check whether the version is known by us and do a quick sanity check on known versions
		// other than SX1; the packet will be ignored if any error appears, since it can't be a
		// "misunderstanding" anymore.
		if (uClientSXVersion > SOURCEEXCHANGE2_VERSION || uClientSXVersion == 0) {
			AddDebugLogLineM(false, logPartFile, CFormat(wxT("Invalid source exchange type version: %i")) % uClientSXVersion);
			return;
		}

		// all known versions use the first 2 bytes as count, and unknown versions are already filtered above
		nCount = sources->ReadUInt16();
		uint32 uDataSize = (uint32)(sources->GetLength() - sources->GetPosition());
		bool bError = false;
		switch (uClientSXVersion) {
			case 1:
				bError = nCount*(4+2+4+2) != uDataSize;
				break;
			case 2:
			case 3:
				bError = nCount*(4+2+4+2+16) != uDataSize;
				break;
			case 4:
				bError = nCount*(4+2+4+2+16+1) != uDataSize;
				break;
			default:
				bError = true;
		}

		if (bError) {
			AddDebugLogLineM(false, logPartFile, wxT("Invalid source exchange data size."));
			return;
		}

		uPacketSXVersion = uClientSXVersion;
	}
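	// Per-source payload sizes implied by the checks above (the 16-bit count excluded):
	//   v1      : 4+2+4+2      = 12 bytes
	//   v2 / v3 : 4+2+4+2+16   = 28 bytes (adds the user hash)
	//   v4      : 4+2+4+2+16+1 = 29 bytes (adds the crypt options byte)
	// For example, a v4 packet announcing 3 sources must carry exactly 3 * 29 = 87
	// bytes of source data after the count.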
	for (uint16 i = 0; i != nCount; ++i) {
		uint32 dwID = sources->ReadUInt32();
		uint16 nPort = sources->ReadUInt16();
		uint32 dwServerIP = sources->ReadUInt32();
		uint16 nServerPort = sources->ReadUInt16();

		CMD4Hash userHash;
		if (uPacketSXVersion > 1) {
			userHash = sources->ReadHash();
		}

		uint8 byCryptOptions = 0;
		if (uPacketSXVersion >= 4) {
			byCryptOptions = sources->ReadUInt8();
		}

		// Clients send IDs in the Hybrid format, so highID clients with *.*.*.0 won't be falsely switched to a lowID.
		uint32 dwIDED2K;
		if (uPacketSXVersion >= 3) {
			dwIDED2K = wxUINT32_SWAP_ALWAYS(dwID);
		} else {
			dwIDED2K = dwID;
		}

		// check the HighID (IP) - "Filter LAN IPs" and "IPfilter" the received sources' IP addresses
		if (!IsLowID(dwID)) {
			if (!IsGoodIP(dwIDED2K, thePrefs::FilterLanIPs())) {
				// check for 0-IP, localhost and optionally for LAN addresses
				AddDebugLogLineM(false, logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - bad IP")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
				continue;
			}
			if (theApp->ipfilter->IsFiltered(dwIDED2K)) {
				AddDebugLogLineM(false, logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - IPFilter")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
				continue;
			}
			if (theApp->clientlist->IsBannedClient(dwIDED2K)) {
				continue;
			}
		}

		// additionally check for LowID and our own IP
		if (!CanAddSource(dwID, nPort, dwServerIP, nServerPort, NULL, false)) {
			AddDebugLogLineM(false, logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via source exchange")) % Uint32toStringIP(dwIDED2K));
			continue;
		}

		if (thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
			CUpDownClient* newsource = new CUpDownClient(nPort, dwID, dwServerIP, nServerPort, this, (uPacketSXVersion < 3), true);
			if (uPacketSXVersion > 1) {
				newsource->SetUserHash(userHash);
			}
			if (uPacketSXVersion >= 4) {
				newsource->SetConnectOptions(byCryptOptions, true, false);
			}
			newsource->SetSourceFrom((ESourceFrom)nSourceFrom);
			theApp->downloadqueue->CheckAndAddSource(this, newsource);
		}
	}
}
void CPartFile::UpdateAutoDownPriority()
{
	if (!IsAutoDownPriority()) {
		return;
	}

	if (GetSourceCount() <= RARE_FILE) {
		if ( GetDownPriority() != PR_HIGH ) {
			SetDownPriority(PR_HIGH, false, false);
		}
	} else if (GetSourceCount() < 100) {
		if ( GetDownPriority() != PR_NORMAL ) {
			SetDownPriority(PR_NORMAL, false, false);
		}
	} else {
		if ( GetDownPriority() != PR_LOW ) {
			SetDownPriority(PR_LOW, false, false);
		}
	}
}
// making this function return a higher value when more sources have the extended
// protocol will force you to ask a larger variety of people for sources
int CPartFile::GetCommonFilePenalty()
{
	//TODO: implement, but never return less than MINCOMMONPENALTY!
	return MINCOMMONPENALTY;
}
/* Barry - Replaces BlockReceived()

   Originally this only wrote to disk when a full 180k block
   had been received from a client, and only asked for data in
   180k blocks.

   This meant that on average 90k was lost for every connection
   to a client data source. That is a lot of wasted data.

   To reduce the lost data, packets are now written to a buffer
   and flushed to disk regularly regardless of size downloaded.
   This includes compressed packets.

   Data is also requested only where gaps are, not in 180k blocks.
   The requests will still not exceed 180k, but may be smaller to
   fill a gap.
*/

// Kry - transize is 32bits, no packet can be more than that (this is
// compressed size). Even 32bits is too much imho. As for the return size,
// look at the lenData below.
uint32 CPartFile::WriteToBuffer(uint32 transize, byte* data, uint64 start, uint64 end, Requested_Block_Struct *block, const CUpDownClient* client)
{
	// Increment transferred bytes counter for this file
	transferred += transize;

	// This is needed a few times
	// Kry - should not need a uint64 here - no block is larger than
	// 2GB even uncompressed.
	uint32 lenData = (uint32) (end - start + 1);

	if (lenData > transize) {
		m_iGainDueToCompression += lenData - transize;
	}

	// Occasionally packets are duplicated; there is no point in writing them twice
	if (IsComplete(start, end)) {
		AddDebugLogLineM(false, logPartFile,
			CFormat(wxT("File '%s' has already been written from %u to %u"))
				% GetFileName() % start % end);
		return 0;
	}

	// security sanity check, to make sure we do not write anything into an already hashed complete chunk
	const uint64 nStartChunk = start / PARTSIZE;
	const uint64 nEndChunk = end / PARTSIZE;
	if (IsComplete(nStartChunk)) {
		AddDebugLogLineM(false, logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (start): %u-%u; File=%s")) % start % end % GetFileName());
		return 0;
	} else if (nStartChunk != nEndChunk) {
		if (IsComplete(nEndChunk)) {
			AddDebugLogLineM(false, logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (end): %u-%u; File=%s")) % start % end % GetFileName());
			return 0;
		} else {
			AddDebugLogLineM(false, logPartFile, CFormat(wxT("Received data crosses chunk boundaries: %u-%u; File=%s")) % start % end % GetFileName());
		}
	}

	// log transfer information in our "blackbox"
	m_CorruptionBlackBox->TransferredData(start, end, client);

	// Create copy of data as new buffer
	byte *buffer = new byte[lenData];
	memcpy(buffer, data, lenData);

	// Create a new buffered queue entry
	PartFileBufferedData *item = new PartFileBufferedData(buffer, start, end, block);

	// Add to the queue in the correct position (most likely the end)
	bool added = false;

	std::list<PartFileBufferedData*>::iterator it = m_BufferedData_list.begin();
	for (; it != m_BufferedData_list.end(); ++it) {
		PartFileBufferedData* queueItem = *it;

		if (item->end <= queueItem->end) {
			if (it != m_BufferedData_list.begin()) {
				added = true;

				m_BufferedData_list.insert(--it, item);
			}

			break;
		}
	}

	if (!added) {
		m_BufferedData_list.push_front(item);
	}
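	// The intent of the insertion above is to keep the buffer list roughly ordered by
	// end offset, so that FlushBuffer() can pop entries from the front and write them
	// with mostly forward seeks; the exact insertion point only affects the seek
	// pattern, not correctness, since every entry is written at its own offset.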
	// Increment buffer size marker
	m_nTotalBufferData += lenData;

	// Mark this small section of the file as filled
	FillGap(item->start, item->end);

	// Update the flushed mark on the requested block
	// The loop here is unfortunate but necessary to detect deleted blocks.
	std::list<Requested_Block_Struct*>::iterator it2 = m_requestedblocks_list.begin();
	for (; it2 != m_requestedblocks_list.end(); ++it2) {
		if (*it2 == item->block) {
			item->block->transferred += lenData;
		}
	}

	if (m_gaplist.IsComplete()) {
		FlushBuffer(true);
	}

	// Return the length of data written to the buffer
	return lenData;
}
void CPartFile::FlushBuffer(bool fromAICHRecoveryDataAvailable)
{
	m_nLastBufferFlushTime = GetTickCount();

	if (m_BufferedData_list.empty()) {
		return;
	}

	uint32 partCount = GetPartCount();
	// Remember which parts need to be checked at the end of the flush
	std::vector<bool> changedPart(partCount, false);

	// Ensure the file is big enough to write data to (the last item will be the furthest from the start)
	if (!CheckFreeDiskSpace(m_nTotalBufferData)) {
		// Not enough free space to write the last item, bail
		AddLogLineM(true, CFormat( _("WARNING: Not enough free disk-space! Pausing file: %s") ) % GetFileName());
		PauseFile(true);
		return;
	}

	// Loop through the queue
	while ( !m_BufferedData_list.empty() ) {
		// Get the top item and remove it from the queue
		CScopedPtr<PartFileBufferedData> item(m_BufferedData_list.front());
		m_BufferedData_list.pop_front();

		// This is needed a few times
		wxASSERT((item->end - item->start) < 0xFFFFFFFF);
		uint32 lenData = (uint32)(item->end - item->start + 1);

		// SLUGFILLER: SafeHash - could be more than one part
		for (uint32 curpart = (item->start / PARTSIZE); curpart <= (item->end / PARTSIZE); ++curpart) {
			wxASSERT(curpart < partCount);
			changedPart[curpart] = true;
		}
		// SLUGFILLER: SafeHash
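		// changedPart[] records every PARTSIZE-sized part touched by the flushed
		// ranges; only those parts are re-checked (and possibly re-hashed) further
		// below, instead of rescanning the whole file after every flush.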
		// Go to the correct position in the file and write the block of data
		try {
			m_hpartfile.Seek(item->start);
			m_hpartfile.Write(item->data.get(), lenData);
			// Decrease buffer size
			m_nTotalBufferData -= lenData;
		} catch (const CIOFailureException& e) {
			AddDebugLogLineM(true, logPartFile, wxT("Error while saving part-file: ") + e.what());
			SetPartFileStatus(PS_ERROR);
			// No need to bang your head against it again and again if it has already failed.
			DeleteContents(m_BufferedData_list);
			m_nTotalBufferData = 0;
			return;
		}
	}

	// Update last-changed date
	m_lastDateChanged = wxDateTime::GetTimeNow();

	try {
		// Partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			// It's a "last chance" correction; the real bugfix has to be applied 'somewhere' else.
			m_hpartfile.SetLength(GetFileSize());
		}
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logPartFile,
			CFormat(wxT("Error while truncating part-file (%s): %s"))
				% m_PartPath % e.what());
		SetPartFileStatus(PS_ERROR);
	}

	// Check each part of the file
	for (uint16 partNumber = 0; partNumber < partCount; ++partNumber) {
		if (changedPart[partNumber] == false) {
			continue;
		}

		uint32 partRange = GetPartSize(partNumber) - 1;

		// Is this 9MB part complete?
		if (IsComplete(partNumber)) {
			if (!HashSinglePart(partNumber)) {
				AddLogLineM(true, CFormat(
					_("Downloaded part %i is corrupt in file: %s") ) % partNumber % GetFileName() );
				// add the part to the corrupted list, if not already there
				if (!IsCorruptedPart(partNumber)) {
					m_corrupted_list.push_back(partNumber);
				}
				// request AICH recovery data
				// Don't if called from the AICHRecovery. It's already there and would lead to an infinite recursion.
				if (!fromAICHRecoveryDataAvailable) {
					RequestAICHRecovery(partNumber);
				}
				// Reduce the transferred amount by the corrupt amount
				m_iLostDueToCorruption += (partRange + 1);
			} else {
				if (!m_hashsetneeded) {
					AddDebugLogLineM(false, logPartFile, CFormat(
						wxT("Finished part %u of '%s'")) % partNumber % GetFileName());
				}

				// tell the blackbox about the verified data
				m_CorruptionBlackBox->VerifiedData(PARTSIZE * partNumber, PARTSIZE * partNumber + partRange);

				// if this part was successfully completed (although ICH is active), remove it from the corrupted list
				EraseFirstValue(m_corrupted_list, partNumber);

				if (status == PS_EMPTY) {
					if (theApp->IsRunning()) { // may be called during shutdown!
						if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
							// Successfully completed part, make it available for sharing
							SetStatus(PS_READY);
							theApp->sharedfiles->SafeAddKFile(this);
						}
					}
				}
			}
		} else if ( IsCorruptedPart(partNumber) &&		// corrupted part:
				(thePrefs::IsICHEnabled()		// old ICH:  rehash whenever we have new data, hoping it will be good now
				|| fromAICHRecoveryDataAvailable)) {	// new AICH: one rehash right before performing it (maybe it's already good)
			// Try to recover with minimal loss
			if (HashSinglePart(partNumber)) {
				++m_iTotalPacketsSavedDueToICH;

				uint64 uMissingInPart = m_gaplist.GetGapSize(partNumber);
				FillGap(partNumber);
				RemoveBlockFromList(PARTSIZE * partNumber, (PARTSIZE * partNumber + partRange));

				// tell the blackbox about the verified data
				m_CorruptionBlackBox->VerifiedData(PARTSIZE * partNumber, PARTSIZE * partNumber + partRange);

				// remove it from the corrupted list
				EraseFirstValue(m_corrupted_list, partNumber);

				AddLogLineM(true, CFormat( _("ICH: Recovered corrupted part %i for %s -> Saved bytes: %s") )
					% partNumber
					% GetFileName()
					% CastItoXBytes(uMissingInPart));

				if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
					if (status == PS_EMPTY) {
						// Successfully recovered part, make it available for sharing
						SetStatus(PS_READY);
						if (theApp->IsRunning()) { // may be called during shutdown!
							theApp->sharedfiles->SafeAddKFile(this);
						}
					}
				}
			}
		}
	}

	if (theApp->IsRunning()) { // may be called during shutdown!
		// Is this file finished?
		if (m_gaplist.IsComplete()) {
			CompleteFile(false);
		}
	}
}
// read data for upload, return false on error
bool CPartFile::ReadData(CFileArea& area, uint64 offset, uint32 toread)
{
	if (offset + toread > GetFileSize()) {
		AddDebugLogLineM(false, logPartFile, CFormat(wxT("tried to read %d bytes past eof of %s"))
			% (offset + toread - GetFileSize()) % GetFileName());
		return false;
	}

	m_hpartfile.Seek(offset, wxFromStart);
	area.Read(m_hpartfile, toread);
	// if it fails it throws (which the caller should catch)
	return true;
}
void CPartFile::UpdateFileRatingCommentAvail()
{
	bool prevComment = m_hasComment;
	int prevRating = m_iUserRating;

	m_hasComment = false;
	m_iUserRating = 0;
	int ratingCount = 0;

	SourceSet::iterator it = m_SrcList.begin();
	for (; it != m_SrcList.end(); ++it) {
		CUpDownClient* cur_src = *it;

		if (!cur_src->GetFileComment().IsEmpty()) {
			if (thePrefs::IsCommentFiltered(cur_src->GetFileComment())) {
				continue;
			}
			m_hasComment = true;
		}

		uint8 rating = cur_src->GetFileRating();
		if (rating) {
			wxASSERT(rating <= 5);

			++ratingCount;
			m_iUserRating += rating;
		}
	}

	if (ratingCount) {
		m_iUserRating /= ratingCount;
		wxASSERT(m_iUserRating > 0 && m_iUserRating <= 5);
	}

	if ((prevComment != m_hasComment) || (prevRating != m_iUserRating)) {
		UpdateDisplayedInfo();
	}
}
void CPartFile::SetCategory(uint8 cat)
{
	wxASSERT( cat < theApp->glob_prefs->GetCatCount() );

	m_category = cat;
}
bool CPartFile::RemoveSource(CUpDownClient* toremove, bool updatewindow, bool bDoStatsUpdate)
{
	wxASSERT( toremove );

	bool result = theApp->downloadqueue->RemoveSource( toremove, updatewindow, bDoStatsUpdate );

	// Check if the client should be deleted, but not if the client is already dying
	if ( !toremove->GetSocket() && !toremove->HasBeenDeleted() ) {
		if ( toremove->Disconnected(wxT("RemoveSource - purged")) ) {
			toremove->Safe_Delete();
		}
	}

	return result;
}
void CPartFile::AddDownloadingSource(CUpDownClient* client)
{
	CClientPtrList::iterator it =
		std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), client);
	if (it == m_downloadingSourcesList.end()) {
		m_downloadingSourcesList.push_back(client);
	}
}

void CPartFile::RemoveDownloadingSource(CUpDownClient* client)
{
	CClientPtrList::iterator it =
		std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), client);
	if (it != m_downloadingSourcesList.end()) {
		m_downloadingSourcesList.erase(it);
	}
}
void CPartFile::SetPartFileStatus(uint8 newstatus)
{
	status = newstatus;

	if (thePrefs::GetAllcatType()) {
		Notify_DownloadCtrlUpdateItem(this);
	}

	Notify_DownloadCtrlSort();
}
uint64 CPartFile::GetNeededSpace()
{
	try {
		uint64 length = m_hpartfile.GetLength();

		if (length > GetFileSize()) {
			return 0;	// Shouldn't happen, but just in case
		}

		return GetFileSize() - length;
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logPartFile,
			CFormat(wxT("Error while retrieving file-length (%s): %s"))
				% m_PartPath % e.what());
		SetPartFileStatus(PS_ERROR);
		return 0;
	}
}
void CPartFile::SetStatus(uint8 in)
{
	wxASSERT( in != PS_PAUSED && in != PS_INSUFFICIENT );

	status = in;

	if (theApp->IsRunning()) {
		UpdateDisplayedInfo( true );

		if ( thePrefs::ShowCatTabInfos() ) {
			Notify_ShowUpdateCatTabTitles();
		}
	}
}
void CPartFile::RequestAICHRecovery(uint16 nPart)
{
	if ( !m_pAICHHashSet->HasValidMasterHash() ||
		(m_pAICHHashSet->GetStatus() != AICH_TRUSTED && m_pAICHHashSet->GetStatus() != AICH_VERIFIED) ) {
		AddDebugLogLineM( false, logAICHRecovery, wxT("Unable to request AICH Recoverydata because we have no trusted Masterhash") );
		return;
	}

	if (GetPartSize(nPart) <= EMBLOCKSIZE) {
		return;
	}

	if (CAICHHashSet::IsClientRequestPending(this, nPart)) {
		AddDebugLogLineM( false, logAICHRecovery, wxT("RequestAICHRecovery: Already a request for this part pending"));
		return;
	}

	// first check if we already have the recovery data; no need to re-request it then
	if (m_pAICHHashSet->IsPartDataAvailable(nPart * PARTSIZE)) {
		AddDebugLogLineM( false, logAICHRecovery, wxT("Found PartRecoveryData in memory"));
		AICHRecoveryDataAvailable(nPart);
		return;
	}

	wxASSERT( nPart < GetPartCount() );
	// find some random client which supports AICH to ask for the blocks
	// first let's see how many we have at all; we strongly prefer high id
	uint32 cAICHClients = 0;
	uint32 cAICHLowIDClients = 0;
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		CUpDownClient* pCurClient = *(it);
		if ( pCurClient->IsSupportingAICH() &&
			pCurClient->GetReqFileAICHHash() != NULL &&
			!pCurClient->IsAICHReqPending()
			&& (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
		{
			if (pCurClient->HasLowID()) {
				++cAICHLowIDClients;
			} else {
				++cAICHClients;
			}
		}
	}
	if ((cAICHClients | cAICHLowIDClients) == 0) {
		AddDebugLogLineM( false, logAICHRecovery, wxT("Unable to request AICH Recoverydata because found no client who supports it and has the same hash as the trusted one"));
		return;
	}

	uint32 nSeclectedClient;
	if (cAICHClients > 0) {
		nSeclectedClient = (rand() % cAICHClients) + 1;
	} else {
		nSeclectedClient = (rand() % cAICHLowIDClients) + 1;
	}
	CUpDownClient* pClient = NULL;
	for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
		CUpDownClient* pCurClient = *(it);
		if (pCurClient->IsSupportingAICH() && pCurClient->GetReqFileAICHHash() != NULL && !pCurClient->IsAICHReqPending()
			&& (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
		{
			if (cAICHClients > 0) {
				if (!pCurClient->HasLowID()) {
					nSeclectedClient--;
				}
			} else {
				wxASSERT( pCurClient->HasLowID());
				nSeclectedClient--;
			}
			if (nSeclectedClient == 0) {
				pClient = pCurClient;
				break;
			}
		}
	}
	if (pClient == NULL) {
		return;
	}

	AddDebugLogLineM( false, logAICHRecovery, CFormat( wxT("Requesting AICH Hash (%s) from client %s") ) % ( cAICHClients ? wxT("HighId") : wxT("LowID") ) % pClient->GetClientFullInfo() );
	pClient->SendAICHRequest(this, nPart);
}
void CPartFile::AICHRecoveryDataAvailable(uint16 nPart)
{
	if (GetPartCount() < nPart) {
		return;
	}

	uint32 length = GetPartSize(nPart);
	// if the part was already ok, it would now be complete
	if (IsComplete(nPart)) {
		AddDebugLogLineM( false, logAICHRecovery,
			wxString::Format( wxT("Processing AICH Recovery data: The part (%u) is already complete, canceling"), nPart ) );
		return;
	}

	CAICHHashTree* pVerifiedHash = m_pAICHHashSet->m_pHashTree.FindHash(nPart * PARTSIZE, length);
	if (pVerifiedHash == NULL || !pVerifiedHash->GetHashValid()) {
		AddDebugLogLineM( true, logAICHRecovery, wxT("Processing AICH Recovery data: Unable to get verified hash from hashset (should never happen)") );
		return;
	}
	CAICHHashTree htOurHash(pVerifiedHash->GetNDataSize(), pVerifiedHash->GetIsLeftBranch(), pVerifiedHash->GetNBaseSize());
	try {
		m_hpartfile.Seek(PARTSIZE * nPart, wxFromStart);
		CreateHashFromFile(m_hpartfile, length, NULL, &htOurHash);
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logAICHRecovery,
			CFormat(wxT("IO failure while hashing part-file '%s': %s"))
				% m_hpartfile.GetFilePath() % e.what());
		SetPartFileStatus(PS_ERROR);
		return;
	}

	if (!htOurHash.GetHashValid()) {
		AddDebugLogLineM( false, logAICHRecovery, wxT("Processing AICH Recovery data: Failed to retrieve AICH Hashset of corrupt part") );
		return;
	}

	// now compare the hash we just computed to the verified hash, and re-add all blocks which are OK
	uint32 nRecovered = 0;
	for (uint32 pos = 0; pos < length; pos += EMBLOCKSIZE) {
		const uint32 nBlockSize = min<uint32>(EMBLOCKSIZE, length - pos);
		CAICHHashTree* pVerifiedBlock = pVerifiedHash->FindHash(pos, nBlockSize);
		CAICHHashTree* pOurBlock = htOurHash.FindHash(pos, nBlockSize);
		if ( pVerifiedBlock == NULL || pOurBlock == NULL || !pVerifiedBlock->GetHashValid() || !pOurBlock->GetHashValid()) {
			continue;
		}
		if (pOurBlock->GetHash() == pVerifiedBlock->GetHash()) {
			FillGap(PARTSIZE * nPart + pos, PARTSIZE * nPart + pos + (nBlockSize - 1));
			RemoveBlockFromList(PARTSIZE * nPart, PARTSIZE * nPart + (nBlockSize - 1));
			nRecovered += nBlockSize;
			// tell the blackbox about the verified data
			m_CorruptionBlackBox->VerifiedData(PARTSIZE * nPart + pos, PARTSIZE * nPart + pos + (nBlockSize - 1));
		} else {
			// inform our "blackbox" about the corrupted block, which may ban clients who sent it
			m_CorruptionBlackBox->CorruptedData(PARTSIZE * nPart + pos, PARTSIZE * nPart + pos + (nBlockSize - 1));
		}
	}
	m_CorruptionBlackBox->EvaluateData(nPart);

	// ok, now some sanity checks
	if (IsComplete(nPart)) {
		// this is bad, but it could probably happen under some rare circumstances
		// make sure that MD4 agrees with this fact too
		if (!HashSinglePart(nPart)) {
			AddDebugLogLineM( false, logAICHRecovery,
				wxString::Format(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering - but MD4 says it's corrupt! Setting hashset to error state, deleting part"), nPart));
			// now we are fu... unhappy
			m_pAICHHashSet->SetStatus(AICH_ERROR);
			return;
		} else {
			AddDebugLogLineM( false, logAICHRecovery, wxString::Format(
				wxT("Processing AICH Recovery data: The part (%u) got completed while recovering and MD4 agrees"), nPart) );
			if (status == PS_EMPTY && theApp->IsRunning()) {
				if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
					// Successfully recovered part, make it available for sharing
					SetStatus(PS_READY);
					theApp->sharedfiles->SafeAddKFile(this);
				}
			}

			if (theApp->IsRunning()) {
				// Is this file finished?
				if (m_gaplist.IsComplete()) {
					CompleteFile(false);
				}
			}
		}
	} // end sanity check

	// We did the best we could. If it's still incomplete, then no need to keep
	// bashing it with ICH. So remove it from the list of corrupted parts.
	EraseFirstValue(m_corrupted_list, nPart);

	// make sure the user appreciates our great recovering work :P
	AddDebugLogLineM( true, logAICHRecovery, CFormat(
		wxT("AICH successfully recovered %s of %s from part %u for %s") )
		% CastItoXBytes(nRecovered)
		% CastItoXBytes(length)
		% nPart
		% GetFileName() );
}
void CPartFile::ClientStateChanged( int oldState, int newState )
{
	if ( oldState == newState ) {
		return;
	}

	// If the state is -1, then it's an entirely new item
	if ( oldState != -1 ) {
		// Was the old state a valid state?
		if ( oldState == DS_ONQUEUE || oldState == DS_DOWNLOADING ) {
			m_anStates[oldState]--;
		} else {
			if ( oldState == DS_CONNECTED /* || oldState == DS_REMOTEQUEUEFULL */ ) {
				m_anStates[DS_CONNECTED]--;
			}

			m_notCurrentSources--;
		}
	}

	// If the state is -1, then the source is being removed
	if ( newState != -1 ) {
		// Is the new state a valid state?
		if ( newState == DS_ONQUEUE || newState == DS_DOWNLOADING ) {
			m_anStates[newState]++;
		} else {
			if ( newState == DS_CONNECTED /* || newState == DS_REMOTEQUEUEFULL */ ) {
				m_anStates[DS_CONNECTED]++;
			}

			++m_notCurrentSources;
		}
	}
}
bool CPartFile::AddSource( CUpDownClient* client )
{
	if (m_SrcList.insert( client ).second) {
		theStats::AddFoundSource();
		theStats::AddSourceOrigin(client->GetSourceFrom());
		return true;
	} else {
		return false;
	}
}


bool CPartFile::DelSource( CUpDownClient* client )
{
	if (m_SrcList.erase( client )) {
		theStats::RemoveSourceOrigin(client->GetSourceFrom());
		theStats::RemoveFoundSource();
		return true;
	} else {
		return false;
	}
}
void CPartFile::UpdatePartsFrequency( CUpDownClient* client, bool increment )
{
	const BitVector& freq = client->GetPartStatus();

	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);
	}

	unsigned int size = freq.size();
	if ( size != m_SrcpartFrequency.size() ) {
		return;
	}

	if ( increment ) {
		for ( unsigned int i = 0; i < size; i++ ) {
			if ( freq[i] ) {
				m_SrcpartFrequency[i]++;
			}
		}
	} else {
		for ( unsigned int i = 0; i < size; i++ ) {
			if ( freq[i] ) {
				m_SrcpartFrequency[i]--;
			}
		}
	}
}
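// After UpdatePartsFrequency() has run for every source, m_SrcpartFrequency[i] holds
// the number of known sources offering part i. This availability count is what feeds
// the per-chunk 'frequency' (Criterion 1) used by the chunk-ranking code earlier in
// this file.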
const FileRatingList &CPartFile::GetRatingAndComments()
{
	m_FileRatingList.clear();
	// This can be pre-processed, but is it worth the CPU?
	CPartFile::SourceSet::iterator it = m_SrcList.begin();
	for ( ; it != m_SrcList.end(); ++it ) {
		CUpDownClient* cur_src = *it;
		if (cur_src->GetFileComment().Length() > 0 || cur_src->GetFileRating() > 0) {
			// AddDebugLogLineM(false, logPartFile, wxString(wxT("found a comment for ")) << GetFileName());
			m_FileRatingList.push_back(SFileRating(*cur_src));
		}
	}

	return m_FileRatingList;
}
CPartFile::CPartFile(CEC_PartFile_Tag *tag)
{
	SetFileName(CPath(tag->FileName()));
	m_abyFileHash = tag->ID();
	SetFileSize(tag->SizeFull());
	m_gaplist.Init(GetFileSize(), true);	// Init empty
	m_partmetfilename = CPath(tag->PartMetName());
	transferred = tag->SizeXfer();
	completedsize = tag->SizeDone();
	percentcompleted = (100.0 * completedsize) / GetFileSize();

	m_category = tag->FileCat();

	m_SrcpartFrequency.insert(m_SrcpartFrequency.end(), GetPartCount(), 0);
	m_iDownPriority = tag->Prio();
	if ( m_iDownPriority >= 10 ) {
		m_iDownPriority -= 10;
		m_bAutoDownPriority = true;
	} else {
		m_bAutoDownPriority = false;
	}

	m_a4af_source_count = 0;
}


/*
 * Remote gui specific code
 */
CPartFile::~CPartFile()
{
}


const FileRatingList &CPartFile::GetRatingAndComments()
{
	return m_FileRatingList;
}

#endif // !CLIENT_GUI
void CPartFile::UpdateDisplayedInfo(bool force)
{
	uint32 curTick = ::GetTickCount();
	m_CommentUpdated = true;

	// Wait 1.5s between each redraw
	if (force || curTick - m_lastRefreshedDLDisplay > MINWAIT_BEFORE_DLDISPLAY_WINDOWUPDATE) {
		Notify_DownloadCtrlUpdateItem(this);
		m_lastRefreshedDLDisplay = curTick;
	}
}
void CPartFile::Init()
{
	m_showSources = false;
	m_lastsearchtime = 0;
	lastpurgetime = ::GetTickCount();

	m_insufficient = false;

	m_iLastPausePurge = time(NULL);

	if (thePrefs::GetNewAutoDown()) {
		m_iDownPriority = PR_HIGH;
		m_bAutoDownPriority = true;
	} else {
		m_iDownPriority = PR_NORMAL;
		m_bAutoDownPriority = false;
	}

	memset(m_anStates, 0, sizeof(m_anStates));

	transferingsrc = 0;	// new

	m_CommentUpdated = false;
	m_hashsetneeded = true;
	percentcompleted = 0;
	m_bPreviewing = false;
	lastseencomplete = 0;
	m_availablePartsCount = 0;
	m_ClientSrcAnswered = 0;
	m_LastNoNeededCheck = 0;
	m_nTotalBufferData = 0;
	m_nLastBufferFlushTime = 0;
	m_bPercentUpdated = false;
	m_bRecoveringArchive = false;
	m_iGainDueToCompression = 0;
	m_iLostDueToCorruption = 0;
	m_iTotalPacketsSavedDueToICH = 0;
	m_lastRefreshedDLDisplay = 0;
	m_nDlActiveTime = 0;
	m_is_A4AF_auto = false;
	m_localSrcReqQueued = false;
	m_nCompleteSourcesTime = time(NULL);
	m_nCompleteSourcesCount = 0;
	m_nCompleteSourcesCountLo = 0;
	m_nCompleteSourcesCountHi = 0;

	m_notCurrentSources = 0;

	m_LastSearchTimeKad = 0;
	m_TotalSearchesKad = 0;

	m_gapptrlist.Init(&m_gaplist);

	m_CorruptionBlackBox = new CCorruptionBlackBox();
}
wxString CPartFile::getPartfileStatus() const
{
	wxString mybuffer;

	if ((status == PS_HASHING) || (status == PS_WAITINGFORHASH)) {
		mybuffer = _("Hashing");
	} else if (status == PS_ALLOCATING) {
		mybuffer = _("Allocating");
	} else {
		switch (GetStatus()) {
			case PS_COMPLETING:
				mybuffer = _("Completing");
				break;
			case PS_COMPLETE:
				mybuffer = _("Complete");
				break;
			case PS_PAUSED:
				mybuffer = _("Paused");
				break;
			case PS_ERROR:
				mybuffer = _("Erroneous");
				break;
			case PS_INSUFFICIENT:
				mybuffer = _("Insufficient disk space");
				break;
			default:
				if (GetTransferingSrcCount() > 0) {
					mybuffer = _("Downloading");
				} else {
					mybuffer = _("Waiting");
				}
				break;
		}
	}

	if (m_stopped && (GetStatus() != PS_COMPLETE)) {
		mybuffer = _("Stopped");
	}

	return mybuffer;
}
int CPartFile::getPartfileStatusRang() const
{
	int tempstatus = 0;
	if (GetTransferingSrcCount() == 0) {
		tempstatus = 1;
	}
	switch (GetStatus()) {
		case PS_WAITINGFORHASH:
		default:
			break;
	}
	return tempstatus;
}
wxString CPartFile::GetFeedback() const
{
	wxString retval = CKnownFile::GetFeedback();

	if (GetStatus() != PS_COMPLETE) {
		retval += wxString(_("Downloaded")) + wxT(": ") + CastItoXBytes(GetCompletedSize()) + wxString::Format(wxT(" (%.2f%%)\n"), GetPercentCompleted())
			+ _("Sources") + CFormat(wxT(": %u\n")) % GetSourceCount();
	}

	return retval + _("Status") + wxT(": ") + getPartfileStatus() + wxT("\n");
}
sint32 CPartFile::getTimeRemaining() const
{
	if (GetKBpsDown() < 0.001) {
		return -1;
	} else {
		return ((GetFileSize() - GetCompletedSize()) / ((int)(GetKBpsDown() * 1024.0)));
	}
}
bool CPartFile::PreviewAvailable()
{
	FileType type = GetFiletype(GetFileName());

	return (((type == ftVideo) || (type == ftAudio)) && IsComplete(0, 256 * 1024));
}
bool CPartFile::CheckShowItemInGivenCat(int inCategory)
{
	// easy normal cases
	bool IsInCat;
	bool IsNotFiltered = true;

	IsInCat = ((inCategory == 0) || (inCategory > 0 && inCategory == GetCategory()));

	switch (thePrefs::GetAllcatType()) {
		case 1:
			IsNotFiltered = GetCategory() == 0 || inCategory > 0;
			break;
		case 2:
			IsNotFiltered = IsPartFile();
			break;
		case 3:
			IsNotFiltered = !IsPartFile();
			break;
		case 4:
			IsNotFiltered =
				(GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
				GetTransferingSrcCount() == 0;
			break;
		case 5:
			IsNotFiltered =
				(GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
				GetTransferingSrcCount() > 0;
			break;
		case 6:
			IsNotFiltered = GetStatus() == PS_ERROR;
			break;
		case 7:
			IsNotFiltered = GetStatus() == PS_PAUSED && !IsStopped();
			break;
		case 8:
			IsNotFiltered = IsStopped();
			break;
		case 9:
			IsNotFiltered = GetFiletype(GetFileName()) == ftVideo;
			break;
		case 10:
			IsNotFiltered = GetFiletype(GetFileName()) == ftAudio;
			break;
		case 11:
			IsNotFiltered = GetFiletype(GetFileName()) == ftArchive;
			break;
		case 12:
			IsNotFiltered = GetFiletype(GetFileName()) == ftCDImage;
			break;
		case 13:
			IsNotFiltered = GetFiletype(GetFileName()) == ftPicture;
			break;
		case 14:
			IsNotFiltered = GetFiletype(GetFileName()) == ftText;
			break;
		case 15:
			IsNotFiltered = !IsStopped() && GetStatus() != PS_PAUSED;
			break;
	}

	return IsNotFiltered && IsInCat;
}
void CPartFile::SetActive(bool bActive)
{
	time_t tNow = time(NULL);

	if (bActive) {
		if (theApp->IsConnected()) {
			if (m_tActivated == 0) {
				m_tActivated = tNow;
			}
		}
	} else {
		if (m_tActivated != 0) {
			m_nDlActiveTime += tNow - m_tActivated;
			m_tActivated = 0;
		}
	}
}
CPartFile::GetDlActiveTime() const
3872 uint32 nDlActiveTime
= m_nDlActiveTime
;
3873 if (m_tActivated
!= 0) {
3874 nDlActiveTime
+= time(NULL
) - m_tActivated
;
3876 return nDlActiveTime
;
uint8 CPartFile::GetStatus(bool ignorepause) const
{
	if ( (!m_paused && !m_insufficient) ||
		status == PS_ERROR ||
		status == PS_COMPLETING ||
		status == PS_COMPLETE ||
		ignorepause ) {
		return status;
	} else if ( m_insufficient ) {
		return PS_INSUFFICIENT;
	} else {
		return PS_PAUSED;
	}
}
void CPartFile::AddDeadSource(const CUpDownClient* client)
{
	m_deadSources.AddDeadSource( client );
}


bool CPartFile::IsDeadSource(const CUpDownClient* client)
{
	return m_deadSources.IsDeadSource( client );
}
void CPartFile::SetFileName(const CPath& fileName)
{
	CKnownFile* pFile = theApp->sharedfiles->GetFileByID(GetFileHash());

	bool is_shared = (pFile && pFile == this);

	if (is_shared) {
		// The file is shared, we must clear the search keywords so we don't
		// publish the old name anymore.
		theApp->sharedfiles->RemoveKeywords(this);
	}

	CKnownFile::SetFileName(fileName);

	if (is_shared) {
		// And of course, we must advertise the new name if the file is shared.
		theApp->sharedfiles->AddKeywords(this);
	}

	UpdateDisplayedInfo(true);
}
uint16 CPartFile::GetMaxSources() const
{
	// This is just like this, while we don't import the private max sources per file
	return thePrefs::GetMaxSourcePerFile();
}


uint16 CPartFile::GetMaxSourcePerFileSoft() const
{
	unsigned int temp = ((unsigned int)GetMaxSources() * 9L) / 10;
	if (temp > MAX_SOURCES_FILE_SOFT) {
		return MAX_SOURCES_FILE_SOFT;
	}
	return temp;
}

uint16 CPartFile::GetMaxSourcePerFileUDP() const
{
	unsigned int temp = ((unsigned int)GetMaxSources() * 3L) / 4;
	if (temp > MAX_SOURCES_FILE_UDP) {
		return MAX_SOURCES_FILE_UDP;
	}
	return temp;
}
3957 CUpDownClient
* CPartFile::GetSlowerDownloadingClient(uint32 speed
, CUpDownClient
* caller
) {
3958 // printf("Start slower source calculation\n");
3959 for( SourceSet::iterator it
= m_SrcList
.begin(); it
!= m_SrcList
.end(); ) {
3960 CUpDownClient
* cur_src
= *it
++;
3961 if ((cur_src
->GetDownloadState() == DS_DOWNLOADING
) && (cur_src
!= caller
)) {
3962 uint32 factored_bytes_per_second
= static_cast<uint32
>(
3963 (cur_src
->GetKBpsDown() * 1024) * DROP_FACTOR
);
3964 if ( factored_bytes_per_second
< speed
) {
3965 // printf("Selecting source %p to drop: %d < %d\n", cur_src, factored_bytes_per_second, speed);
3966 // printf("End slower source calculation\n");
3969 // printf("Not selecting source %p to drop: %d > %d\n", cur_src, factored_bytes_per_second, speed);
3973 // printf("End slower source calculation\n");
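// With DROP_FACTOR 2, a downloading source is reported as "slower" only if even twice
// its current rate stays below the caller-supplied speed, i.e. it is running at less
// than half that speed; a NULL return means no such source was found.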
void CPartFile::AllocationFinished()
{
	// see if it can be opened
	if (!m_hpartfile.Open(m_PartPath, CFile::read_write)) {
		AddLogLineM(false, CFormat(_("ERROR: Failed to open partfile '%s'")) % GetFullName());
		SetPartFileStatus(PS_ERROR);
	}
	// then close the handle again
	m_hpartfile.Release(true);
}
// File_checked_for_headers