// This file is part of the aMule Project.
//
// Copyright (c) 2003-2008 aMule Team ( admin@amule.org / http://www.amule.org )
// Copyright (c) 2002-2008 Merkur ( devs@emule-project.net / http://www.emule-project.net )
//
// Any parts of this program derived from the xMule, lMule or eMule project,
// or contributed by third-party developers are copyrighted by their
// respective authors.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
#include "PartFile.h"		// Interface declarations.

#include "config.h"		// Needed for VERSION

#include <protocol/kad/Constants.h>
#include <protocol/ed2k/Client2Client/TCP.h>
#include <protocol/Protocols.h>
#include <common/DataFileVersion.h>
#include <common/Constants.h>
#include <tags/FileTags.h>

#include <wx/tokenzr.h>		// Needed for wxStringTokenizer

#include "KnownFileList.h"	// Needed for CKnownFileList
#include "UploadQueue.h"	// Needed for CFileHash
#include "IPFilter.h"		// Needed for CIPFilter
#include "Server.h"		// Needed for CServer
#include "ServerConnect.h"	// Needed for CServerConnect
#include "updownclient.h"	// Needed for CUpDownClient
#include "MemFile.h"		// Needed for CMemFile
#include "Preferences.h"	// Needed for CPreferences
#include "DownloadQueue.h"	// Needed for CDownloadQueue
#include "amule.h"		// Needed for theApp
#include "ED2KLink.h"		// Needed for CED2KLink
#include "Packet.h"		// Needed for CTag
#include "SearchList.h"		// Needed for CSearchFile
#include "ClientList.h"		// Needed for clientlist
#include "Statistics.h"		// Needed for theStats

#include <common/Format.h>	// Needed for CFormat
#include <common/FileFunctions.h>	// Needed for GetLastModificationTime
#include "ThreadTasks.h"	// Needed for CHashingTask/CCompletionTask/CAllocateFileTask
#include "GuiEvents.h"		// Needed for Notify_*
#include "DataToText.h"		// Needed for OriginToText()
#include "PlatformSpecific.h"	// Needed for CreateSparseFile()
#include "FileArea.h"		// Needed for CFileArea
#include "ScopedPtr.h"		// Needed for CScopedArray
#include "CorruptionBlackBox.h"

#include "kademlia/kademlia/Kademlia.h"
#include "kademlia/kademlia/Search.h"
SFileRating::SFileRating(const wxString &u, const wxString &f, sint16 r, const wxString &c)

SFileRating::SFileRating(const SFileRating &fr)
	UserName(fr.UserName),
	FileName(fr.FileName),

SFileRating::SFileRating(const CUpDownClient &client)
	UserName(client.GetUserName()),
	FileName(client.GetClientFilename()),
	Rating(client.GetFileRating()),
	Comment(client.GetFileComment())

SFileRating::~SFileRating()

class PartFileBufferedData
	CScopedArray<byte> data;		// This is the data to be written
	uint64 start;				// This is the start offset of the data
	uint64 end;				// This is the end offset of the data
	Requested_Block_Struct *block;		// This is the requested block that this data relates to

	PartFileBufferedData(byte *_data, uint64 _start, uint64 _end, Requested_Block_Struct *_block)
		: data(_data), start(_start), end(_end), block(_block)

typedef std::list<Chunk> ChunkList;
CPartFile::CPartFile()

CPartFile::CPartFile(CSearchFile* searchresult)
	m_abyFileHash = searchresult->GetFileHash();
	SetFileName(searchresult->GetFileName());
	SetFileSize(searchresult->GetFileSize());

	for (unsigned int i = 0; i < searchresult->m_taglist.size(); ++i){
		const CTag& pTag = searchresult->m_taglist[i];

		bool bTagAdded = false;
		if (pTag.GetNameID() == 0 && !pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {
				{ wxT(FT_ED2K_MEDIA_ARTIST),	2 },
				{ wxT(FT_ED2K_MEDIA_ALBUM),	2 },
				{ wxT(FT_ED2K_MEDIA_TITLE),	2 },
				{ wxT(FT_ED2K_MEDIA_LENGTH),	2 },
				{ wxT(FT_ED2K_MEDIA_BITRATE),	3 },
				{ wxT(FT_ED2K_MEDIA_CODEC),	2 }

			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if ( pTag.GetType() == _aMetaTags[t].nType &&
				     (pTag.GetName() == _aMetaTags[t].pszName)) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {
					// skip "length" tags with "0: 0" values
					if (pTag.GetName() == wxT(FT_ED2K_MEDIA_LENGTH)) {
						if (pTag.GetStr().IsSameAs(wxT("0: 0")) ||
						    pTag.GetStr().IsSameAs(wxT("0:0"))) {
					// skip "bitrate" tags with '0' values
					if ((pTag.GetName() == wxT(FT_ED2K_MEDIA_BITRATE)) && !pTag.GetInt()) {
					AddDebugLogLineM( false, logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
							pTag.GetFullInfo() );
					m_taglist.push_back(pTag);
		} else if (pTag.GetNameID() != 0 && pTag.GetName().IsEmpty() && (pTag.IsStr() || pTag.IsInt())) {
			static const struct {
			for (unsigned int t = 0; t < itemsof(_aMetaTags); ++t) {
				if (pTag.GetType() == _aMetaTags[t].nType && pTag.GetNameID() == _aMetaTags[t].nID) {
					// skip string tags with empty string values
					if (pTag.IsStr() && pTag.GetStr().IsEmpty()) {
					AddDebugLogLineM( false, logPartFile,
						wxT("CPartFile::CPartFile(CSearchFile*): added tag ") +
							pTag.GetFullInfo() );
					m_taglist.push_back(pTag);
			AddDebugLogLineM( false, logPartFile,
				wxT("CPartFile::CPartFile(CSearchFile*): ignored tag ") +
					pTag.GetFullInfo() );
CPartFile::CPartFile(const CED2KFileLink* fileLink)
	SetFileName(CPath(fileLink->GetName()));
	SetFileSize(fileLink->GetSize());
	m_abyFileHash = fileLink->GetHashKey();

	if (fileLink->m_hashset) {
		if (!LoadHashsetFromFile(fileLink->m_hashset, true)) {
			AddDebugLogLineM(true, logPartFile, wxT("eD2K link contained invalid hashset: ") + fileLink->GetLink());

CPartFile::~CPartFile()
	// if it's not opened, it was completed or deleted
	if (m_hpartfile.IsOpened()) {

	// Update met file (with current directory entry)

	DeleteContents(m_BufferedData_list);
	delete m_CorruptionBlackBox;

	wxASSERT(m_SrcList.empty());
	wxASSERT(m_A4AFsrclist.empty());
void CPartFile::CreatePartFile()
	// use lowest free partfilenumber for free file (InterCeptor)
		m_partmetfilename = CPath(wxString::Format(wxT("%03i.part.met"), i));
		m_fullname = thePrefs::GetTempDir().JoinPaths(m_partmetfilename);
	} while (m_fullname.FileExists());

	wxString strPartName = m_partmetfilename.RemoveExt().GetRaw();
	m_taglist.push_back(CTagString(FT_PARTFILENAME, strPartName));

	m_gaplist.Init(GetFileSize(), true);	// Init empty

	m_PartPath = m_fullname.RemoveExt();
	if (thePrefs::GetAllocFullFile()) {
		fileCreated = m_hpartfile.Create(m_PartPath, true);
		fileCreated = PlatformSpecific::CreateSparseFile(m_PartPath, GetFileSize());
		AddLogLineM(false,_("ERROR: Failed to create partfile)"));
		SetPartFileStatus(PS_ERROR);

	SetFilePath(thePrefs::GetTempDir());

	if (thePrefs::GetAllocFullFile()) {
		SetPartFileStatus(PS_ALLOCATING);
		CThreadScheduler::AddTask(new CAllocateFileTask(this, thePrefs::AddNewFilesPaused()));
		AllocationFinished();

	m_hashsetneeded = (GetED2KPartHashCount() > 0);

	SetActive(theApp->IsConnected());
uint8 CPartFile::LoadPartFile(const CPath& in_directory, const CPath& filename, bool from_backup, bool getsizeonly)
	bool isnewstyle = false;
	uint8 version, partmettype = PMT_UNKNOWN;

	std::map<uint16, Gap_Struct*> gap_map; // Slugfiller

	m_partmetfilename = filename;
	m_filePath = in_directory;
	m_fullname = m_filePath.JoinPaths(m_partmetfilename);
	m_PartPath = m_fullname.RemoveExt();

	// read the file data from the part.met file
	CPath curMetFilename = m_fullname;
		curMetFilename = curMetFilename.AppendExt(PARTMET_BAK_EXT);
		AddLogLineM(false, CFormat( _("Trying to load backup of met-file from %s") )

		CFile metFile(curMetFilename, CFile::read);
		if (!metFile.IsOpened()) {
			AddLogLineM(false, CFormat( _("ERROR: Failed to open part.met file: %s ==> %s") )
		} else if (metFile.GetLength() == 0) {
			AddLogLineM(false, CFormat( _("ERROR: part.met file is 0 size: %s ==> %s") )

		version = metFile.ReadUInt8();
		if (version != PARTFILE_VERSION && version != PARTFILE_SPLITTEDVERSION && version != PARTFILE_VERSION_LARGEFILE){
			//if (version == 83) return ImportShareazaTempFile(...)
			AddLogLineM(false, CFormat( _("ERROR: Invalid part.met file version: %s ==> %s") )

		isnewstyle = (version == PARTFILE_SPLITTEDVERSION);
		partmettype = isnewstyle ? PMT_SPLITTED : PMT_DEFAULTOLD;

		if (version == PARTFILE_VERSION) { // Do we still need this check?
			uint8 test[4];	// It will fail for certain files.
			metFile.Seek(24, wxFromStart);
			metFile.Read(test, 4);

			metFile.Seek(1, wxFromStart);
			if (test[0]==0 && test[1]==0 && test[2]==2 && test[3]==1) {
				isnewstyle = true;	// edonkey's so-called "old part style"
				partmettype = PMT_NEWOLD;
			uint32 temp = metFile.ReadUInt32();

			if (temp==0) {	// 0.48 partmets - different again
				LoadHashsetFromFile(&metFile, false);
				metFile.Seek(2, wxFromStart);
				LoadDateFromFile(&metFile);
				m_abyFileHash = metFile.ReadHash();

			LoadDateFromFile(&metFile);
			LoadHashsetFromFile(&metFile, false);

		uint32 tagcount = metFile.ReadUInt32();

		for (uint32 j = 0; j < tagcount; ++j) {
			CTag newtag(metFile, true);
					(newtag.GetNameID() == FT_FILESIZE ||
					 newtag.GetNameID() == FT_FILENAME))) {
				switch(newtag.GetNameID()) {
						if (!GetFileName().IsOk()) {
							// If it's not empty, we already loaded the unicoded one
							SetFileName(CPath(newtag.GetStr()));
					case FT_LASTSEENCOMPLETE: {
						lastseencomplete = newtag.GetInt();
						SetFileSize(newtag.GetInt());
					case FT_TRANSFERRED: {
						transferred = newtag.GetInt();
						//#warning needs setfiletype string
						//SetFileType(newtag.GetStr());
						m_category = newtag.GetInt();
						if (m_category > theApp->glob_prefs->GetCatCount() - 1 ) {
					case FT_OLDDLPRIORITY:
					case FT_DLPRIORITY: {
							m_iDownPriority = newtag.GetInt();
							if( m_iDownPriority == PR_AUTO ){
								m_iDownPriority = PR_HIGH;
								SetAutoDownPriority(true);
								if ( m_iDownPriority != PR_LOW &&
								     m_iDownPriority != PR_NORMAL &&
								     m_iDownPriority != PR_HIGH )
									m_iDownPriority = PR_NORMAL;
								SetAutoDownPriority(false);
						m_paused = (newtag.GetInt() == 1);
						m_stopped = m_paused;
					case FT_OLDULPRIORITY:
					case FT_ULPRIORITY: {
							SetUpPriority(newtag.GetInt(), false);
							if( GetUpPriority() == PR_AUTO ){
								SetUpPriority(PR_HIGH, false);
								SetAutoUpPriority(true);
								SetAutoUpPriority(false);
					case FT_KADLASTPUBLISHSRC: {
						SetLastPublishTimeKadSrc(newtag.GetInt(), 0);
						if(GetLastPublishTimeKadSrc() > (uint32)time(NULL)+KADEMLIAREPUBLISHTIMES) {
							// There may be a possibility of an older client that saved a random number here. This will check for that.
							SetLastPublishTimeKadSrc(0,0);
					case FT_KADLASTPUBLISHNOTES: {
						SetLastPublishTimeKadNotes(newtag.GetInt());
					// old tags: as long as they are not needed, take the chance to purge them
					case FT_KADLASTPUBLISHKEY:
					case FT_DL_ACTIVE_TIME:
						if (newtag.IsInt()) {
							m_nDlActiveTime = newtag.GetInt();
					case FT_CORRUPTEDPARTS: {
						wxASSERT(m_corrupted_list.empty());
						wxString strCorruptedParts(newtag.GetStr());
						wxStringTokenizer tokenizer(strCorruptedParts, wxT(","));
						while ( tokenizer.HasMoreTokens() ) {
							wxString token = tokenizer.GetNextToken();
							if (token.ToULong(&uPart)) {
								if (uPart < GetPartCount() && !IsCorruptedPart(uPart)) {
									m_corrupted_list.push_back(uPart);
							hash.DecodeBase32(newtag.GetStr()) == CAICHHash::GetHashSize();
						wxASSERT(hashSizeOk);
							m_pAICHHashSet->SetMasterHash(hash, AICH_VERIFIED);
					case FT_ATTRANSFERRED: {
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (uint64)newtag.GetInt());
					case FT_ATTRANSFERREDHI: {
						statistic.SetAllTimeTransferred(statistic.GetAllTimeTransferred() + (((uint64)newtag.GetInt()) << 32));
					case FT_ATREQUESTED: {
						statistic.SetAllTimeRequests(newtag.GetInt());
						statistic.SetAllTimeAccepts(newtag.GetInt());
				// Start Changes by Slugfiller for better exception handling

				wxCharBuffer tag_ansi_name = newtag.GetName().ToAscii();
				char gap_mark = tag_ansi_name ? tag_ansi_name[0u] : 0;
				if ( newtag.IsInt() && (newtag.GetName().Length() > 1) &&
				     ((gap_mark == FT_GAPSTART) ||
				      (gap_mark == FT_GAPEND))) {
					Gap_Struct *gap = NULL;
					unsigned long int gapkey;
					if (newtag.GetName().Mid(1).ToULong(&gapkey)) {
						if ( gap_map.find( gapkey ) == gap_map.end() ) {
							gap = new Gap_Struct;
							gap_map[gapkey] = gap;
							gap->start = (uint64)-1;
							gap->end = (uint64)-1;
							gap = gap_map[ gapkey ];
						if (gap_mark == FT_GAPSTART) {
							gap->start = newtag.GetInt();
						if (gap_mark == FT_GAPEND) {
							gap->end = newtag.GetInt()-1;
						AddDebugLogLineN(logPartFile, wxT("Wrong gap map key while reading met file!"));
				// End Changes by Slugfiller for better exception handling
					m_taglist.push_back(newtag);
			// Nothing. Else, nothing.
		// load the hashsets from the hybridstylepartmet
		if (isnewstyle && !getsizeonly && (metFile.GetPosition() < metFile.GetLength()) ) {
			metFile.Seek(1, wxFromCurrent);

			uint16 parts = GetPartCount();	// assuming we will get all hashsets

			for (uint16 i = 0; i < parts && (metFile.GetPosition()+16 < metFile.GetLength()); ++i){
				CMD4Hash cur_hash = metFile.ReadHash();
				m_hashlist.push_back(cur_hash);

			if (!m_hashlist.empty()) {
				CreateHashFromHashlist(m_hashlist, &checkhash);
				if (m_abyFileHash == checkhash) {
	} catch (const CInvalidPacket& e) {
		AddLogLineM(true, CFormat(wxT("Error: %s (%s) is corrupt (bad tags: %s), unable to load file."))
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logPartFile, CFormat( wxT("IO failure while loading '%s': %s") )
	} catch (const CEOFException& WXUNUSED(e)) {
		AddLogLineM(true, CFormat( _("ERROR: %s (%s) is corrupt (wrong tagcount), unable to load file.") )
		AddLogLineM(true, _("Trying to recover file info..."));
		// A safe file is one that has
		// We have the filesize, try to recover the other needed info

		// Do we need to check gaps? I think not,
		// because they are checked below. The worst
		// scenario will only mark the file as 0 bytes downloaded.

		if (!GetFileName().IsOk()) {
			// Not critical, let's put a random filename.
				"Recovering no-named file - will try to recover it as RecoveredFile.dat"));
			SetFileName(CPath(wxT("RecoveredFile.dat")));
			_("Recovered all available file info :D - Trying to use it..."));
			AddLogLineM(true, _("Unable to recover file info :("));
	m_gaplist.Init(GetFileSize(), false);	// Init full, then add gaps
	// Now to flush the map into the list (Slugfiller)
	std::map<uint16, Gap_Struct*>::iterator it = gap_map.begin();
	for ( ; it != gap_map.end(); ++it ) {
		Gap_Struct* gap = it->second;
		// SLUGFILLER: SafeHash - revised code, and extra safety
		if ( (gap->start != (uint64)-1) &&
		     (gap->end != (uint64)-1) &&
		     gap->start <= gap->end &&
		     gap->start < GetFileSize()) {
			if (gap->end >= GetFileSize()) {
				gap->end = GetFileSize()-1; // Clipping
			m_gaplist.AddGap(gap->start, gap->end); // All tags accounted for, use safe adding
		// SLUGFILLER: SafeHash

	//check if this is a backup
	if ( m_fullname.GetExt().MakeLower() == wxT("backup" )) {
		m_fullname = m_fullname.RemoveExt();

	// open permanent handle
	if ( !m_hpartfile.Open(m_PartPath, CFile::read_write)) {
		AddLogLineM(false, CFormat( _("Failed to open %s (%s)") )

	SetPartFileStatus(PS_EMPTY);

		// SLUGFILLER: SafeHash - final safety, make sure any missing part of the file is gap
		if (m_hpartfile.GetLength() < GetFileSize())
			AddGap(m_hpartfile.GetLength(), GetFileSize()-1);
		// Goes both ways - Partfile should never be too large
		if (m_hpartfile.GetLength() > GetFileSize()) {
			AddDebugLogLineM( true, logPartFile, CFormat( wxT("Partfile \"%s\" is too large! Truncating %llu bytes.") ) % GetFileName() % (m_hpartfile.GetLength() - GetFileSize()));
			m_hpartfile.SetLength(GetFileSize());
		// SLUGFILLER: SafeHash
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM( true, logPartFile, CFormat( wxT("Error while accessing partfile \"%s\": %s") ) % GetFileName() % e.what());
		SetPartFileStatus(PS_ERROR);

	// now close the file again until needed
	m_hpartfile.Release(true);

	// check hashcount, file status etc
	if (GetHashCount() != GetED2KPartHashCount()){
		m_hashsetneeded = true;
		m_hashsetneeded = false;
		for (size_t i = 0; i < m_hashlist.size(); ++i) {

	SetPartFileStatus(PS_READY);

	if (m_gaplist.IsComplete()) { // is this file complete already?

	if (!isnewstyle) { // not for importing
		const time_t file_date = CPath::GetModificationTime(m_PartPath);
		if (m_lastDateChanged != file_date) {
			// It's pointless to rehash an empty file, since the case
			// where a user has zero'd a file is handled above ...
			if (m_hpartfile.GetLength()) {
				AddLogLineM(false, CFormat( _("WARNING: %s might be corrupted (%i)") )
					% (m_lastDateChanged - file_date) );

				SetPartFileStatus(PS_WAITINGFORHASH);

				CPath partFileName = m_partmetfilename.RemoveExt();
				CThreadScheduler::AddTask(new CHashingTask(m_filePath, partFileName, this));

	UpdateCompletedInfos();
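	// Anything completed beyond what was actually transferred was gained through
	// compressed transfers; any surplus transfer over the completed size was lost
	// to corrupted data that had to be downloaded again.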
	if (completedsize > transferred) {
		m_iGainDueToCompression = completedsize - transferred;
	} else if (completedsize != transferred) {
		m_iLostDueToCorruption = transferred - completedsize;
bool CPartFile::SavePartFile(bool Initial)
		case PS_WAITINGFORHASH:

	/* Don't write anything to disk if less than 100 KB of free space is left. */
	sint64 free = CPath::GetFreeSpaceAt(GetFilePath());
	if ((free != wxInvalidOffset) && (free < (100 * 1024))) {

		if (!m_PartPath.FileExists()) {
			throw wxString(wxT(".part file not found"));

		uint32 lsc = lastseencomplete;

		CPath::BackupFile(m_fullname, wxT(".backup"));
		CPath::RemoveFile(m_fullname);

		file.Open(m_fullname, CFile::write);
		if (!file.IsOpened()) {
			throw wxString(wxT("Failed to open part.met file"));

		file.WriteUInt8(IsLargeFile() ? PARTFILE_VERSION_LARGEFILE : PARTFILE_VERSION);

		file.WriteUInt32(CPath::GetModificationTime(m_PartPath));

		file.WriteHash(m_abyFileHash);
		uint16 parts = m_hashlist.size();
		file.WriteUInt16(parts);
		for (int x = 0; x < parts; ++x) {
			file.WriteHash(m_hashlist[x]);

#define FIXED_TAGS 15
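		// FIXED_TAGS counts the tags that are always written below (numbered 0-14 in
		// the trailing comments); the conditional blocks that follow presumably bump
		// tagcount further for the optional tags before it is written out.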
		uint32 tagcount = m_taglist.size() + FIXED_TAGS + (m_gaplist.size()*2);
		if (!m_corrupted_list.empty()) {
		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
		if (GetLastPublishTimeKadSrc()){
		if (GetLastPublishTimeKadNotes()){
		if (GetDlActiveTime()){

		file.WriteUInt32(tagcount);

		//#warning Kry - Where are lost by corruption and gained by compression?

		// 0 (unicoded part file name)
		// We write it with BOM to keep eMule compatibility. Note that the 'printable' filename is saved,
		// as presently the filename does not represent an actual file.
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file, utf8strOptBOM );
		CTagString( FT_FILENAME, GetFileName().GetPrintable()).WriteTagToFile( &file );	// 1

		CTagIntSized( FT_FILESIZE, GetFileSize(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 2
		CTagIntSized( FT_TRANSFERRED, transferred, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );	// 3
		CTagInt32( FT_STATUS, (m_paused ? 1 : 0)).WriteTagToFile( &file );	// 4

		if ( IsAutoDownPriority() ) {
			CTagInt32( FT_DLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 6
			CTagInt32( FT_DLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 5
			CTagInt32( FT_OLDDLPRIORITY, m_iDownPriority ).WriteTagToFile( &file );	// 6

		CTagInt32( FT_LASTSEENCOMPLETE, lsc ).WriteTagToFile( &file );	// 7

		if ( IsAutoUpPriority() ) {
			CTagInt32( FT_ULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, (uint8)PR_AUTO ).WriteTagToFile( &file );	// 9
			CTagInt32( FT_ULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 8
			CTagInt32( FT_OLDULPRIORITY, GetUpPriority() ).WriteTagToFile( &file );	// 9

		CTagInt32(FT_CATEGORY, m_category).WriteTagToFile( &file );	// 10
		CTagInt32(FT_ATTRANSFERRED, statistic.GetAllTimeTransferred() & 0xFFFFFFFF).WriteTagToFile( &file );	// 11
		CTagInt32(FT_ATTRANSFERREDHI, statistic.GetAllTimeTransferred() >> 32).WriteTagToFile( &file );	// 12
		CTagInt32(FT_ATREQUESTED, statistic.GetAllTimeRequests()).WriteTagToFile( &file );	// 13
		CTagInt32(FT_ATACCEPTED, statistic.GetAllTimeAccepts()).WriteTagToFile( &file );	// 14
		// corrupt part infos
		if (!m_corrupted_list.empty()) {
			wxString strCorruptedParts;
			std::list<uint16>::iterator it = m_corrupted_list.begin();
			for (; it != m_corrupted_list.end(); ++it) {
				uint16 uCorruptedPart = *it;
				if (!strCorruptedParts.IsEmpty()) {
					strCorruptedParts += wxT(",");
				strCorruptedParts += wxString::Format(wxT("%u"), (unsigned)uCorruptedPart);
			wxASSERT( !strCorruptedParts.IsEmpty() );

			CTagString( FT_CORRUPTEDPARTS, strCorruptedParts ).WriteTagToFile( &file); // 11?

		if (m_pAICHHashSet->HasValidMasterHash() && (m_pAICHHashSet->GetStatus() == AICH_VERIFIED)){
			CTagString aichtag(FT_AICH_HASH, m_pAICHHashSet->GetMasterHash().GetString() );
			aichtag.WriteTagToFile(&file); // 12?

		if (GetLastPublishTimeKadSrc()){
			CTagInt32(FT_KADLASTPUBLISHSRC, GetLastPublishTimeKadSrc()).WriteTagToFile(&file); // 15?

		if (GetLastPublishTimeKadNotes()){
			CTagInt32(FT_KADLASTPUBLISHNOTES, GetLastPublishTimeKadNotes()).WriteTagToFile(&file); // 16?

		if (GetDlActiveTime()){
			CTagInt32(FT_DL_ACTIVE_TIME, GetDlActiveTime()).WriteTagToFile(&file); // 17

		for (uint32 j = 0; j < (uint32)m_taglist.size(); ++j) {
			m_taglist[j].WriteTagToFile(&file);

		for (CGapList::const_iterator it = m_gaplist.begin(); it != m_gaplist.end(); ++it) {
			wxString tagName = wxString::Format(wxT(" %u"), i_pos);

			// gap start = first missing byte, but gap end = first non-missing byte
			// in edonkey, but I think it's easier to use the real limits
			tagName[0] = FT_GAPSTART;
			CTagIntSized(tagName, it.start(), IsLargeFile() ? 64 : 32).WriteTagToFile( &file );

			tagName[0] = FT_GAPEND;
			CTagIntSized(tagName, it.end() + 1, IsLargeFile() ? 64 : 32).WriteTagToFile( &file );
	} catch (const wxString& error) {
		AddLogLineNS(CFormat( _("ERROR while saving partfile: %s (%s ==> %s)") )
	} catch (const CIOFailureException& e) {
		AddLogLineCS(_("IO failure while saving partfile: ") + e.what());

		CPath::RemoveFile(m_fullname.AppendExt(wxT(".backup")));

	sint64 metLength = m_fullname.GetFileSize();
	if (metLength == wxInvalidOffset) {
		theApp->ShowAlert( CFormat( _("Could not retrieve length of '%s' - using %s file.") )
		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
	} else if (metLength == 0) {
		// Don't backup if it's 0 size but raise a warning!!!
		theApp->ShowAlert( CFormat( _("'%s' is 0 size somehow - using %s file.") )
		CPath::CloneFile(m_fullname.AppendExt(PARTMET_BAK_EXT), m_fullname, true);
		// no error, just backup
		CPath::BackupFile(m_fullname, PARTMET_BAK_EXT);
void CPartFile::SaveSourceSeeds()
#define MAX_SAVED_SOURCES 10
	// Kry - Source seeds
	// Based on a feature request, this saves the last MAX_SAVED_SOURCES
	// sources of the file, giving a 'seed' for the next run.
	// We save the last sources because:
	// 1 - They could be the hardest to get
	// 2 - They are the most likely to still be available
	// However, downloading sources take preference, because
	// we probably have more credits with them.
	// Anyway, source exchange will get us the rest of the sources.
	// This feature is currently used only on rare files (< 20 sources).
	if (GetSourceCount()>20) {

	CClientPtrList source_seeds;

	CClientPtrList::iterator it = m_downloadingSourcesList.begin();
	for( ; it != m_downloadingSourcesList.end() && n_sources < MAX_SAVED_SOURCES; ++it) {
		CUpDownClient *cur_src = *it;
		if (!cur_src->HasLowID()) {
			source_seeds.push_back(cur_src);

	if (n_sources < MAX_SAVED_SOURCES) {
		// Not enough downloading sources to fill the list, going to sources list
		if (GetSourceCount() > 0) {
			SourceSet::reverse_iterator rit = m_SrcList.rbegin();
			for ( ; ((rit != m_SrcList.rend()) && (n_sources < MAX_SAVED_SOURCES)); ++rit) {
				CUpDownClient* cur_src = *rit;
				if (!cur_src->HasLowID()) {
					source_seeds.push_back(cur_src);

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));

	file.Create(seedsPath, true);
	if (!file.IsOpened()) {
		AddLogLineM(false, CFormat( _("Failed to save part.met.seeds file for %s") )

		file.WriteUInt8(0); // v3, to avoid v2 clients choking on it.
		file.WriteUInt8(source_seeds.size());

		CClientPtrList::iterator it2 = source_seeds.begin();
		for (; it2 != source_seeds.end(); ++it2) {
			CUpDownClient* cur_src = *it2;
			file.WriteUInt32(cur_src->GetUserIDHybrid());
			file.WriteUInt16(cur_src->GetUserPort());
			file.WriteHash(cur_src->GetUserHash());
			// CryptSettings - See SourceExchange V4
			const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
			const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
			const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
			const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
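			// Bit layout of byCryptOptions in the seeds file: bit 0 = supports the
			// obfuscated (crypt) layer, bit 1 = requests it, bit 2 = requires it.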
			file.WriteUInt8(byCryptOptions);

		/* v2: Added to keep track of too old seeds */
		file.WriteUInt32(wxDateTime::Now().GetTicks());

		AddLogLineM(false, CFormat( wxPLURAL("Saved %i source seed for partfile: %s (%s)", "Saved %i source seeds for partfile: %s (%s)", n_sources) )
	} catch (const CIOFailureException& e) {
		AddDebugLogLineM(true, logPartFile, CFormat( wxT("Error saving partfile's seeds file (%s - %s): %s") )
		CPath::RemoveFile(seedsPath);
void CPartFile::LoadSourceSeeds()
	CMemFile sources_data;

	bool valid_sources = false;

	const CPath seedsPath = m_fullname.AppendExt(wxT(".seeds"));
	if (!seedsPath.FileExists()) {

	CFile file(seedsPath, CFile::read);
	if (!file.IsOpened()) {
		AddLogLineM(false, CFormat( _("Partfile %s (%s) has no seeds file") )

		if (file.GetLength() <= 1) {
			AddLogLineM(false, CFormat( _("Partfile %s (%s) has a void seeds file") )

		uint8 src_count = file.ReadUInt8();

		bool bUseSX2Format = (src_count == 0);
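		// A leading zero byte marks the newer (v3 / SX2) seeds layout written by
		// SaveSourceSeeds() above; in the old layout this first byte already is the source count.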
		if (bUseSX2Format) {
			src_count = file.ReadUInt8();

		sources_data.WriteUInt16(src_count);

		for (int i = 0; i < src_count; ++i) {
			uint32 dwID = file.ReadUInt32();
			uint16 nPort = file.ReadUInt16();

			sources_data.WriteUInt32(bUseSX2Format ? dwID : wxUINT32_SWAP_ALWAYS(dwID));
			sources_data.WriteUInt16(nPort);
			sources_data.WriteUInt32(0);
			sources_data.WriteUInt16(0);

			if (bUseSX2Format) {
				sources_data.WriteHash(file.ReadHash());
				sources_data.WriteUInt8(file.ReadUInt8());

		// v2: Added to keep track of too old seeds
		time_t time = (time_t)file.ReadUInt32();

		// Time frame is 2 hours. More than enough to compile
		// your new aMule version!
		if ((time + MIN2S(120)) >= wxDateTime::Now().GetTicks()) {
			valid_sources = true;
			// v1 has no time data. We can safely use
			// the sources; the time will be saved next time.
			valid_sources = true;

		if (valid_sources) {
			sources_data.Seek(0);
			AddClientSources(&sources_data, SF_SOURCE_SEEDS, bUseSX2Format ? 4 : 1, bUseSX2Format);

	} catch (const CSafeIOException& e) {
		AddLogLineM(false, CFormat( _("Error reading partfile's seeds file (%s - %s): %s") )
void CPartFile::PartFileHashFinished(CKnownFile* result)
	m_lastDateChanged = result->m_lastDateChanged;
	bool errorfound = false;
	if (GetED2KPartHashCount() == 0){
		if (IsComplete(0, GetFileSize()-1)){
			if (result->GetFileHash() != GetFileHash()){
					"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
					"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
					% result->GetFileHash().Encode()
					% GetFileHash().Encode() );
				AddGap(0, GetFileSize()-1);

		for (size_t i = 0; i < m_hashlist.size(); ++i){
			// Kry - trel_ar's completed parts check on rehashing.
			// Very nice feature: if a file is completed but .part.met doesn't believe it,
			uint64 partStart = i * PARTSIZE;
			uint64 partEnd   = partStart + GetPartSize(i) - 1;
			if (!( i < result->GetHashCount() && (result->GetPartHash(i) == GetPartHash(i)))){
				if (IsComplete(i)) {
					if ( i < result->GetHashCount() )
						wronghash = result->GetPartHash(i);
						"Found corrupted part (%d) in %d part file %s - FileResultHash |%s| FileHash |%s|",
						"Found corrupted part (%d) in %d parts file %s - FileResultHash |%s| FileHash |%s|",
						GetED2KPartHashCount())
						% GetED2KPartHashCount()
						% wronghash.Encode()
						% GetPartHash(i).Encode() );
				if (!IsComplete(i)){
					AddLogLineM(false, CFormat( _("Found completed part (%i) in %s") )
					RemoveBlockFromList(partStart, partEnd);

		result->GetAICHHashset()->GetStatus() == AICH_HASHSETCOMPLETE &&
		status == PS_COMPLETING) {
		delete m_pAICHHashSet;
		m_pAICHHashSet = result->GetAICHHashset();
		result->SetAICHHashset(NULL);
		m_pAICHHashSet->SetOwner(this);
	else if (status == PS_COMPLETING) {
		AddDebugLogLineM(false, logPartFile,
			CFormat(wxT("Failed to store new AICH Hashset for completed file: %s"))

	if (status == PS_COMPLETING){
		AddLogLineM(false, CFormat( _("Finished rehashing %s") ) % GetFileName());
		SetStatus(PS_READY);

	SetStatus(PS_READY);
	theApp->sharedfiles->SafeAddKFile(this);
void CPartFile::AddGap(uint64 start, uint64 end)
	m_gaplist.AddGap(start, end);
	UpdateDisplayedInfo();

void CPartFile::AddGap(uint16 part)
	m_gaplist.AddGap(part);
	UpdateDisplayedInfo();

bool CPartFile::IsAlreadyRequested(uint64 start, uint64 end)
	std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
	for (; it != m_requestedblocks_list.end(); ++it) {
		Requested_Block_Struct* cur_block = *it;

		if ((start <= cur_block->EndOffset) && (end >= cur_block->StartOffset)) {

bool CPartFile::GetNextEmptyBlockInPart(uint16 partNumber, Requested_Block_Struct *result)
	// Find start of this part
	uint64 partStart = (PARTSIZE * partNumber);
	uint64 start = partStart;

	// What is the end limit of this block, i.e. can't go outside part (or filesize)
	uint64 partEnd = partStart + GetPartSize(partNumber) - 1;
	// Loop until find a suitable gap and return true, or no more gaps and return false
	CGapList::const_iterator it = m_gaplist.begin();
		uint64 gapStart, end;

		// Find the first gap from the start position
		for (; it != m_gaplist.end(); ++it) {
			gapStart = it.start();

			// Want gaps that overlap start<->partEnd
			if (gapStart <= partEnd && end >= start) {
			} else if (gapStart > partEnd) {

		// If no gaps after start, exit

		// Update start position if gap starts after current pos
		if (start < gapStart) {

		// Find end, keeping within the max block size and the part limit
		uint64 blockLimit = partStart + (BLOCKSIZE * (((start - partStart) / BLOCKSIZE) + 1)) - 1;
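		// blockLimit is the last byte of the BLOCKSIZE-aligned block (relative to the
		// start of the part) that contains 'start', so a request never crosses a block boundary.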
		if (end > blockLimit) {
		if (end > partEnd) {
		// If this gap has not already been requested, we have found a valid entry
		if (!IsAlreadyRequested(start, end)) {
			// Was this block to be returned
			if (result != NULL) {
				result->StartOffset = start;
				result->EndOffset = end;
				md4cpy(result->FileID, GetFileHash().GetHash());
				result->transferred = 0;

		// Reposition to end of that gap

		// If tried all gaps then break out of the loop
		if (end == partEnd) {

	// No suitable gap found

void CPartFile::FillGap(uint64 start, uint64 end)
	m_gaplist.FillGap(start, end);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();

void CPartFile::FillGap(uint16 part)
	m_gaplist.FillGap(part);
	UpdateCompletedInfos();
	UpdateDisplayedInfo();

void CPartFile::UpdateCompletedInfos()
	uint64 allgaps = m_gaplist.GetGapSize();

	percentcompleted = (1.0 - (double)allgaps/GetFileSize()) * 100.0;
	completedsize = GetFileSize() - allgaps;
void CPartFile::WritePartStatus(CMemFile* file)
	uint16 parts = GetED2KPartCount();
	file->WriteUInt16(parts);
	while (done != parts){
		for (uint32 i = 0; i != 8; ++i) {
			if (IsComplete(i)) {
			if (done == parts) {
		file->WriteUInt8(towrite);
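		// The ED2K part status bitfield is packed eight parts per byte and is
		// preceded by the 16-bit part count written above.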
void CPartFile::WriteCompleteSourcesCount(CMemFile* file)
	file->WriteUInt16(m_nCompleteSourcesCount);
uint32 CPartFile::Process(uint32 reducedownload /*in percent*/, uint8 m_icounter)
	uint32 dwCurTick = ::GetTickCount();

	// If buffer size exceeds limit, or if not written within time limit, flush data
	if ( (m_nTotalBufferData > thePrefs::GetFileBufferSize()) ||
	     (dwCurTick > (m_nLastBufferFlushTime + BUFFER_TIME_LIMIT))) {
		// Avoid flushing while copying preview file
		if (!m_bPreviewing) {

	// check if we want new sources from server --> MOVED for 16.40 version
	old_trans = transferingsrc;

	if (m_icounter < 10) {
		// Update only downloading sources.
		CClientPtrList::iterator it = m_downloadingSourcesList.begin();
		for( ; it != m_downloadingSourcesList.end(); ) {
			CUpDownClient *cur_src = *it++;
			if(cur_src->GetDownloadState() == DS_DOWNLOADING) {
				kBpsDown += cur_src->SetDownloadLimit(reducedownload);
		// Update all sources (including downloading sources)
		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
			CUpDownClient* cur_src = *it++;
			switch (cur_src->GetDownloadState()) {
				case DS_DOWNLOADING: {
					kBpsDown += cur_src->SetDownloadLimit(reducedownload);
				case DS_LOWTOLOWIP: {
					if ( cur_src->HasLowID() && !theApp->DoCallback( cur_src ) ) {
						// If we are almost maxed on sources,
						// slowly remove these clients to see
						// if we can find a better source.
						if( ((dwCurTick - lastpurgetime) > 30000) &&
						    (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8))) {
							RemoveSource( cur_src );
							lastpurgetime = dwCurTick;
						cur_src->SetDownloadState(DS_ONQUEUE);
				case DS_NONEEDEDPARTS: {
					// we try to purge noneeded source, even without reaching the limit
					if((dwCurTick - lastpurgetime) > 40000) {
						if(!cur_src->SwapToAnotherFile(false, false, false, NULL)) {
							//however we only delete them if reaching the limit
							if (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) {
								RemoveSource(cur_src);
								lastpurgetime = dwCurTick;
								break; //Johnny-B - nothing more to do here (good eye!)
							lastpurgetime = dwCurTick;

					// doubled reasktime for no needed parts - save connections and traffic
					if ( !((!cur_src->GetLastAskedTime()) ||
					       (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME*2)) {

					// Recheck this client to see if still NNP..
					// Set to DS_NONE so that we force a TCP reask next time..
					cur_src->SetDownloadState(DS_NONE);

					if( cur_src->IsRemoteQueueFull()) {
						if( ((dwCurTick - lastpurgetime) > 60000) &&
						    (GetSourceCount() >= (thePrefs::GetMaxSourcePerFile()*.8 )) ) {
							RemoveSource( cur_src );
							lastpurgetime = dwCurTick;
							break; //Johnny-B - nothing more to do here (good eye!)
					// Give up to 1 min for UDP to respond..
					// If we are within one min on TCP, do not try..
					if ( theApp->IsConnected() &&
					     ( (!cur_src->GetLastAskedTime()) ||
					       (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME-20000)) {
						cur_src->UDPReaskForDownload();
					// No break here, since the next case takes care of asking for downloads.
				case DS_TOOMANYCONNS:
				case DS_WAITCALLBACK:
				case DS_WAITCALLBACKKAD: {
					if ( theApp->IsConnected() &&
					     ( (!cur_src->GetLastAskedTime()) ||
					       (dwCurTick - cur_src->GetLastAskedTime()) > FILEREASKTIME)) {
						if (!cur_src->AskForDownload()) {
							// I left this break here just as a reminder,
							// just in case we rearrange things..
	/* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
	if (IsA4AFAuto() && ((!m_LastNoNeededCheck) || (dwCurTick - m_LastNoNeededCheck > 900000))) {
		m_LastNoNeededCheck = dwCurTick;
		for ( SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
			CUpDownClient *cur_source = *it++;
			uint8 download_state = cur_source->GetDownloadState();
			if( download_state != DS_DOWNLOADING
			    && cur_source->GetRequestFile()
			    && ((!cur_source->GetRequestFile()->IsA4AFAuto()) || download_state == DS_NONEEDEDPARTS))
				cur_source->SwapToAnotherFile(false, false, false, this);
	/* eMule 0.30c implementation, i give it a try (Creteil) END ... */

	// swap No needed partfiles if possible

	if (((old_trans==0) && (transferingsrc>0)) || ((old_trans>0) && (transferingsrc==0))) {
		SetPartFileStatus(status);

	// Kad source search
	if( GetMaxSourcePerFileUDP() > GetSourceCount()){
		//Once we can handle lowID users in Kad, we remove the second IsConnected
		if (theApp->downloadqueue->DoKademliaFileRequest() && (Kademlia::CKademlia::GetTotalFile() < KADEMLIATOTALFILE) && (dwCurTick > m_LastSearchTimeKad) && Kademlia::CKademlia::IsConnected() && theApp->IsConnected() && !IsStopped()){
			theApp->downloadqueue->SetLastKademliaFileRequest();

			if (GetKadFileSearchID()) {
				/* This will never happen anyway. We're talking a
				   1h timespan and searches are at max 45secs */
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);

			Kademlia::CUInt128 kadFileID(GetFileHash().GetHash());
			Kademlia::CSearch* pSearch = Kademlia::CSearchManager::PrepareLookup(Kademlia::CSearch::FILE, true, kadFileID);
			AddDebugLogLineM(false, logKadSearch, CFormat(wxT("Preparing a Kad Search for '%s'")) % GetFileName());
				AddDebugLogLineM(false, logKadSearch, CFormat(wxT("Kad lookup started for '%s'")) % GetFileName());
				if(m_TotalSearchesKad < 7) {
					m_TotalSearchesKad++;
				m_LastSearchTimeKad = dwCurTick + (KADEMLIAREASKTIME*m_TotalSearchesKad);
				SetKadFileSearchID(pSearch->GetSearchID());
		if(GetKadFileSearchID()) {
			Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);

	// check if we want new sources from server
	if ( !m_localSrcReqQueued &&
	     ( (!m_lastsearchtime) ||
	       (dwCurTick - m_lastsearchtime) > SERVERREASKTIME) &&
	     theApp->IsConnectedED2K() &&
	     thePrefs::GetMaxSourcePerFileSoft() > GetSourceCount() &&
		m_localSrcReqQueued = true;
		theApp->downloadqueue->SendLocalSrcRequest(this);
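		// m_localSrcReqQueued keeps us from queuing more than one pending server
		// source request per file; the download queue is expected to clear it once
		// the request has actually been sent.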
	// calculate datarate, set limit etc.

	// Kry - does the 3 / 30 difference produce too much flickering or CPU?
	if (m_count >= 30) {
		UpdateAutoDownPriority();
		UpdateDisplayedInfo();
		if(m_bPercentUpdated == false) {
			UpdateCompletedInfos();
		m_bPercentUpdated = false;
		if (thePrefs::ShowCatTabInfos()) {
			Notify_ShowUpdateCatTabTitles();

	// release file handle if unused for some time
	m_hpartfile.Release();

	return (uint32)(kBpsDown*1024.0);
bool CPartFile::CanAddSource(uint32 userid, uint16 port, uint32 serverip, uint16 serverport, uint8* pdebug_lowiddropped, bool ed2kID)
	//The incoming ID could have the userid in the Hybrid format..
	uint32 hybridID = 0;
	if (IsLowID(userid)) {
		hybridID = wxUINT32_SWAP_ALWAYS(userid);

	if (!IsLowID(userid)) {
		userid = wxUINT32_SWAP_ALWAYS(userid);

	// MOD Note: Do not change this part - Merkur
	if (theApp->IsConnectedED2K()) {
		if(::IsLowID(theApp->GetED2KID())) {
			if(theApp->GetED2KID() == userid && theApp->serverconnect->GetCurrentServer()->GetIP() == serverip && theApp->serverconnect->GetCurrentServer()->GetPort() == serverport) {
			if(theApp->GetPublicIP() == userid) {
			if(theApp->GetED2KID() == userid && thePrefs::GetPort() == port) {

	if (Kademlia::CKademlia::IsConnected()) {
		if(!Kademlia::CKademlia::IsFirewalled()) {
			if(Kademlia::CKademlia::GetIPAddress() == hybridID && thePrefs::GetPort() == port) {

	//This allows *.*.*.0 clients to not be removed if Ed2kID == false
	if ( IsLowID(hybridID) && theApp->IsFirewalled()) {
		if (pdebug_lowiddropped) {
			(*pdebug_lowiddropped)++;
void CPartFile::AddSources(CMemFile& sources, uint32 serverip, uint16 serverport, unsigned origin, bool bWithObfuscationAndHash)
	uint8 count = sources.ReadUInt8();
	uint8 debug_lowiddropped = 0;
	uint8 debug_possiblesources = 0;
	CMD4Hash achUserHash;

		// since we may receive multiple search source UDP results we have to "consume" all data of that packet
		AddDebugLogLineM(false, logPartFile, wxT("Trying to add sources for a stopped file"));
		sources.Seek(count*(4+2), wxFromCurrent);

	for (int i = 0; i != count; ++i) {
		uint32 userid = sources.ReadUInt32();
		uint16 port   = sources.ReadUInt16();

		uint8 byCryptOptions = 0;
		if (bWithObfuscationAndHash){
			byCryptOptions = sources.ReadUInt8();
			if ((byCryptOptions & 0x80) > 0) {
				achUserHash = sources.ReadHash();
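			// byCryptOptions: bit 0 = crypt layer supported, bit 1 = requested,
			// bit 7 = a 16-byte user hash follows in the packet (read above).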
			if ((thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x01/*supported*/) > 0 && (byCryptOptions & 0x80) == 0)
			    || (thePrefs::IsClientCryptLayerSupported() && (byCryptOptions & 0x02/*requested*/) > 0 && (byCryptOptions & 0x80) == 0)) {
				AddDebugLogLineM(false, logPartFile, wxString::Format(wxT("Server didn't provide UserHash for source %u, even if it was expected to (or local obfuscationsettings changed during serverconnect"), userid));
			} else if (!thePrefs::IsClientCryptLayerRequested() && (byCryptOptions & 0x02/*requested*/) == 0 && (byCryptOptions & 0x80) != 0) {
				AddDebugLogLineM(false, logPartFile, wxString::Format(wxT("Server provided UserHash for source %u, even if it wasn't expected to (or local obfuscationsettings changed during serverconnect"), userid));

		// "Filter LAN IPs" and "IPfilter" the received sources IP addresses
		if (!IsLowID(userid)) {
			// check for 0-IP, localhost and optionally for LAN addresses
			if ( !IsGoodIP(userid, thePrefs::FilterLanIPs()) ) {
			if (theApp->ipfilter->IsFiltered(userid)) {

		if (!CanAddSource(userid, port, serverip, serverport, &debug_lowiddropped)) {

		if(thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
			++debug_possiblesources;
			CUpDownClient* newsource = new CUpDownClient(port, userid, serverip, serverport, this, true, true);

			newsource->SetSourceFrom((ESourceFrom)origin);
			newsource->SetConnectOptions(byCryptOptions, true, false);

			if ((byCryptOptions & 0x80) != 0) {
				newsource->SetUserHash(achUserHash);

			theApp->downloadqueue->CheckAndAddSource(this, newsource);
			AddDebugLogLineM(false, logPartFile, wxT("Consuming a packet because of max sources reached"));
			// Since we may receive multiple search source UDP results we have to "consume" all data of that packet
			// This '+1' is added because 'i' counts from 0.
			sources.Seek((count-(i+1))*(4+2), wxFromCurrent);
			if (GetKadFileSearchID()) {
				Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
void CPartFile::UpdatePartsInfo()
	if( !IsPartFile() ) {
		CKnownFile::UpdatePartsInfo();

	uint16 partcount = GetPartCount();
	bool flag = (time(NULL) - m_nCompleteSourcesTime > 0);

	// Ensure the frequency-list is ready
	if ( m_SrcpartFrequency.size() != GetPartCount() ) {
		m_SrcpartFrequency.clear();
		m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);

	// Find number of available parts
	uint16 availablecounter = 0;
	for ( uint16 i = 0; i < partcount; ++i ) {
		if ( m_SrcpartFrequency[i] )

	if ( ( availablecounter == partcount ) && ( m_availablePartsCount < partcount ) ) {
		lastseencomplete = time(NULL);

	m_availablePartsCount = availablecounter;

		ArrayOfUInts16 count;
		count.reserve(GetSourceCount());

		for ( SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it ) {
			if ( !(*it)->GetUpPartStatus().empty() && (*it)->GetUpPartCount() == partcount ) {
				count.push_back((*it)->GetUpCompleteSourcesCount());

		m_nCompleteSourcesCount = m_nCompleteSourcesCountLo = m_nCompleteSourcesCountHi = 0;

		for (uint16 i = 0; i < partcount; ++i) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
			else if( m_nCompleteSourcesCount > m_SrcpartFrequency[i]) {
				m_nCompleteSourcesCount = m_SrcpartFrequency[i];
		count.push_back(m_nCompleteSourcesCount);

		int32 n = count.size();

		std::sort(count.begin(), count.end(), std::less<uint16>());

		int32 i = n >> 1;	// (n / 2)
		int32 j = (n * 3) >> 2;	// (n * 3) / 4
		int32 k = (n * 7) >> 3;	// (n * 7) / 8
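		// i, j and k index the median, the 3rd quartile and the 7/8 quantile of the
		// sorted complete-source samples; they feed the low, normal and high
		// estimates computed below.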
		//When still a part file, adjust your guesses by 20% to what you see..

			//Not many sources, so just use what you see..
			// welcome to 'plain stupid code'
			// m_nCompleteSourcesCount;
			m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
			m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;
		} else if (n < 20) {
			// For the low guess and the normal guess count:
			// If we see more sources than the guessed low and normal, use what we see.
			// If we see fewer sources than the guessed low, the network accounts for 80%,
			// we account for 20% with what we see, and make sure we are still above the normal.

			// Adjust 80% network and 20% what we see.
			if ( count[i] < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
				m_nCompleteSourcesCountLo =
					(uint16)((float)(count[i]*.8) +
						 (float)(m_nCompleteSourcesCount*.2));
			m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
			m_nCompleteSourcesCountHi =
				(uint16)((float)(count[j]*.8) +
					 (float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

			// The network accounts for 80%, we account for 20% with what
			// we see and make sure we are still above the low.

			// The network accounts for 80%, we account for 20% with what
			// we see and make sure we are still above the normal.

			m_nCompleteSourcesCountLo = m_nCompleteSourcesCount;
			m_nCompleteSourcesCount = (uint16)((float)(count[j]*.8)+(float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCount < m_nCompleteSourcesCountLo ) {
				m_nCompleteSourcesCount = m_nCompleteSourcesCountLo;
			m_nCompleteSourcesCountHi = (uint16)((float)(count[k]*.8)+(float)(m_nCompleteSourcesCount*.2));
			if( m_nCompleteSourcesCountHi < m_nCompleteSourcesCount ) {
				m_nCompleteSourcesCountHi = m_nCompleteSourcesCount;

	m_nCompleteSourcesTime = time(NULL) + (60);

	UpdateDisplayedInfo();
// [Maella -Enhanced Chunk Selection- (based on jicxicmic)]
bool CPartFile::GetNextRequestedBlock(CUpDownClient* sender,
	std::vector<Requested_Block_Struct*>& toadd, uint16& count)

	// The purpose of this function is to return a list of blocks (~180KB) to
	// download. To avoid a premature stop of the downloading, all blocks that
	// are requested from the same source must be located within the same
	// chunk (=> part ~9MB).
	//
	// The selection of the chunk to download is one of the CRITICAL parts of the
	// edonkey network. The selection algorithm must ensure the best spreading
	//
	// The selection is based on 4 criteria:
	//  1. Frequency of the chunk (availability): very rare chunks must be downloaded
	//     as quickly as possible to become a new available source.
	//  2. Parts used for preview (first + last chunk): preview or check a
	//     file (e.g. movie, mp3)
	//  3. Request state (downloading in progress): try to ask each source for another
	//     chunk. Spread the requests between all sources.
	//  4. Completion (shortest-to-complete): partially retrieved chunks should be
	//     completed before starting to download other ones.
	//
	// The frequency criterion defines three zones: very rare (<10%), rare (<50%)
	// and common (>50%). Inside each zone, the criteria have a specific weight, used
	// to calculate the priority of chunks. The chunk(s) with the highest
	// priority (highest=0, lowest=0xffff) is/are selected first.
	//
	//          very rare   (preview)           rare                       common
	//    0% <---- +0 pt ----> 10% <----- +10000 pt -----> 50% <---- +20000 pt ----> 100%
	// 1.  <------- frequency: +25*frequency pt ----------->
	// 2.  <- preview: +1 pt --><-------------- preview: set to 10000 pt ------------->
	// 3.                       <------ request: download in progress +20000 pt ------>
	// 4a. <- completion: 0% +100, 25% +75 .. 100% +0 pt --><-- !req => completion --->
	// 4b.                                                  <--- req => !completion -->
	//
	// Unrolled, the priority scale is:
	//
	//   0..xxxx       unrequested and requested very rare chunks
	//   10000..1xxxx  unrequested rare chunks + unrequested preview chunks
	//   20000..2xxxx  unrequested common chunks (priority to the most complete)
	//   30000..3xxxx  requested rare chunks + requested preview chunks
	//   40000..4xxxx  requested common chunks (priority to the least complete)
	//
	// This algorithm usually selects the rarest chunk(s) first. However, partially
	// complete chunk(s) that is/are close to completion may overtake the priority
	// (priority inversion).
	// For the common chunks, the algorithm tries to spread the download between
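	// Illustrative example (hypothetical numbers): if the very-rare bound works out
	// to 3 sources, then a non-preview chunk seen on 2 sources that is 40% complete
	// gets rank = 25*2 + 1 + (100 - 40) = 111, so it is selected long before any
	// unrequested common chunk, whose rank starts at 20000.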
	// Check input parameters
	if ( sender->GetPartStatus().empty() ) {

	// Define and create the list of the chunks to download
	const uint16 partCount = GetPartCount();
	ChunkList chunksList;

	uint16 newBlockCount = 0;
	while(newBlockCount != count) {
		// Create a request block structure if a chunk has been previously selected
		if(sender->GetLastPartAsked() != 0xffff) {
			Requested_Block_Struct* pBlock = new Requested_Block_Struct;
			if(GetNextEmptyBlockInPart(sender->GetLastPartAsked(), pBlock) == true) {
				// Keep track of all pending requested blocks
				m_requestedblocks_list.push_back(pBlock);
				// Update list of blocks to return
				toadd.push_back(pBlock);
				// Skip end of loop (=> CPU load)
				// All blocks for this chunk have already been requested
				// => Try to select another chunk
				sender->SetLastPartAsked(0xffff);

		// Check if a new chunk must be selected (e.g. download starting, previous chunk complete)
		if(sender->GetLastPartAsked() == 0xffff) {
			// Quantify all chunks (create list of chunks to download)
			// This is done only once and only if it is necessary (=> CPU load)
			if(chunksList.empty()) {
				// Identify the locally missing part(s) that this source has
				for(uint16 i = 0; i < partCount; ++i) {
					if(sender->IsPartAvailable(i) == true && GetNextEmptyBlockInPart(i, NULL) == true) {
						// Create a new entry for this chunk and add it to the list
						newEntry.frequency = m_SrcpartFrequency[i];
						chunksList.push_back(newEntry);

				// Check if any block(s) could be downloaded
				if(chunksList.empty()) {
					break; // Exit main loop while()
				// Define the bounds of the three zones (very rare, rare)
				// more depending on available sources
				if (GetSourceCount()>800) {
				} else if (GetSourceCount()>200) {
				uint16 limit = modif*GetSourceCount()/100;
				const uint16 veryRareBound = limit;
				const uint16 rareBound = 2*limit;

				// Cache Preview state (Criterion 2)
				FileType type = GetFiletype(GetFileName());
				const bool isPreviewEnable =
					thePrefs::GetPreviewPrio() &&
					(type == ftArchive || type == ftVideo);

				// Collect and calculate criteria for all chunks
				for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
					Chunk& cur_chunk = *it;

					const uint64 uStart = cur_chunk.part * PARTSIZE;
					const uint64 uEnd   = uStart + GetPartSize(cur_chunk.part) - 1;
					// Criterion 2. Parts used for preview
					// Remark: - We need to download the first part and the last part(s).
					//         - When the last part is very small, it's necessary to
					//           download the two last parts.
					bool critPreview = false;
					if(isPreviewEnable == true) {
						if(cur_chunk.part == 0) {
							critPreview = true; // First chunk
						} else if(cur_chunk.part == partCount-1) {
							critPreview = true; // Last chunk
						} else if(cur_chunk.part == partCount-2) {
							// Last chunk - 1 (only if the last chunk is too small)
							const uint32 sizeOfLastChunk = GetFileSize() - uEnd;
							if(sizeOfLastChunk < PARTSIZE/3) {
								critPreview = true; // Last chunk - 1

					// Criterion 3. Request state (downloading in progress from other source(s))
					const bool critRequested =
						cur_chunk.frequency > veryRareBound &&
						IsAlreadyRequested(uStart, uEnd);

					// Criterion 4. Completion
					// PARTSIZE instead of GetPartSize() favours the last chunk - but that may be intentional
					uint32 partSize = PARTSIZE - m_gaplist.GetGapSize(cur_chunk.part);
					const uint16 critCompletion = (uint16)(partSize/(PARTSIZE/100)); // in [%]

					// Calculate priority with all criteria
					if(cur_chunk.frequency <= veryRareBound) {
						// 0..xxxx unrequested + requested very rare chunks
						cur_chunk.rank = (25 * cur_chunk.frequency) +		// Criterion 1
							((critPreview == true) ? 0 : 1) +		// Criterion 2
							(100 - critCompletion);				// Criterion 4
					} else if(critPreview == true) {
						// 10000..10100 unrequested preview chunks
						// 30000..30100 requested preview chunks
						cur_chunk.rank = ((critRequested == false) ? 10000 : 30000) +	// Criterion 3
							(100 - critCompletion);					// Criterion 4
					} else if(cur_chunk.frequency <= rareBound) {
						// 10101..1xxxx unrequested rare chunks
						// 30101..3xxxx requested rare chunks
						cur_chunk.rank = (25 * cur_chunk.frequency) +			// Criterion 1
							((critRequested == false) ? 10101 : 30101) +		// Criterion 3
							(100 - critCompletion);					// Criterion 4
						if(critRequested == false) {	// Criterion 3
							// 20000..2xxxx unrequested common chunks
							cur_chunk.rank = 20000 +		// Criterion 3
								(100 - critCompletion);		// Criterion 4
							// 40000..4xxxx requested common chunks
							// Remark: The weight of the completion criterion is inverted
							//         to spread the requests over the completing chunks.
							//         Without this, the chunk closest to completion would
							//         receive every new source.
							cur_chunk.rank = 40000 +		// Criterion 3
								(critCompletion);		// Criterion 4
        // Select the next chunk to download
        if (!chunksList.empty()) {
            // Find and count the chunk(s) with the highest priority
            uint16 chunkCount = 0;    // Number of found chunks with same priority
            uint16 rank = 0xffff;     // Highest priority found

            for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
                const Chunk& cur_chunk = *it;
                if (cur_chunk.rank < rank) {
                    chunkCount = 1;
                    rank = cur_chunk.rank;
                } else if (cur_chunk.rank == rank) {
                    ++chunkCount;
                }
            }

            // Use a random access to avoid that everybody tries to download the
            // same chunks at the same time (=> spread the selected chunk among clients)
            uint16 randomness = 1 + (int)(((float)(chunkCount - 1)) * rand() / (RAND_MAX + 1.0));

            for (ChunkList::iterator it = chunksList.begin(); it != chunksList.end(); ++it) {
                const Chunk& cur_chunk = *it;
                if (cur_chunk.rank == rank) {
                    --randomness;
                    if (randomness == 0) {
                        // Selection process is over
                        sender->SetLastPartAsked(cur_chunk.part);
                        // Remark: this list might be reused up to *count times
                        chunksList.erase(it);
                        break;    // exit loop for()
                    }
                }
            }
        } else {
            // There is no remaining chunk to download
            break;    // Exit main loop while()
        }
    }

    // Return the number of blocks
    count = newBlockCount;

    return (newBlockCount > 0);
}
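
// Illustrative note on the random pick above (assumed numbers): with four equally
// ranked chunks, the expression 1 + (int)(((float)(chunkCount - 1)) * rand() / (RAND_MAX + 1.0))
// yields 1, 2 or 3, so different clients skip a different number of the top-ranked
// chunks instead of all requesting the same one; with a single candidate it always
// yields 1 and that chunk is picked directly.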
void CPartFile::RemoveBlockFromList(uint64 start, uint64 end)
{
    std::list<Requested_Block_Struct*>::iterator it = m_requestedblocks_list.begin();
    while (it != m_requestedblocks_list.end()) {
        std::list<Requested_Block_Struct*>::iterator it2 = it++;

        if ((*it2)->StartOffset <= start && (*it2)->EndOffset >= end) {
            m_requestedblocks_list.erase(it2);
        }
    }
}


void CPartFile::RemoveAllRequestedBlocks(void)
{
    m_requestedblocks_list.clear();
}


void CPartFile::CompleteFile(bool bIsHashingDone)
{
    if (GetKadFileSearchID()) {
        Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), false);
    }

    theApp->downloadqueue->RemoveLocalServerRequest(this);

    AddDebugLogLineM( false, logPartFile, wxString( wxT("CPartFile::CompleteFile: Hash ") ) + ( bIsHashingDone ? wxT("done") : wxT("not done") ) );

    if (!bIsHashingDone) {
        SetPartFileStatus(PS_COMPLETING);

        CPath partFile = m_partmetfilename.RemoveExt();
        CThreadScheduler::AddTask(new CHashingTask(GetFilePath(), partFile, this));
        return;
    }

    m_is_A4AF_auto = false;
    SetPartFileStatus(PS_COMPLETING);
    // Guess I was wrong about not needing to spawn a thread ...
    // It is needed if the temp and incoming dirs are on different
    // partitions/drives and the file is large... [oz]
    PerformFileComplete();

    if (thePrefs::ShowCatTabInfos()) {
        Notify_ShowUpdateCatTabTitles();
    }
    UpdateDisplayedInfo(true);
}
void CPartFile::CompleteFileEnded(bool errorOccured, const CPath& newname)
{
    if (errorOccured) {
        SetPartFileStatus(PS_ERROR);
        AddLogLineM(true, CFormat( _("Unexpected error while completing %s. File paused") ) % GetFileName() );
    } else {
        m_fullname = newname;

        SetFilePath(m_fullname.GetPath());
        SetFileName(m_fullname.GetFullName());
        m_lastDateChanged = CPath::GetModificationTime(m_fullname);

        SetPartFileStatus(PS_COMPLETE);

        // TODO: What the f*** if it is already known?
        theApp->knownfiles->SafeAddKFile(this);

        // remove the file from the suspended uploads list
        theApp->uploadqueue->ResumeUpload(GetFileHash());
        theApp->downloadqueue->RemoveFile(this);
        theApp->sharedfiles->SafeAddKFile(this);
        UpdateDisplayedInfo(true);

        // republish that file to the ed2k-server to update the 'FT_COMPLETE_SOURCES' counter on the server.
        theApp->sharedfiles->RepublishFile(this);

        // Ensure that completed shows the correct value
        completedsize = GetFileSize();

        // clear the blackbox to free up memory
        m_CorruptionBlackBox->Free();

        AddLogLineM(true, CFormat( _("Finished downloading: %s") ) % GetFileName() );
    }

    theApp->downloadqueue->StartNextFile(this);
}


void CPartFile::PerformFileComplete()
{
    // add this file to the suspended uploads list
    theApp->uploadqueue->SuspendUpload(GetFileHash());

    // close permanent handle
    if (m_hpartfile.IsOpened()) {
        m_hpartfile.Close();
    }

    // Schedule task for completion of the file
    CThreadScheduler::AddTask(new CCompletionTask(this));
}
void CPartFile::RemoveAllSources(bool bTryToSwap)
{
    for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
        CUpDownClient* cur_src = *it++;
        if (bTryToSwap) {
            if (!cur_src->SwapToAnotherFile(true, true, true, NULL)) {
                RemoveSource(cur_src, true, false);
                // If it was not swapped, it's not on any file anymore, and should die
            }
        } else {
            RemoveSource(cur_src, true, false);
        }
    }

    /* eMule 0.30c implementation, i give it a try (Creteil) BEGIN ... */
    // remove all A4AF links in sources to this file
    if (!m_A4AFsrclist.empty()) {
        for (SourceSet::iterator it = m_A4AFsrclist.begin(); it != m_A4AFsrclist.end(); ) {
            CUpDownClient* cur_src = *it++;
            if (cur_src->DeleteFileRequest(this)) {
                Notify_DownloadCtrlRemoveSource(cur_src, this);
            }
        }
        m_A4AFsrclist.clear();
    }
    /* eMule 0.30c implementation, i give it a try (Creteil) END ... */
    UpdateFileRatingCommentAvail();
}


void CPartFile::Delete()
{
    AddLogLineM(false, CFormat(_("Deleting file: %s")) % GetFileName());
    // Barry - Need to tell any connected clients to stop sending the file
    StopFile(true);
    AddDebugLogLineM(false, logPartFile, wxT("\tStopped"));

    theApp->sharedfiles->RemoveFile(this);
    AddDebugLogLineM(false, logPartFile, wxT("\tRemoved from shared"));
    theApp->downloadqueue->RemoveFile(this);
    AddDebugLogLineM(false, logPartFile, wxT("\tRemoved from download queue"));
    Notify_DownloadCtrlRemoveFile(this);
    AddDebugLogLineM(false, logPartFile, wxT("\tRemoved from transferwnd"));

    if (m_hpartfile.IsOpened()) {
        m_hpartfile.Close();
    }

    AddDebugLogLineM(false, logPartFile, wxT("\tClosed"));

    if (!CPath::RemoveFile(m_fullname)) {
        AddDebugLogLineM(true, logPartFile, CFormat(wxT("\tFailed to delete '%s'")) % m_fullname);
    } else {
        AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .part.met"));
    }

    if (!CPath::RemoveFile(m_PartPath)) {
        AddDebugLogLineM(true, logPartFile, CFormat(wxT("Failed to delete '%s'")) % m_PartPath);
    } else {
        AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .part"));
    }

    CPath BAKName = m_fullname.AppendExt(PARTMET_BAK_EXT);
    if (!CPath::RemoveFile(BAKName)) {
        AddDebugLogLineM(true, logPartFile, CFormat(wxT("Failed to delete '%s'")) % BAKName);
    } else {
        AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .BAK"));
    }

    CPath SEEDSName = m_fullname.AppendExt(wxT(".seeds"));
    if (SEEDSName.FileExists()) {
        if (CPath::RemoveFile(SEEDSName)) {
            AddDebugLogLineM(false, logPartFile, wxT("\tRemoved .seeds"));
        } else {
            AddDebugLogLineM(true, logPartFile, CFormat(wxT("Failed to delete '%s'")) % SEEDSName);
        }
    }

    AddDebugLogLineM(false, logPartFile, wxT("Done"));
}
bool CPartFile::HashSinglePart(uint16 partnumber)
{
    if ((GetHashCount() <= partnumber) && (GetPartCount() > 1)) {
        AddLogLineM(true,
            CFormat( _("WARNING: Unable to hash downloaded part - hashset incomplete for '%s'") )
                % GetFileName() );
        m_hashsetneeded = true;
        return true;
    } else if ((GetHashCount() <= partnumber) && GetPartCount() != 1) {
        AddLogLineM(true, CFormat( _("ERROR: Unable to hash downloaded part - hashset incomplete (%s). This should never happen")) % GetFileName() );
        m_hashsetneeded = true;
        return true;
    } else {
        CMD4Hash hashresult;
        uint64 offset = PARTSIZE * partnumber;
        uint32 length = GetPartSize(partnumber);
        try {
            CreateHashFromFile(m_hpartfile, offset, length, &hashresult, NULL);
        } catch (const CIOFailureException& e) {
            AddLogLineM(true, CFormat( wxT("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
                % partnumber % length % (offset + length) % GetFileName() % GetFileSize() % e.what());
            SetPartFileStatus(PS_ERROR);
            return false;
        } catch (const CEOFException& e) {
            AddLogLineM(true, CFormat( wxT("EOF while hashing downloaded part %u with length %u (max %u) of partfile '%s' with length %u: %s"))
                % partnumber % length % (offset + length) % GetFileName() % GetFileSize() % e.what());
            return false;
        }

        if (GetPartCount() > 1) {
            if (hashresult != GetPartHash(partnumber)) {
                AddDebugLogLineM(false, logPartFile, CFormat( wxT("%s: Expected hash of part %d: %s")) % GetFileName() % partnumber % GetPartHash(partnumber).Encode() );
                AddDebugLogLineM(false, logPartFile, CFormat( wxT("%s: Actual hash of part %d: %s")) % GetFileName() % partnumber % hashresult.Encode() );
                return false;
            } else {
                return true;
            }
        } else {
            if (hashresult != m_abyFileHash) {
                return false;
            } else {
                return true;
            }
        }
    }
}


bool CPartFile::IsCorruptedPart(uint16 partnumber)
{
    return std::find(m_corrupted_list.begin(), m_corrupted_list.end(), partnumber)
        != m_corrupted_list.end();
}


void CPartFile::SetDownPriority(uint8 np, bool bSave, bool bRefresh)
{
    if (m_iDownPriority != np) {
        m_iDownPriority = np;
        UpdateDisplayedInfo(true);
    }
}
void CPartFile::StopFile(bool bCancel)
{
    // Kry - Need to set it here to get into SetPartFileStatus(status) correctly
    m_stopped = true;

    // Barry - Need to tell any connected clients to stop sending the file
    PauseFile();

    m_LastSearchTimeKad = 0;
    m_TotalSearchesKad = 0;

    RemoveAllSources(true);
    memset(m_anStates, 0, sizeof(m_anStates));

    UpdateDisplayedInfo(true);
}


void CPartFile::StopPausedFile()
{
    // Once an hour, remove any sources for files which are no longer active downloads
    switch (GetStatus()) {
        case PS_PAUSED:
        case PS_INSUFFICIENT:
        case PS_ERROR:
            if (time(NULL) - m_iLastPausePurge > (60 * 60)) {
                m_iLastPausePurge = time(NULL);
                StopFile();
            }
    }

    // release file handle if unused for some time
    m_hpartfile.Release();
}


void CPartFile::PauseFile(bool bInsufficient)
{
    if (status == PS_COMPLETE || status == PS_COMPLETING) {
        return;
    }

    if (GetKadFileSearchID()) {
        Kademlia::CSearchManager::StopSearch(GetKadFileSearchID(), true);
        // If we were in the middle of searching, reset the timer so we can resume searching.
        m_LastSearchTimeKad = 0;
    }

    m_iLastPausePurge = time(NULL);

    theApp->downloadqueue->RemoveLocalServerRequest(this);

    CPacket packet( OP_CANCELTRANSFER, 0, OP_EDONKEYPROT );
    for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
        CUpDownClient* cur_src = *it++;
        if (cur_src->GetDownloadState() == DS_DOWNLOADING) {
            if (!cur_src->GetSentCancelTransfer()) {
                theStats::AddUpOverheadOther( packet.GetPacketSize() );
                AddDebugLogLineM( false, logLocalClient, wxT("Local Client: OP_CANCELTRANSFER to ") + cur_src->GetFullIP() );
                cur_src->SendPacket( &packet, false, true );
                cur_src->SetSentCancelTransfer( true );
            }
            cur_src->SetDownloadState(DS_ONQUEUE);
        }
    }

    m_insufficient = bInsufficient;

    m_anStates[DS_DOWNLOADING] = 0;
}
void CPartFile::ResumeFile()
{
    if (status == PS_COMPLETE || status == PS_COMPLETING) {
        return;
    }

    if (m_insufficient && !CheckFreeDiskSpace()) {
        // Still not enough free disk space
        return;
    }

    m_insufficient = false;

    m_lastsearchtime = 0;

    SetActive(theApp->IsConnected());

    if (m_gaplist.IsComplete() && (GetStatus() == PS_ERROR)) {
        // The file has already been hashed at this point
        CompleteFile(true);
    }

    UpdateDisplayedInfo(true);
}


bool CPartFile::CheckFreeDiskSpace(uint64 neededSpace)
{
    uint64 free = CPath::GetFreeSpaceAt(GetFilePath());
    if (free == static_cast<uint64>(wxInvalidOffset)) {
        // If GetFreeSpaceAt() fails, then the path probably does not exist.
        return false;
    }

    // The very least acceptable diskspace is a single PART
    if (free < PARTSIZE) {
        // Always fail in this case, since we risk losing data if we try to
        // write on a full partition.
        return false;
    }

    // All other checks are only made if the user has enabled them
    if (thePrefs::IsCheckDiskspaceEnabled()) {
        neededSpace += thePrefs::GetMinFreeDiskSpace();

        // Due to the existence of sparse files, we cannot assume that
        // writes within the file don't cause new blocks to be allocated.
        // Therefore, we simply have to stop writing the moment the limit
        // has been exceeded.
        return free >= neededSpace;
    }

    return true;
}
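
// Illustrative example (assumed numbers): with a PART of roughly 9.28 MB, 50 MB
// free on the partition, neededSpace = 20 MB and a configured minimum of 40 MB,
// the call returns false when disk-space checking is enabled (20 MB + 40 MB
// exceeds the 50 MB available), but true when the check is disabled, since the
// absolute lower bound of one PART is still satisfied.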
void CPartFile::SetLastAnsweredTime()
{
    m_ClientSrcAnswered = ::GetTickCount();
}


void CPartFile::SetLastAnsweredTimeTimeout()
{
    m_ClientSrcAnswered = 2 * CONNECTION_LATENCY + ::GetTickCount() - SOURCECLIENTREASKS;
}


CPacket* CPartFile::CreateSrcInfoPacket(const CUpDownClient* forClient, uint8 byRequestedVersion, uint16 nRequestedOptions)
{
    if (m_SrcList.empty()) {
        return NULL;
    }

    if (!IsPartFile()) {
        return CKnownFile::CreateSrcInfoPacket(forClient, byRequestedVersion, nRequestedOptions);
    }

    if (((forClient->GetRequestFile() != this)
        && (forClient->GetUploadFile() != this)) || forClient->GetUploadFileID() != GetFileHash()) {
        wxString file1 = _("Unknown");
        if (forClient->GetRequestFile() && forClient->GetRequestFile()->GetFileName().IsOk()) {
            file1 = forClient->GetRequestFile()->GetFileName().GetPrintable();
        } else if (forClient->GetUploadFile() && forClient->GetUploadFile()->GetFileName().IsOk()) {
            file1 = forClient->GetUploadFile()->GetFileName().GetPrintable();
        }
        wxString file2 = _("Unknown");
        if (GetFileName().IsOk()) {
            file2 = GetFileName().GetPrintable();
        }
        AddDebugLogLineM(false, logPartFile, wxT("File mismatch on source packet (P) Sending: ") + file1 + wxT(" From: ") + file2);
        return NULL;
    }

    if (!(GetStatus() == PS_READY || GetStatus() == PS_EMPTY)) {
        return NULL;
    }

    const BitVector& reqstatus = forClient->GetPartStatus();
    bool KnowNeededParts = !reqstatus.empty();
    //wxASSERT(rcvstatus.size() == GetPartCount()); // Obviously!
    if (KnowNeededParts && (reqstatus.size() != GetPartCount())) {
        // Yuck. Same file but different part count? Seriously fucked up.
        // This happens rather often with reqstatus.size() == 0. Don't log then.
        if (reqstatus.size()) {
            AddDebugLogLineM(false, logKnownFiles, CFormat(wxT("Impossible situation: different partcounts: %i (client) and %i (file) for %s")) % reqstatus.size() % GetPartCount() % GetFileName());
        }
        return NULL;
    }

    CMemFile data(1024);

    uint8 byUsedVersion;
    bool bIsSX2Packet = false;
    if (forClient->SupportsSourceExchange2() && byRequestedVersion > 0) {
        // the client uses SourceExchange2 and requested the highest version it knows,
        // and we send the highest version we know, but of course not higher than its request
        byUsedVersion = std::min(byRequestedVersion, (uint8)SOURCEEXCHANGE2_VERSION);
        bIsSX2Packet = true;
        data.WriteUInt8(byUsedVersion);

        // we don't support any special SX2 options yet, reserved for later use
        if (nRequestedOptions != 0) {
            AddDebugLogLineM(false, logKnownFiles, CFormat(wxT("Client requested unknown options for SourceExchange2: %u")) % nRequestedOptions);
        }
    } else {
        byUsedVersion = forClient->GetSourceExchange1Version();
        bIsSX2Packet = false;
        if (forClient->SupportsSourceExchange2()) {
            AddDebugLogLineM(false, logKnownFiles, wxT("Client which announced to support SX2 sent SX1 packet instead"));
        }
    }

    uint16 nCount = 0;
    data.WriteHash(m_abyFileHash);
    data.WriteUInt16(nCount);

    for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
        bool bNeeded = false;
        CUpDownClient* cur_src = *it;

        int state = cur_src->GetDownloadState();
        int valid = (state == DS_DOWNLOADING) || (state == DS_ONQUEUE && !cur_src->IsRemoteQueueFull());

        if (cur_src->HasLowID() || !valid) {
            continue;
        }

        // only send sources which have needed parts for this client, if possible
        const BitVector& srcstatus = cur_src->GetPartStatus();
        if (!srcstatus.empty()) {
            //wxASSERT(srcstatus.size() == GetPartCount()); // Obviously!
            if (srcstatus.size() != GetPartCount()) {
                continue;
            }
            if (KnowNeededParts) {
                // only send sources which have needed parts for this client
                for (int x = 0; x < GetPartCount(); ++x) {
                    if (srcstatus[x] && !reqstatus[x]) {
                        bNeeded = true;
                        break;
                    }
                }
            } else {
                // if we don't know the needed parts for this client, return any
                // source; currently a client sends its file status only after it
                // has at least one complete part
                if (srcstatus.size() != GetPartCount()) {
                    continue;
                }
                for (int x = 0; x < GetPartCount(); ++x) {
                    if (srcstatus[x]) {
                        bNeeded = true;
                        break;
                    }
                }
            }
        }

        if (bNeeded) {
            ++nCount;
            uint32 dwID;
            if (forClient->GetSourceExchange1Version() > 2) {
                dwID = cur_src->GetUserIDHybrid();
            } else {
                dwID = wxUINT32_SWAP_ALWAYS(cur_src->GetUserIDHybrid());
            }
            data.WriteUInt32(dwID);
            data.WriteUInt16(cur_src->GetUserPort());
            data.WriteUInt32(cur_src->GetServerIP());
            data.WriteUInt16(cur_src->GetServerPort());

            if (byUsedVersion >= 2) {
                data.WriteHash(cur_src->GetUserHash());
            }

            if (byUsedVersion >= 4) {
                // CryptSettings - SourceExchange V4
                // 1 CryptLayer Required
                // 1 CryptLayer Requested
                // 1 CryptLayer Supported
                const uint8 uSupportsCryptLayer = cur_src->SupportsCryptLayer() ? 1 : 0;
                const uint8 uRequestsCryptLayer = cur_src->RequestsCryptLayer() ? 1 : 0;
                const uint8 uRequiresCryptLayer = cur_src->RequiresCryptLayer() ? 1 : 0;
                const uint8 byCryptOptions = (uRequiresCryptLayer << 2) | (uRequestsCryptLayer << 1) | (uSupportsCryptLayer << 0);
                data.WriteUInt8(byCryptOptions);
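
                // Worked example of the byte written above (derived from the shifts,
                // bit 0 is least significant):
                //   bit 0: source supports the obfuscated (crypt) layer
                //   bit 1: source requests that the crypt layer be used
                //   bit 2: source requires the crypt layer
                //   bits 3-7: reserved
                // A source that supports and requests, but does not require,
                // obfuscation is therefore encoded as 0x03.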
            }
        }
    }

    data.Seek(bIsSX2Packet ? 17 : 16, wxFromStart);
    data.WriteUInt16(nCount);

    CPacket* result = new CPacket(data, OP_EMULEPROT, bIsSX2Packet ? OP_ANSWERSOURCES2 : OP_ANSWERSOURCES);

    // 16+2+501*(4+2+4+2+16) = 14046 bytes max.
    if (result->GetPacketSize() > 354) {
        result->PackPacket();
    }

    return result;
}
void CPartFile::AddClientSources(CMemFile* sources, unsigned nSourceFrom, uint8 uClientSXVersion, bool bSourceExchange2, const CUpDownClient* /*pClient*/)
{
    uint16 nCount = 0;
    uint8 uPacketSXVersion = 0;
    if (!bSourceExchange2) {
        nCount = sources->ReadUInt16();

        // Check if the data size matches the 'nCount' for v1 or v2 and eventually correct the source
        // exchange version while reading the packet data. Otherwise we could experience a higher
        // chance in dealing with wrong source data, userhashes and finally duplicate sources.
        uint32 uDataSize = sources->GetLength() - sources->GetPosition();

        if ((uint32)(nCount * (4+2+4+2)) == uDataSize) {            // Checks if a version 1 packet is the correct size
            if (uClientSXVersion != 1) {
                return;
            }
            uPacketSXVersion = 1;
        } else if ((uint32)(nCount * (4+2+4+2+16)) == uDataSize) {  // Checks if a version 2/3 packet is the correct size
            if (uClientSXVersion == 2) {
                uPacketSXVersion = 2;
            } else if (uClientSXVersion > 2) {
                uPacketSXVersion = 3;
            } else {
                return;
            }
        } else if (nCount * (4+2+4+2+16+1) == uDataSize) {
            if (uClientSXVersion != 4) {
                return;
            }
            uPacketSXVersion = 4;
        } else {
            // If v5 inserts additional data (like v2), the above code will correctly filter those packets.
            // If v5 appends additional data after <count>(<Sources>)[count], we are in trouble with the
            // above code. Though a client which does not understand v5+ should never receive such a packet.
            AddDebugLogLineM(false, logClient, CFormat(wxT("Received invalid source exchange packet (v%u) of data size %u for %s")) % uClientSXVersion % uDataSize % GetFileName());
            return;
        }
    } else {
        // We only check if the version is known by us and do a quick sanity check on known versions
        // other than SX1; the packet will be ignored if any error appears, since it can't be a "misunderstanding" anymore
        if (uClientSXVersion > SOURCEEXCHANGE2_VERSION || uClientSXVersion == 0) {
            AddDebugLogLineM(false, logPartFile, CFormat(wxT("Invalid source exchange type version: %i")) % uClientSXVersion);
            return;
        }

        // all known versions use the first 2 bytes as count, and unknown versions are already filtered above
        nCount = sources->ReadUInt16();
        uint32 uDataSize = (uint32)(sources->GetLength() - sources->GetPosition());
        bool bError = false;
        switch (uClientSXVersion) {
            case 1:
                bError = nCount * (4+2+4+2) != uDataSize;
                break;
            case 2:
            case 3:
                bError = nCount * (4+2+4+2+16) != uDataSize;
                break;
            case 4:
                bError = nCount * (4+2+4+2+16+1) != uDataSize;
                break;
        }

        if (bError) {
            AddDebugLogLineM(false, logPartFile, wxT("Invalid source exchange data size."));
            return;
        }
        uPacketSXVersion = uClientSXVersion;
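
        // Per-source record sizes implied by the checks above, in bytes:
        //   SX1 v1    : 4 (ID) + 2 (port) + 4 (server IP) + 2 (server port) = 12
        //   SX1 v2/v3 : 12 + 16 (user hash)                                 = 28
        //   SX2 v4    : 28 + 1 (crypt options byte)                         = 29
        // Worked example: a v2 packet announcing nCount = 10 sources must carry
        // exactly 280 bytes of source data after the count field; any other
        // length is rejected as malformed.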
    }

    for (uint16 i = 0; i != nCount; ++i) {
        uint32 dwID = sources->ReadUInt32();
        uint16 nPort = sources->ReadUInt16();
        uint32 dwServerIP = sources->ReadUInt32();
        uint16 nServerPort = sources->ReadUInt16();

        CMD4Hash userHash;
        if (uPacketSXVersion > 1) {
            userHash = sources->ReadHash();
        }

        uint8 byCryptOptions = 0;
        if (uPacketSXVersion >= 4) {
            byCryptOptions = sources->ReadUInt8();
        }

        // Clients send IDs in the Hybrid format, so highID clients with *.*.*.0 won't be falsely switched to a lowID.
        uint32 dwIDED2K;
        if (uPacketSXVersion >= 3) {
            dwIDED2K = wxUINT32_SWAP_ALWAYS(dwID);
        } else {
            dwIDED2K = dwID;
        }

        // check the HighID(IP) - "Filter LAN IPs" and "IPfilter" the received sources' IP addresses
        if (!IsLowID(dwID)) {
            if (!IsGoodIP(dwIDED2K, thePrefs::FilterLanIPs())) {
                // check for 0-IP, localhost and optionally for LAN addresses
                AddDebugLogLineM(false, logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - bad IP")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
                continue;
            }
            if (theApp->ipfilter->IsFiltered(dwIDED2K)) {
                AddDebugLogLineM(false, logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via %s - IPFilter")) % Uint32toStringIP(dwIDED2K) % OriginToText(nSourceFrom));
                continue;
            }
            if (theApp->clientlist->IsBannedClient(dwIDED2K)) {
                continue;
            }
        }

        // additionally check for LowID and own IP
        if (!CanAddSource(dwID, nPort, dwServerIP, nServerPort, NULL, false)) {
            AddDebugLogLineM(false, logIPFilter, CFormat(wxT("Ignored source (IP=%s) received via source exchange")) % Uint32toStringIP(dwIDED2K));
            continue;
        }

        if (thePrefs::GetMaxSourcePerFile() > GetSourceCount()) {
            CUpDownClient* newsource = new CUpDownClient(nPort, dwID, dwServerIP, nServerPort, this, (uPacketSXVersion < 3), true);
            if (uPacketSXVersion > 1) {
                newsource->SetUserHash(userHash);
            }

            if (uPacketSXVersion >= 4) {
                newsource->SetConnectOptions(byCryptOptions, true, false);
            }

            newsource->SetSourceFrom((ESourceFrom)nSourceFrom);
            theApp->downloadqueue->CheckAndAddSource(this, newsource);
        } else {
            break;
        }
    }
}


void CPartFile::UpdateAutoDownPriority()
{
    if (!IsAutoDownPriority()) {
        return;
    }
    if (GetSourceCount() <= RARE_FILE) {
        if (GetDownPriority() != PR_HIGH)
            SetDownPriority(PR_HIGH, false, false);
    } else if (GetSourceCount() < 100) {
        if (GetDownPriority() != PR_NORMAL)
            SetDownPriority(PR_NORMAL, false, false);
    } else {
        if (GetDownPriority() != PR_LOW)
            SetDownPriority(PR_LOW, false, false);
    }
}


// making this function return a higher value when more sources have the extended
// protocol will force you to ask a larger variety of people for sources

int CPartFile::GetCommonFilePenalty()
{
    //TODO: implement, but never return less than MINCOMMONPENALTY!
    return MINCOMMONPENALTY;
}
/* Barry - Replaces BlockReceived()

   Originally this only wrote to disk when a full 180k block
   had been received from a client, and only asked for data in
   180k blocks.

   This meant that on average 90k was lost for every connection
   to a client data source. That is a lot of wasted data.

   To reduce the lost data, packets are now written to a buffer
   and flushed to disk regularly regardless of size downloaded.
   This includes compressed packets.

   Data is also requested only where gaps are, not in 180k blocks.
   The requests will still not exceed 180k, but may be smaller to
   fill a gap.
*/

// Kry - transize is 32bits, no packet can be more than that (this is
// compressed size). Even 32bits is too much imho. As for the return size,
// look at the lenData below.
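
// Rough sketch of the buffering path described above (simplified, not the
// literal call graph): each received packet becomes a PartFileBufferedData
// entry queued by WriteToBuffer(); the gap list is updated immediately so the
// same range is not requested again, and FlushBuffer() later writes the queued
// ranges to disk in one pass and re-hashes any part that became complete.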
uint32 CPartFile::WriteToBuffer(uint32 transize, byte* data, uint64 start, uint64 end, Requested_Block_Struct* block, const CUpDownClient* client)
{
    // Increment transferred bytes counter for this file
    transferred += transize;

    // This is needed a few times
    // Kry - should not need a uint64 here - no block is larger than
    // 2GB even after being uncompressed.
    uint32 lenData = (uint32)(end - start + 1);

    if (lenData > transize) {
        m_iGainDueToCompression += lenData - transize;
    }

    // Occasionally packets are duplicated, no point writing it twice
    if (IsComplete(start, end)) {
        AddDebugLogLineM(false, logPartFile,
            CFormat(wxT("File '%s' has already been written from %u to %u"))
                % GetFileName() % start % end);
        return 0;
    }

    // security sanity check to make sure we do not write anything into an already hashed complete chunk
    const uint64 nStartChunk = start / PARTSIZE;
    const uint64 nEndChunk = end / PARTSIZE;
    if (IsComplete(nStartChunk)) {
        AddDebugLogLineM(false, logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (start): %u-%u; File=%s")) % start % end % GetFileName());
        return 0;
    } else if (nStartChunk != nEndChunk) {
        if (IsComplete(nEndChunk)) {
            AddDebugLogLineM(false, logPartFile, CFormat(wxT("Received data touches already hashed chunk - ignored (end): %u-%u; File=%s")) % start % end % GetFileName());
            return 0;
        } else {
            AddDebugLogLineM(false, logPartFile, CFormat(wxT("Received data crosses chunk boundaries: %u-%u; File=%s")) % start % end % GetFileName());
        }
    }

    // log transfer information in our "blackbox"
    m_CorruptionBlackBox->TransferredData(start, end, client);

    // Create copy of data as new buffer
    byte* buffer = new byte[lenData];
    memcpy(buffer, data, lenData);

    // Create a new buffered queue entry
    PartFileBufferedData* item = new PartFileBufferedData(buffer, start, end, block);

    // Add to the queue in the correct position (most likely the end)
    bool added = false;

    std::list<PartFileBufferedData*>::iterator it = m_BufferedData_list.begin();
    for (; it != m_BufferedData_list.end(); ++it) {
        PartFileBufferedData* queueItem = *it;

        if (item->end <= queueItem->end) {
            if (it != m_BufferedData_list.begin()) {
                m_BufferedData_list.insert(--it, item);
                added = true;
            }
            break;
        }
    }

    if (!added) {
        m_BufferedData_list.push_front(item);
    }

    // Increment buffer size marker
    m_nTotalBufferData += lenData;

    // Mark this small section of the file as filled
    FillGap(item->start, item->end);

    // Update the flushed mark on the requested block
    // The loop here is unfortunate but necessary to detect deleted blocks.
    std::list<Requested_Block_Struct*>::iterator it2 = m_requestedblocks_list.begin();
    for (; it2 != m_requestedblocks_list.end(); ++it2) {
        if (*it2 == item->block) {
            item->block->transferred += lenData;
        }
    }

    if (m_gaplist.IsComplete()) {
        FlushBuffer(true);
    }

    // Return the length of data written to the buffer
    return lenData;
}
void CPartFile::FlushBuffer(bool fromAICHRecoveryDataAvailable)
{
    m_nLastBufferFlushTime = GetTickCount();

    if (m_BufferedData_list.empty()) {
        return;
    }

    uint32 partCount = GetPartCount();
    // Remember which parts need to be checked at the end of the flush
    std::vector<bool> changedPart(partCount, false);

    // Ensure file is big enough to write data to (the last item will be the furthest from the start)
    if (!CheckFreeDiskSpace(m_nTotalBufferData)) {
        // Not enough free space to write the last item, bail
        AddLogLineM(true, CFormat( _("WARNING: Not enough free disk-space! Pausing file: %s") ) % GetFileName());
        PauseFile(true);
        return;
    }

    // Loop through queue
    while (!m_BufferedData_list.empty()) {
        // Get top item and remove it from the queue
        CScopedPtr<PartFileBufferedData> item(m_BufferedData_list.front());
        m_BufferedData_list.pop_front();

        // This is needed a few times
        wxASSERT((item->end - item->start) < 0xFFFFFFFF);
        uint32 lenData = (uint32)(item->end - item->start + 1);

        // SLUGFILLER: SafeHash - could be more than one part
        for (uint32 curpart = (item->start / PARTSIZE); curpart <= (item->end / PARTSIZE); ++curpart) {
            wxASSERT(curpart < partCount);
            changedPart[curpart] = true;
        }
        // SLUGFILLER: SafeHash

        // Go to the correct position in the file and write the block of data
        try {
            m_hpartfile.WriteAt(item->data.get(), item->start, lenData);
            // Decrease buffer size
            m_nTotalBufferData -= lenData;
        } catch (const CIOFailureException& e) {
            AddDebugLogLineM(true, logPartFile, wxT("Error while saving part-file: ") + e.what());
            SetPartFileStatus(PS_ERROR);
            // No need to bang your head against it again and again if it has already failed.
            DeleteContents(m_BufferedData_list);
            m_nTotalBufferData = 0;
            return;
        }
    }
    // Update last-changed date
    m_lastDateChanged = wxDateTime::GetTimeNow();

    try {
        // Partfile should never be too large
        if (m_hpartfile.GetLength() > GetFileSize()) {
            // it's a "last chance" correction; the real bugfix has to be applied 'somewhere' else
            m_hpartfile.SetLength(GetFileSize());
        }
    } catch (const CIOFailureException& e) {
        AddDebugLogLineM(true, logPartFile,
            CFormat(wxT("Error while truncating part-file (%s): %s"))
                % m_PartPath % e.what());
        SetPartFileStatus(PS_ERROR);
    }

    // Check each part of the file
    for (uint16 partNumber = 0; partNumber < partCount; ++partNumber) {
        if (changedPart[partNumber] == false) {
            continue;
        }

        uint32 partRange = GetPartSize(partNumber) - 1;

        // Is this 9MB part complete
        if (IsComplete(partNumber)) {
            // Is the part corrupt
            if (!HashSinglePart(partNumber)) {
                AddLogLineM(true, CFormat(
                    _("Downloaded part %i is corrupt in file: %s") ) % partNumber % GetFileName() );
                // add part to corrupted list, if not already there
                if (!IsCorruptedPart(partNumber)) {
                    m_corrupted_list.push_back(partNumber);
                }
                // request AICH recovery data
                // Don't if called from the AICHRecovery. It's already there and would lead to an infinite recursion.
                if (!fromAICHRecoveryDataAvailable) {
                    RequestAICHRecovery(partNumber);
                }
                // Reduce transferred amount by corrupt amount
                m_iLostDueToCorruption += (partRange + 1);
            } else {
                if (!m_hashsetneeded) {
                    AddDebugLogLineM(false, logPartFile, CFormat(
                        wxT("Finished part %u of '%s'")) % partNumber % GetFileName());
                }

                // tell the blackbox about the verified data
                m_CorruptionBlackBox->VerifiedData(PARTSIZE * partNumber, PARTSIZE * partNumber + partRange);

                // if this part was successfully completed (although ICH is active), remove it from the corrupted list
                EraseFirstValue(m_corrupted_list, partNumber);

                if (status == PS_EMPTY) {
                    if (theApp->IsRunning()) { // may be called during shutdown!
                        if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
                            // Successfully completed part, make it available for sharing
                            SetStatus(PS_READY);
                            theApp->sharedfiles->SafeAddKFile(this);
                        }
                    }
                }
            }
        } else if (IsCorruptedPart(partNumber) &&       // corrupted part:
                (thePrefs::IsICHEnabled()               // old ICH:  rehash whenever we have new data, hoping it will be good now
                || fromAICHRecoveryDataAvailable)) {    // new AICH: one rehash right before performing it (maybe it's already good)
            // Try to recover with minimal loss
            if (HashSinglePart(partNumber)) {
                ++m_iTotalPacketsSavedDueToICH;

                uint64 uMissingInPart = m_gaplist.GetGapSize(partNumber);
                FillGap(partNumber);
                RemoveBlockFromList(PARTSIZE * partNumber, (PARTSIZE * partNumber + partRange));

                // tell the blackbox about the verified data
                m_CorruptionBlackBox->VerifiedData(PARTSIZE * partNumber, PARTSIZE * partNumber + partRange);

                // remove from corrupted list
                EraseFirstValue(m_corrupted_list, partNumber);

                AddLogLineM(true, CFormat( _("ICH: Recovered corrupted part %i for %s -> Saved bytes: %s") )
                    % partNumber
                    % GetFileName()
                    % CastItoXBytes(uMissingInPart));

                if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
                    if (status == PS_EMPTY) {
                        // Successfully recovered part, make it available for sharing
                        SetStatus(PS_READY);
                        if (theApp->IsRunning()) // may be called during shutdown!
                            theApp->sharedfiles->SafeAddKFile(this);
                    }
                }
            }
        }
    }

    if (theApp->IsRunning()) { // may be called during shutdown!
        // Is this file finished?
        if (m_gaplist.IsComplete()) {
            CompleteFile(false);
        }
    }
}
// read data for upload, return false on error
bool CPartFile::ReadData(CFileArea& area, uint64 offset, uint32 toread)
{
    if (offset + toread > GetFileSize()) {
        AddDebugLogLineM(false, logPartFile, CFormat(wxT("tried to read %d bytes past eof of %s"))
            % (offset + toread - GetFileSize()) % GetFileName());
        return false;
    }

    area.ReadAt(m_hpartfile, offset, toread);
    // if it fails it throws (which the caller should catch)
    return true;
}
void CPartFile::UpdateFileRatingCommentAvail()
{
    bool prevComment = m_hasComment;
    int prevRating = m_iUserRating;

    m_hasComment = false;
    m_iUserRating = 0;
    int ratingCount = 0;

    SourceSet::iterator it = m_SrcList.begin();
    for (; it != m_SrcList.end(); ++it) {
        CUpDownClient* cur_src = *it;

        if (!cur_src->GetFileComment().IsEmpty()) {
            if (thePrefs::IsCommentFiltered(cur_src->GetFileComment())) {
                continue;
            }
            m_hasComment = true;
        }

        uint8 rating = cur_src->GetFileRating();
        if (rating) {
            wxASSERT(rating <= 5);

            ratingCount++;
            m_iUserRating += rating;
        }
    }

    if (ratingCount) {
        m_iUserRating /= ratingCount;
        wxASSERT(m_iUserRating > 0 && m_iUserRating <= 5);
    }

    if ((prevComment != m_hasComment) || (prevRating != m_iUserRating)) {
        UpdateDisplayedInfo();
    }
}


void CPartFile::SetCategory(uint8 cat)
{
    wxASSERT( cat < theApp->glob_prefs->GetCatCount() );

    m_category = cat;
}


bool CPartFile::RemoveSource(CUpDownClient* toremove, bool updatewindow, bool bDoStatsUpdate)
{
    wxASSERT( toremove );

    bool result = theApp->downloadqueue->RemoveSource( toremove, updatewindow, bDoStatsUpdate );

    // Check if the client should be deleted, but not if the client is already dying
    if (!toremove->GetSocket() && !toremove->HasBeenDeleted()) {
        if (toremove->Disconnected(wxT("RemoveSource - purged"))) {
            toremove->Safe_Delete();
        }
    }

    return result;
}


void CPartFile::AddDownloadingSource(CUpDownClient* client)
{
    CClientPtrList::iterator it =
        std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), client);
    if (it == m_downloadingSourcesList.end()) {
        m_downloadingSourcesList.push_back(client);
    }
}


void CPartFile::RemoveDownloadingSource(CUpDownClient* client)
{
    CClientPtrList::iterator it =
        std::find(m_downloadingSourcesList.begin(), m_downloadingSourcesList.end(), client);
    if (it != m_downloadingSourcesList.end()) {
        m_downloadingSourcesList.erase(it);
    }
}


void CPartFile::SetPartFileStatus(uint8 newstatus)
{
    status = newstatus;

    if (thePrefs::GetAllcatType()) {
        Notify_DownloadCtrlUpdateItem(this);
    }

    Notify_DownloadCtrlSort();
}
uint64 CPartFile::GetNeededSpace()
{
    try {
        uint64 length = m_hpartfile.GetLength();

        if (length > GetFileSize()) {
            return 0;    // Shouldn't happen, but just in case
        }

        return GetFileSize() - length;
    } catch (const CIOFailureException& e) {
        AddDebugLogLineM(true, logPartFile,
            CFormat(wxT("Error while retrieving file-length (%s): %s"))
                % m_PartPath % e.what());
        SetPartFileStatus(PS_ERROR);
        return 0;
    }
}


void CPartFile::SetStatus(uint8 in)
{
    wxASSERT( in != PS_PAUSED && in != PS_INSUFFICIENT );

    status = in;

    if (theApp->IsRunning()) {
        UpdateDisplayedInfo(true);

        if (thePrefs::ShowCatTabInfos()) {
            Notify_ShowUpdateCatTabTitles();
        }
    }
}
void CPartFile::RequestAICHRecovery(uint16 nPart)
{
    if ( !m_pAICHHashSet->HasValidMasterHash() ||
        (m_pAICHHashSet->GetStatus() != AICH_TRUSTED && m_pAICHHashSet->GetStatus() != AICH_VERIFIED)) {
        AddDebugLogLineM( false, logAICHRecovery, wxT("Unable to request AICH Recoverydata because we have no trusted Masterhash") );
        return;
    }
    if (GetPartSize(nPart) <= EMBLOCKSIZE)
        return;
    if (CAICHHashSet::IsClientRequestPending(this, nPart)) {
        AddDebugLogLineM( false, logAICHRecovery, wxT("RequestAICHRecovery: Already a request for this part pending"));
        return;
    }

    // first check if we already have the recovery data, no need to re-request it then
    if (m_pAICHHashSet->IsPartDataAvailable(nPart * PARTSIZE)) {
        AddDebugLogLineM( false, logAICHRecovery, wxT("Found PartRecoveryData in memory"));
        AICHRecoveryDataAvailable(nPart);
        return;
    }

    wxASSERT( nPart < GetPartCount() );
    // find some random client which supports AICH to ask for the blocks
    // first let's see how many we have at all; we prefer high id very much
    uint32 cAICHClients = 0;
    uint32 cAICHLowIDClients = 0;
    for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
        CUpDownClient* pCurClient = *(it);
        if ( pCurClient->IsSupportingAICH() &&
            pCurClient->GetReqFileAICHHash() != NULL &&
            !pCurClient->IsAICHReqPending()
            && (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
        {
            if (pCurClient->HasLowID()) {
                ++cAICHLowIDClients;
            } else {
                ++cAICHClients;
            }
        }
    }
    if ((cAICHClients | cAICHLowIDClients) == 0) {
        AddDebugLogLineM( false, logAICHRecovery, wxT("Unable to request AICH Recoverydata because found no client who supports it and has the same hash as the trusted one"));
        return;
    }

    uint32 nSeclectedClient;
    if (cAICHClients > 0) {
        nSeclectedClient = (rand() % cAICHClients) + 1;
    } else {
        nSeclectedClient = (rand() % cAICHLowIDClients) + 1;
    }
    CUpDownClient* pClient = NULL;
    for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ++it) {
        CUpDownClient* pCurClient = *(it);
        if (pCurClient->IsSupportingAICH() && pCurClient->GetReqFileAICHHash() != NULL && !pCurClient->IsAICHReqPending()
            && (*pCurClient->GetReqFileAICHHash()) == m_pAICHHashSet->GetMasterHash())
        {
            if (cAICHClients > 0) {
                if (!pCurClient->HasLowID())
                    nSeclectedClient--;
            } else {
                wxASSERT( pCurClient->HasLowID());
                nSeclectedClient--;
            }
            if (nSeclectedClient == 0) {
                pClient = pCurClient;
                break;
            }
        }
    }
    if (pClient == NULL) {
        return;
    }

    AddDebugLogLineM( false, logAICHRecovery, CFormat( wxT("Requesting AICH Hash (%s) from client %s") ) % ( cAICHClients ? wxT("HighId") : wxT("LowID") ) % pClient->GetClientFullInfo() );
    pClient->SendAICHRequest(this, nPart);
}
void CPartFile::AICHRecoveryDataAvailable(uint16 nPart)
{
    if (GetPartCount() < nPart) {
        return;
    }

    uint32 length = GetPartSize(nPart);
    // if the part was already ok, it would now be complete
    if (IsComplete(nPart)) {
        AddDebugLogLineM( false, logAICHRecovery,
            wxString::Format( wxT("Processing AICH Recovery data: The part (%u) is already complete, canceling"), nPart ) );
        return;
    }

    CAICHHashTree* pVerifiedHash = m_pAICHHashSet->m_pHashTree.FindHash(nPart * PARTSIZE, length);
    if (pVerifiedHash == NULL || !pVerifiedHash->GetHashValid()) {
        AddDebugLogLineM( true, logAICHRecovery, wxT("Processing AICH Recovery data: Unable to get verified hash from hashset (should never happen)") );
        return;
    }

    CAICHHashTree htOurHash(pVerifiedHash->GetNDataSize(), pVerifiedHash->GetIsLeftBranch(), pVerifiedHash->GetNBaseSize());
    try {
        CreateHashFromFile(m_hpartfile, PARTSIZE * nPart, length, NULL, &htOurHash);
    } catch (const CIOFailureException& e) {
        AddDebugLogLineM(true, logAICHRecovery,
            CFormat(wxT("IO failure while hashing part-file '%s': %s"))
                % m_hpartfile.GetFilePath() % e.what());
        SetPartFileStatus(PS_ERROR);
        return;
    }

    if (!htOurHash.GetHashValid()) {
        AddDebugLogLineM( false, logAICHRecovery, wxT("Processing AICH Recovery data: Failed to retrieve AICH Hashset of corrupt part") );
        return;
    }

    // now compare the hash we just computed to the verified hash, and re-add all blocks which are ok
    uint32 nRecovered = 0;
    for (uint32 pos = 0; pos < length; pos += EMBLOCKSIZE) {
        const uint32 nBlockSize = min<uint32>(EMBLOCKSIZE, length - pos);
        CAICHHashTree* pVerifiedBlock = pVerifiedHash->FindHash(pos, nBlockSize);
        CAICHHashTree* pOurBlock = htOurHash.FindHash(pos, nBlockSize);
        if ( pVerifiedBlock == NULL || pOurBlock == NULL || !pVerifiedBlock->GetHashValid() || !pOurBlock->GetHashValid()) {
            continue;
        }
        if (pOurBlock->GetHash() == pVerifiedBlock->GetHash()) {
            FillGap(PARTSIZE * nPart + pos, PARTSIZE * nPart + pos + (nBlockSize - 1));
            RemoveBlockFromList(PARTSIZE * nPart, PARTSIZE * nPart + (nBlockSize - 1));
            nRecovered += nBlockSize;
            // tell the blackbox about the verified data
            m_CorruptionBlackBox->VerifiedData(PARTSIZE * nPart + pos, PARTSIZE * nPart + pos + (nBlockSize - 1));
        } else {
            // inform our "blackbox" about the corrupted block, which may ban clients who sent it
            m_CorruptionBlackBox->CorruptedData(PARTSIZE * nPart + pos, PARTSIZE * nPart + pos + (nBlockSize - 1));
        }
    }
    m_CorruptionBlackBox->EvaluateData(nPart);
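
    // Illustrative numbers (assuming the usual ED2K constants of a ~9.28 MB part
    // and 180 KB EMBLOCKSIZE): the loop above compares roughly 53 blocks for a
    // full-size part. Every block whose local hash matches the verified AICH tree
    // is re-marked as filled, so a single corrupted 180 KB block no longer forces
    // the whole part to be downloaded again.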
    // ok, now some sanity checks
    if (IsComplete(nPart)) {
        // this is bad, but it could probably happen under some rare circumstances
        // make sure that MD4 agrees to this fact too
        if (!HashSinglePart(nPart)) {
            AddDebugLogLineM( false, logAICHRecovery,
                wxString::Format(wxT("Processing AICH Recovery data: The part (%u) got completed while recovering - but MD4 says it is corrupt! Setting hashset to error state, deleting part"), nPart));
            // now we are fu... unhappy
            m_pAICHHashSet->SetStatus(AICH_ERROR);
            return;
        } else {
            AddDebugLogLineM( false, logAICHRecovery, wxString::Format(
                wxT("Processing AICH Recovery data: The part (%u) got completed while recovering and MD4 agrees"), nPart) );
            if (status == PS_EMPTY && theApp->IsRunning()) {
                if (GetHashCount() == GetED2KPartHashCount() && !m_hashsetneeded) {
                    // Successfully recovered part, make it available for sharing
                    SetStatus(PS_READY);
                    theApp->sharedfiles->SafeAddKFile(this);
                }
            }

            if (theApp->IsRunning()) {
                // Is this file finished?
                if (m_gaplist.IsComplete()) {
                    CompleteFile(false);
                }
            }
        }
    } // end sanity check

    // We did the best we could. If it's still incomplete, then no need to keep
    // bashing it with ICH. So remove it from the list of corrupted parts.
    EraseFirstValue(m_corrupted_list, nPart);

    // make sure the user appreciates our great recovering work :P
    AddDebugLogLineM( true, logAICHRecovery, CFormat(
        wxT("AICH successfully recovered %s of %s from part %u for %s") )
        % CastItoXBytes(nRecovered)
        % CastItoXBytes(length)
        % nPart
        % GetFileName() );
}
void CPartFile::ClientStateChanged(int oldState, int newState)
{
    if (oldState == newState)
        return;

    // If the state is -1, then it's an entirely new item
    if (oldState != -1) {
        // Was the old state a valid state?
        if (oldState == DS_ONQUEUE || oldState == DS_DOWNLOADING) {
            m_validSources--;
        } else {
            if (oldState == DS_CONNECTED /* || oldState == DS_REMOTEQUEUEFULL */ ) {
                m_validSources--;
            }

            m_notCurrentSources--;
        }
    }

    // If the state is -1, then the source is being removed
    if (newState != -1) {
        // Was the new state a valid state?
        if (newState == DS_ONQUEUE || newState == DS_DOWNLOADING) {
            ++m_validSources;
        } else {
            if (newState == DS_CONNECTED /* || newState == DS_REMOTEQUEUEFULL */ ) {
                ++m_validSources;
            }

            ++m_notCurrentSources;
        }
    }
}


bool CPartFile::AddSource(CUpDownClient* client)
{
    if (m_SrcList.insert(client).second) {
        theStats::AddFoundSource();
        theStats::AddSourceOrigin(client->GetSourceFrom());
        return true;
    } else {
        return false;
    }
}


bool CPartFile::DelSource(CUpDownClient* client)
{
    if (m_SrcList.erase(client)) {
        theStats::RemoveSourceOrigin(client->GetSourceFrom());
        theStats::RemoveFoundSource();
        return true;
    } else {
        return false;
    }
}
void CPartFile::UpdatePartsFrequency(CUpDownClient* client, bool increment)
{
    const BitVector& freq = client->GetPartStatus();

    if (m_SrcpartFrequency.size() != GetPartCount()) {
        m_SrcpartFrequency.clear();
        m_SrcpartFrequency.insert(m_SrcpartFrequency.begin(), GetPartCount(), 0);
    }

    unsigned int size = freq.size();
    if (size != m_SrcpartFrequency.size()) {
        return;
    }

    if (increment) {
        for (unsigned int i = 0; i < size; i++) {
            if (freq[i]) {
                m_SrcpartFrequency[i]++;
            }
        }
    } else {
        for (unsigned int i = 0; i < size; i++) {
            if (freq[i]) {
                m_SrcpartFrequency[i]--;
            }
        }
    }
}


const FileRatingList &CPartFile::GetRatingAndComments()
{
    m_FileRatingList.clear();
    // This can be pre-processed, but is it worth the CPU?
    CPartFile::SourceSet::iterator it = m_SrcList.begin();
    for (; it != m_SrcList.end(); ++it) {
        CUpDownClient* cur_src = *it;
        if (cur_src->GetFileComment().Length() > 0 || cur_src->GetFileRating() > 0) {
            // AddDebugLogLineM(false, logPartFile, wxString(wxT("found a comment for ")) << GetFileName());
            m_FileRatingList.push_back(SFileRating(*cur_src));
        }
    }

    return m_FileRatingList;
}
#else   // CLIENT_GUI

CPartFile::CPartFile(CEC_PartFile_Tag* tag)
{
    SetFileName(CPath(tag->FileName()));
    m_abyFileHash = tag->ID();
    SetFileSize(tag->SizeFull());
    m_gaplist.Init(GetFileSize(), true);    // Init empty
    m_partmetfilename = CPath(tag->PartMetName());
    transferred = tag->SizeXfer();
    completedsize = tag->SizeDone();
    percentcompleted = (100.0 * completedsize) / GetFileSize();

    m_category = tag->FileCat();

    m_SrcpartFrequency.insert(m_SrcpartFrequency.end(), GetPartCount(), 0);
    m_iDownPriority = tag->Prio();
    if (m_iDownPriority >= 10) {
        m_iDownPriority -= 10;
        m_bAutoDownPriority = true;
    } else {
        m_bAutoDownPriority = false;
    }

    m_a4af_source_count = 0;
}

/*
 * Remote gui specific code
 */
CPartFile::~CPartFile()
{
}

const FileRatingList &CPartFile::GetRatingAndComments()
{
    return m_FileRatingList;
}

#endif // !CLIENT_GUI
void CPartFile::UpdateDisplayedInfo(bool force)
{
    uint32 curTick = ::GetTickCount();
    m_CommentUpdated = true;

    // Wait 1.5s between each redraw
    if (force || curTick - m_lastRefreshedDLDisplay > MINWAIT_BEFORE_DLDISPLAY_WINDOWUPDATE) {
        Notify_DownloadCtrlUpdateItem(this);
        m_lastRefreshedDLDisplay = curTick;
    }
}


void CPartFile::Init()
{
    m_showSources = false;
    m_lastsearchtime = 0;
    lastpurgetime = ::GetTickCount();

    m_insufficient = false;

    m_iLastPausePurge = time(NULL);

    if (thePrefs::GetNewAutoDown()) {
        m_iDownPriority = PR_HIGH;
        m_bAutoDownPriority = true;
    } else {
        m_iDownPriority = PR_NORMAL;
        m_bAutoDownPriority = false;
    }

    memset(m_anStates, 0, sizeof(m_anStates));

    transferingsrc = 0; // new

    m_CommentUpdated = false;
    m_hashsetneeded = true;

    percentcompleted = 0;

    m_bPreviewing = false;
    lastseencomplete = 0;
    m_availablePartsCount = 0;
    m_ClientSrcAnswered = 0;
    m_LastNoNeededCheck = 0;

    m_nTotalBufferData = 0;
    m_nLastBufferFlushTime = 0;
    m_bPercentUpdated = false;
    m_bRecoveringArchive = false;
    m_iGainDueToCompression = 0;
    m_iLostDueToCorruption = 0;
    m_iTotalPacketsSavedDueToICH = 0;

    m_lastRefreshedDLDisplay = 0;
    m_nDlActiveTime = 0;

    m_is_A4AF_auto = false;
    m_localSrcReqQueued = false;
    m_nCompleteSourcesTime = time(NULL);
    m_nCompleteSourcesCount = 0;
    m_nCompleteSourcesCountLo = 0;
    m_nCompleteSourcesCountHi = 0;

    m_notCurrentSources = 0;

    m_LastSearchTimeKad = 0;
    m_TotalSearchesKad = 0;

    m_gapptrlist.Init(&m_gaplist);

    m_CorruptionBlackBox = new CCorruptionBlackBox();
}
wxString CPartFile::getPartfileStatus() const
{
    wxString mybuffer;

    if ((status == PS_HASHING) || (status == PS_WAITINGFORHASH)) {
        mybuffer = _("Hashing");
    } else if (status == PS_ALLOCATING) {
        mybuffer = _("Allocating");
    } else {
        switch (GetStatus()) {
            case PS_COMPLETING:
                mybuffer = _("Completing");
                break;
            case PS_COMPLETE:
                mybuffer = _("Complete");
                break;
            case PS_PAUSED:
                mybuffer = _("Paused");
                break;
            case PS_ERROR:
                mybuffer = _("Erroneous");
                break;
            case PS_INSUFFICIENT:
                mybuffer = _("Insufficient disk space");
                break;
            default:
                if (GetTransferingSrcCount() > 0) {
                    mybuffer = _("Downloading");
                } else {
                    mybuffer = _("Waiting");
                }
                break;
        }
        if (m_stopped && (GetStatus() != PS_COMPLETE)) {
            mybuffer = _("Stopped");
        }
    }

    return mybuffer;
}
int CPartFile::getPartfileStatusRang() const
{
    int tempstatus = 0;
    if (GetTransferingSrcCount() == 0) {
        tempstatus = 1;
    }
    switch (GetStatus()) {
        case PS_HASHING:
        case PS_WAITINGFORHASH:
        default:
            break;
    }
    return tempstatus;
}
wxString CPartFile::GetFeedback() const
{
    wxString retval = CKnownFile::GetFeedback();
    if (GetStatus() != PS_COMPLETE) {
        retval += wxString(_("Downloaded")) + wxT(": ") + CastItoXBytes(GetCompletedSize()) + wxString::Format(wxT(" (%.2f%%)\n"), GetPercentCompleted())
            + _("Sources") + CFormat(wxT(": %u\n")) % GetSourceCount();
    }
    return retval + _("Status") + wxT(": ") + getPartfileStatus() + wxT("\n");
}


sint32 CPartFile::getTimeRemaining() const
{
    if (GetKBpsDown() < 0.001) {
        return -1;
    }

    return ((GetFileSize() - GetCompletedSize()) / ((int)(GetKBpsDown() * 1024.0)));
}
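
// Worked example (assumed numbers): with 700 MiB still missing and a rate of
// 250 kB/s, the estimate is (700*1024*1024) / (250*1024) = 2867 seconds, i.e.
// roughly 48 minutes; when the download rate is effectively zero the function
// returns a negative sentinel instead of dividing by zero.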
bool CPartFile::PreviewAvailable()
{
    FileType type = GetFiletype(GetFileName());

    return (((type == ftVideo) || (type == ftAudio)) && IsComplete(0, 256 * 1024));
}
bool CPartFile::CheckShowItemInGivenCat(int inCategory)
{
    // easy normal cases
    bool IsInCat;
    bool IsNotFiltered = true;

    IsInCat = ((inCategory == 0) || (inCategory > 0 && inCategory == GetCategory()));

    switch (thePrefs::GetAllcatType()) {
        case 1:
            IsNotFiltered = GetCategory() == 0 || inCategory > 0;
            break;
        case 2:
            IsNotFiltered = IsPartFile();
            break;
        case 3:
            IsNotFiltered = !IsPartFile();
            break;
        case 4:
            IsNotFiltered =
                (GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
                GetTransferingSrcCount() == 0;
            break;
        case 5:
            IsNotFiltered =
                (GetStatus() == PS_READY || GetStatus() == PS_EMPTY) &&
                GetTransferingSrcCount() > 0;
            break;
        case 6:
            IsNotFiltered = GetStatus() == PS_ERROR;
            break;
        case 7:
            IsNotFiltered = GetStatus() == PS_PAUSED && !IsStopped();
            break;
        case 8:
            IsNotFiltered = IsStopped();
            break;
        case 9:
            IsNotFiltered = GetFiletype(GetFileName()) == ftVideo;
            break;
        case 10:
            IsNotFiltered = GetFiletype(GetFileName()) == ftAudio;
            break;
        case 11:
            IsNotFiltered = GetFiletype(GetFileName()) == ftArchive;
            break;
        case 12:
            IsNotFiltered = GetFiletype(GetFileName()) == ftCDImage;
            break;
        case 13:
            IsNotFiltered = GetFiletype(GetFileName()) == ftPicture;
            break;
        case 14:
            IsNotFiltered = GetFiletype(GetFileName()) == ftText;
            break;
        case 15:
            IsNotFiltered = !IsStopped() && GetStatus() != PS_PAUSED;
            break;
    }

    return IsNotFiltered && IsInCat;
}
void CPartFile::SetActive(bool bActive)
{
    time_t tNow = time(NULL);
    if (bActive) {
        if (theApp->IsConnected()) {
            if (m_tActivated == 0) {
                m_tActivated = tNow;
            }
        }
    } else {
        if (m_tActivated != 0) {
            m_nDlActiveTime += tNow - m_tActivated;
            m_tActivated = 0;
        }
    }
}


uint32 CPartFile::GetDlActiveTime() const
{
    uint32 nDlActiveTime = m_nDlActiveTime;
    if (m_tActivated != 0) {
        nDlActiveTime += time(NULL) - m_tActivated;
    }
    return nDlActiveTime;
}


uint8 CPartFile::GetStatus(bool ignorepause) const
{
    if ((!m_paused && !m_insufficient) ||
        status == PS_ERROR ||
        status == PS_COMPLETING ||
        status == PS_COMPLETE ||
        ignorepause) {
        return status;
    } else if (m_insufficient) {
        return PS_INSUFFICIENT;
    } else {
        return PS_PAUSED;
    }
}


void CPartFile::AddDeadSource(const CUpDownClient* client)
{
    m_deadSources.AddDeadSource( client );
}


bool CPartFile::IsDeadSource(const CUpDownClient* client)
{
    return m_deadSources.IsDeadSource( client );
}
void CPartFile::SetFileName(const CPath& fileName)
{
    CKnownFile* pFile = theApp->sharedfiles->GetFileByID(GetFileHash());

    bool is_shared = (pFile && pFile == this);

    if (is_shared) {
        // The file is shared, we must clear the search keywords so we don't
        // publish the old name anymore.
        theApp->sharedfiles->RemoveKeywords(this);
    }

    CKnownFile::SetFileName(fileName);

    if (is_shared) {
        // And of course, we must advertise the new name if the file is shared.
        theApp->sharedfiles->AddKeywords(this);
    }

    UpdateDisplayedInfo(true);
}


uint16 CPartFile::GetMaxSources() const
{
    // This is just like this, while we don't import the private max sources per file
    return thePrefs::GetMaxSourcePerFile();
}


uint16 CPartFile::GetMaxSourcePerFileSoft() const
{
    unsigned int temp = ((unsigned int)GetMaxSources() * 9L) / 10;
    if (temp > MAX_SOURCES_FILE_SOFT) {
        return MAX_SOURCES_FILE_SOFT;
    }
    return temp;
}


uint16 CPartFile::GetMaxSourcePerFileUDP() const
{
    unsigned int temp = ((unsigned int)GetMaxSources() * 3L) / 4;
    if (temp > MAX_SOURCES_FILE_UDP) {
        return MAX_SOURCES_FILE_UDP;
    }
    return temp;
}
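
// Worked example (assumed configuration): with a per-file source limit of 300,
// the soft limit above is 300*9/10 = 270 and the UDP limit is 300*3/4 = 225,
// each additionally capped by MAX_SOURCES_FILE_SOFT and MAX_SOURCES_FILE_UDP
// respectively.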
#define DROP_FACTOR 2

CUpDownClient* CPartFile::GetSlowerDownloadingClient(uint32 speed, CUpDownClient* caller) {
//    printf("Start slower source calculation\n");
    for (SourceSet::iterator it = m_SrcList.begin(); it != m_SrcList.end(); ) {
        CUpDownClient* cur_src = *it++;
        if ((cur_src->GetDownloadState() == DS_DOWNLOADING) && (cur_src != caller)) {
            uint32 factored_bytes_per_second = static_cast<uint32>(
                (cur_src->GetKBpsDown() * 1024) * DROP_FACTOR);
            if (factored_bytes_per_second < speed) {
//                printf("Selecting source %p to drop: %d < %d\n", cur_src, factored_bytes_per_second, speed);
//                printf("End slower source calculation\n");
                return cur_src;
            } else {
//                printf("Not selecting source %p to drop: %d > %d\n", cur_src, factored_bytes_per_second, speed);
            }
        }
    }
//    printf("End slower source calculation\n");
    return NULL;
}
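
// Illustrative example (assumed speeds): with DROP_FACTOR set to 2, a caller
// downloading at 80 kB/s (speed = 81920 B/s) will only drop a source whose own
// rate is below 40 kB/s, since a source at 40 kB/s or more still has a doubled
// rate of at least 81920 B/s and is therefore kept.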
void CPartFile::AllocationFinished()
{
    // see if it can be opened
    if (!m_hpartfile.Open(m_PartPath, CFile::read_write)) {
        AddLogLineM(false, CFormat(_("ERROR: Failed to open partfile '%s'")) % GetFullName());
        SetPartFileStatus(PS_ERROR);
    }
    // then close the handle again
    m_hpartfile.Release(true);
}

// File_checked_for_headers
3985 // File_checked_for_headers