// LuceneQueryingDriver.cs
//
// Copyright (C) 2004-2005 Novell, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
using System;
using System.Collections;
using System.Diagnostics;
using System.Globalization;
using System.Threading;
using System.Xml.Serialization;

using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.QueryParsers;
using LNS = Lucene.Net.Search;

using Beagle.Util;
namespace Beagle.Daemon {

	public class LuceneQueryingDriver : LuceneCommon {

		static public bool Debug = false;

		public delegate bool UriFilter (Uri uri);
		public delegate double RelevancyMultiplier (Hit hit);

		public LuceneQueryingDriver (string index_name, int minor_version, bool read_only)
			: base (index_name, minor_version)
		{
			// FIXME: Maybe the LuceneQueryingDriver should never try to create the index?
			if (Exists ())
				Open (read_only);
			else if (! read_only)
				Create ();
			else {
				// We're in read-only mode, but we can't create an index.
				// Maybe a different exception would be better?  This one is caught
				// in QueryDriver.LoadStaticQueryable ()
				throw new InvalidOperationException ();
			}

			// Initialize the user text cache only if we're not in
			// read-only mode.  StaticQueryables instantiate their
			// own text caches that are stored in a separate
			// location.
			if (! read_only)
				text_cache = TextCache.UserCache;
		}
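
		// A minimal usage sketch (hypothetical caller; the index name,
		// version constant and property key below are invented for
		// illustration):
		//
		//     LuceneQueryingDriver driver;
		//     driver = new LuceneQueryingDriver ("FileSystemIndex", MINOR_VERSION, true);
		//     Uri[] uris = driver.PropertyQuery (Property.NewKeyword ("fixme:filename", "foo.txt"));
		//
		// Full queries normally go through DoQuery (), which broadcasts
		// Hit objects to a result object rather than returning Uris.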

		////////////////////////////////////////////////////////////////

		////////////////////////////////////////////////////////////////

		public Uri[] PropertyQuery (Property prop)
		{
			// FIXME: Should we support scanning the secondary
			// index as well?

			IndexReader primary_reader;
			LNS.IndexSearcher primary_searcher;

			primary_reader = LuceneCommon.GetReader (PrimaryStore);
			primary_searcher = new LNS.IndexSearcher (primary_reader);

			Term term = new Term (PropertyToFieldName (prop.Type, prop.Key), prop.Value);
			LNS.TermQuery query = new LNS.TermQuery (term);
			LNS.Hits hits = primary_searcher.Search (query);

			Uri[] uri_list = new Uri [hits.Length ()];
			for (int i = 0; i < hits.Length (); i++) {
				Document doc;
				doc = hits.Doc (i);
				uri_list [i] = GetUriFromDocument (doc);
			}

			primary_searcher.Close ();
			ReleaseReader (primary_reader);

			return uri_list;
		}

		////////////////////////////////////////////////////////////////

		// Returns the lowest matching score before the results are
		// truncated.
		public void DoQuery (Query query, IQueryResult result,
				     ICollection search_subset_uris, // should be internal uris
				     UriFilter uri_filter,
				     HitFilter hit_filter)
		{
			if (Debug)
				Logger.Log.Debug ("###### {0}: Starting low-level queries", IndexName);

			Stopwatch sw;
			sw = new Stopwatch ();
			sw.Start ();

			// Assemble all of the parts into a bunch of Lucene queries

			ArrayList primary_required_part_queries = null;
			ArrayList secondary_required_part_queries = null;

			LNS.BooleanQuery primary_prohibited_part_query = null;
			LNS.BooleanQuery secondary_prohibited_part_query = null;

			AndHitFilter all_hit_filters;
			all_hit_filters = new AndHitFilter ();
			if (hit_filter != null)
				all_hit_filters.Add (hit_filter);

			ArrayList term_list = new ArrayList ();

			foreach (QueryPart part in query.Parts) {
				LNS.Query primary_part_query;
				LNS.Query secondary_part_query;
				HitFilter part_hit_filter;
				QueryPartToQuery (part,
						  false, // we want both primary and secondary queries
						  part.Logic == QueryPartLogic.Required ? term_list : null,
						  out primary_part_query,
						  out secondary_part_query,
						  out part_hit_filter);

				if (primary_part_query == null)
					continue;

				switch (part.Logic) {

				case QueryPartLogic.Required:
					if (primary_required_part_queries == null) {
						primary_required_part_queries = new ArrayList ();
						secondary_required_part_queries = new ArrayList ();
					}
					primary_required_part_queries.Add (primary_part_query);
					secondary_required_part_queries.Add (secondary_part_query);

					if (part_hit_filter != null)
						all_hit_filters.Add (part_hit_filter);

					break;

				case QueryPartLogic.Prohibited:
					if (primary_prohibited_part_query == null)
						primary_prohibited_part_query = new LNS.BooleanQuery ();
					primary_prohibited_part_query.Add (primary_part_query, false, false);

					if (secondary_part_query != null) {
						if (secondary_prohibited_part_query == null)
							secondary_prohibited_part_query = new LNS.BooleanQuery ();
						secondary_prohibited_part_query.Add (secondary_part_query, false, false);
					}

					if (part_hit_filter != null) {
						NotHitFilter nhf;
						nhf = new NotHitFilter (part_hit_filter);
						all_hit_filters.Add (new HitFilter (nhf.HitFilter));
					}

					break;
				}
			}

			// If we have no required parts, give up.
			if (primary_required_part_queries == null)
				return;

			//
			// Now that we have all of these nice queries, let's execute them!
			//

			// Create the searchers that we will need.

			IndexReader primary_reader;
			LNS.IndexSearcher primary_searcher;
			IndexReader secondary_reader = null;
			LNS.IndexSearcher secondary_searcher = null;

			primary_reader = LuceneCommon.GetReader (PrimaryStore);
			primary_searcher = new LNS.IndexSearcher (primary_reader);

			if (SecondaryStore != null) {
				secondary_reader = LuceneCommon.GetReader (SecondaryStore);
				if (secondary_reader.NumDocs () == 0) {
					ReleaseReader (secondary_reader);
					secondary_reader = null;
				}
			}

			if (secondary_reader != null)
				secondary_searcher = new LNS.IndexSearcher (secondary_reader);

			// Possibly create our whitelists from the search subset.

			LuceneBitArray primary_whitelist = null;
			LuceneBitArray secondary_whitelist = null;

			if (search_subset_uris != null && search_subset_uris.Count > 0) {
				primary_whitelist = new LuceneBitArray (primary_searcher);
				if (secondary_searcher != null)
					secondary_whitelist = new LuceneBitArray (secondary_searcher);

				foreach (Uri uri in search_subset_uris) {
					primary_whitelist.AddUri (uri);
					if (secondary_whitelist != null)
						secondary_whitelist.AddUri (uri);
				}

				primary_whitelist.FlushUris ();
				if (secondary_whitelist != null)
					secondary_whitelist.FlushUris ();
			}

			// Build blacklists from our prohibited parts.

			LuceneBitArray primary_blacklist = null;
			LuceneBitArray secondary_blacklist = null;

			if (primary_prohibited_part_query != null) {
				primary_blacklist = new LuceneBitArray (primary_searcher,
									primary_prohibited_part_query);

				if (secondary_searcher != null) {
					secondary_blacklist = new LuceneBitArray (secondary_searcher);
					if (secondary_prohibited_part_query != null)
						secondary_blacklist.Or (secondary_prohibited_part_query);
					primary_blacklist.Join (secondary_blacklist);
				}
			}

			// Combine our whitelist and blacklist into just a whitelist.

			if (primary_blacklist != null) {
				if (primary_whitelist == null) {
					primary_blacklist.Not ();
					primary_whitelist = primary_blacklist;
				} else {
					primary_whitelist.AndNot (primary_blacklist);
				}
			}

			if (secondary_blacklist != null) {
				if (secondary_whitelist == null) {
					secondary_blacklist.Not ();
					secondary_whitelist = secondary_blacklist;
				} else {
					secondary_whitelist.AndNot (secondary_blacklist);
				}
			}
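
			// For example (hypothetical four-document index): with no
			// whitelist and blacklist 0110, the combined whitelist is
			// NOT 0110 = 1001; with whitelist 1100 and blacklist 0110,
			// it is 1100 AND NOT 0110 = 1000.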

			BetterBitArray primary_matches = null;

			if (primary_required_part_queries != null) {

				if (secondary_searcher != null)
					primary_matches = DoRequiredQueries_TwoIndex (primary_searcher,
										      secondary_searcher,
										      primary_required_part_queries,
										      secondary_required_part_queries,
										      primary_whitelist,
										      secondary_whitelist);
				else
					primary_matches = DoRequiredQueries (primary_searcher,
									     primary_required_part_queries,
									     primary_whitelist);
			}

			if (Debug)
				Logger.Log.Debug ("###### {0}: Finished low-level queries in {1}", IndexName, sw);

			// Only generate results if we got some matches
			if (primary_matches != null && primary_matches.ContainsTrue ()) {
				GenerateQueryResults (primary_reader,
						      primary_searcher,
						      secondary_searcher,
						      primary_matches,
						      result,
						      term_list,
						      query.MaxHits,
						      uri_filter,
						      new HitFilter (all_hit_filters.HitFilter),
						      IndexName);
			}

			//
			// Finally, we clean up after ourselves.
			//

			primary_searcher.Close ();
			if (secondary_searcher != null)
				secondary_searcher.Close ();
			ReleaseReader (primary_reader);
			if (secondary_reader != null)
				ReleaseReader (secondary_reader);

			sw.Stop ();
			if (Debug)
				Logger.Log.Debug ("###### {0}: Processed query in {1}", IndexName, sw);
		}

		////////////////////////////////////////////////////////////////

		//
		// Special logic for handling our set of required queries
		//

		// This is the easy case: we just combine all of the queries
		// into one big BooleanQuery.
		private static BetterBitArray DoRequiredQueries (LNS.IndexSearcher primary_searcher,
								 ArrayList primary_queries,
								 BetterBitArray primary_whitelist)
		{
			LNS.BooleanQuery combined_query;
			combined_query = new LNS.BooleanQuery ();
			foreach (LNS.Query query in primary_queries)
				combined_query.Add (query, true, false);

			LuceneBitArray matches;
			matches = new LuceneBitArray (primary_searcher, combined_query);
			if (primary_whitelist != null)
				matches.And (primary_whitelist);

			return matches;
		}

		// This code attempts to execute N required queries in the
		// most efficient order to minimize the amount of time spent
		// joining between the two indexes.  It returns a joined bit
		// array of matches against the primary index.
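		//
		// The strategy is greedy: each MatchInfo carries an upper bound
		// on its result size (the sum of its primary and secondary true
		// counts), and we repeatedly pull out the smallest set, join it,
		// and use it to restrict the others.  For example, if three parts
		// have upper bounds of 10, 5,000 and 80,000 matches, intersecting
		// against the 10-match set first keeps the expensive cross-index
		// Joins operating on small bit arrays.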
		private class MatchInfo : IComparable {

			public LuceneBitArray PrimaryMatches = null;
			public LuceneBitArray SecondaryMatches = null;
			public int UpperBound = 0;

			public void Join ()
			{
				PrimaryMatches.Join (SecondaryMatches);
			}

			public void RestrictBy (MatchInfo joined)
			{
				if (joined != null) {
					this.PrimaryMatches.And (joined.PrimaryMatches);
					this.SecondaryMatches.And (joined.SecondaryMatches);
				}

				UpperBound = 0;
				UpperBound += PrimaryMatches.TrueCount;
				UpperBound += SecondaryMatches.TrueCount;
			}

			public int CompareTo (object obj)
			{
				MatchInfo other = (MatchInfo) obj;
				return this.UpperBound - other.UpperBound;
			}
		}

		// Any whitelists that are passed in must be fully joined, or
		// query results will be incorrect.
		private static BetterBitArray DoRequiredQueries_TwoIndex (LNS.IndexSearcher primary_searcher,
									  LNS.IndexSearcher secondary_searcher,
									  ArrayList primary_queries,
									  ArrayList secondary_queries,
									  BetterBitArray primary_whitelist,
									  BetterBitArray secondary_whitelist)
		{
			ArrayList match_info_list;
			match_info_list = new ArrayList ();

			// First, do all of the low-level queries
			// and store them in our MatchInfo
			for (int i = 0; i < primary_queries.Count; ++i) {
				LNS.Query pq, sq;
				pq = primary_queries [i] as LNS.Query;
				sq = secondary_queries [i] as LNS.Query;

				LuceneBitArray p_matches = null, s_matches = null;

				p_matches = new LuceneBitArray (primary_searcher);
				if (pq != null)
					p_matches.Or (pq);
				if (primary_whitelist != null)
					p_matches.And (primary_whitelist);

				s_matches = new LuceneBitArray (secondary_searcher);
				if (sq != null)
					s_matches.Or (sq);
				if (secondary_whitelist != null)
					s_matches.And (secondary_whitelist);

				MatchInfo info;
				info = new MatchInfo ();
				info.PrimaryMatches = p_matches;
				info.SecondaryMatches = s_matches;
				info.RestrictBy (null); // a hack to initialize the UpperBound
				match_info_list.Add (info);
			}

			// We want to be smart about the order we do this in,
			// to minimize the expense of the Join.
			while (match_info_list.Count > 1) {

				// linear scan to find the minimum
				int index_min = 0;
				for (int i = 1; i < match_info_list.Count; ++i)
					if (((MatchInfo) match_info_list [i]).CompareTo ((MatchInfo) match_info_list [index_min]) < 0)
						index_min = i;

				MatchInfo smallest;
				smallest = match_info_list [index_min] as MatchInfo;
				match_info_list.RemoveAt (index_min);

				// We can short-circuit if our smallest set of
				// matches is empty.
				if (smallest.UpperBound == 0)
					return smallest.PrimaryMatches; // this must be an empty array.

				smallest.Join ();

				foreach (MatchInfo info in match_info_list)
					info.RestrictBy (smallest);
			}

			// For the final pair, we don't need to do a full join:
			// mapping the secondary onto the primary is sufficient
			MatchInfo last;
			last = match_info_list [0] as MatchInfo;
			last.SecondaryMatches.ProjectOnto (last.PrimaryMatches);

			return last.PrimaryMatches;
		}
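
		// (A full Join would also map the primary matches back onto the
		// secondary index, but that work would be wasted on the final
		// pair, since only the primary matches are returned.)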

		////////////////////////////////////////////////////////////////

		static private void ScoreHits (Hashtable hits_by_id,
					       IndexReader reader,
					       ICollection term_list)
		{
			Stopwatch sw;
			sw = new Stopwatch ();
			sw.Start ();

			LNS.Similarity similarity;
			similarity = LNS.Similarity.GetDefault ();

			foreach (Term term in term_list) {

				double idf;
				idf = similarity.Idf (reader.DocFreq (term), reader.MaxDoc ());

				int hit_count;
				hit_count = hits_by_id.Count;

				TermDocs term_docs;
				term_docs = reader.TermDocs (term);
				while (term_docs.Next () && hit_count > 0) {

					int id;
					id = term_docs.Doc ();

					Hit hit;
					hit = hits_by_id [id] as Hit;
					if (hit != null) {
						double tf;
						tf = similarity.Tf (term_docs.Freq ());
						hit.Score += tf * idf;
						--hit_count;
					}
				}
			}

			sw.Stop ();
		}
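
		// The loop above accumulates a classic tf-idf relevance score:
		// Score(hit) = sum over query terms t of tf(t, doc) * idf(t).
		// With Lucene's default Similarity, tf(freq) = sqrt(freq) and
		// idf(t) = log (maxDocs / (docFreq + 1)) + 1.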

		////////////////////////////////////////////////////////////////

		// Lame iterator methods because we use two different ones
		// depending on which algorithm we use.
		private static IEnumerable IterateMatches (BetterBitArray primary_matches)
		{
			int j = primary_matches.Count;

			// Walk across the matches backwards, since newer
			// documents are more likely to be at the end of
			// the index.
			while (true) {
				int i = primary_matches.GetPreviousTrueIndex (j);
				if (i < 0)
					break;
				j = i-1; // This way we can't forget to adjust i
				yield return i;
			}
		}

		private static IEnumerable IterateOrderedMatches (int[] ordered_matches)
		{
			int i;
			for (i = 0; i < ordered_matches.Length; i++)
				yield return ordered_matches [i];
		}

		private class DocAndId {
			public Document Doc;
			public int Id;
		}

		//
		// Given a set of hits, broadcast some set out as our query
		// results.
		//

		private static void GenerateQueryResults (IndexReader primary_reader,
							  LNS.IndexSearcher primary_searcher,
							  LNS.IndexSearcher secondary_searcher,
							  BetterBitArray primary_matches,
							  IQueryResult result,
							  ICollection query_term_list,
							  int max_results,
							  UriFilter uri_filter,
							  HitFilter hit_filter,
							  string index_name)
		{
			TopScores top_docs = null;
			ArrayList all_docs = null;

			if (Debug)
				Logger.Log.Debug (">>> {0}: Initially handed {1} matches", index_name, primary_matches.TrueCount);

			if (primary_matches.TrueCount <= max_results) {
				if (Debug)
					Logger.Log.Debug (">>> {0}: Initial count is within our limit of {1}", index_name, max_results);
				all_docs = new ArrayList ();
			} else {
				if (Debug)
					Logger.Log.Debug (">>> {0}: Number of hits is capped at {1}", index_name, max_results);
				top_docs = new TopScores (max_results);
			}

			Stopwatch total, a, b, c, d;
			total = new Stopwatch ();
			a = null;
			b = new Stopwatch ();
			c = new Stopwatch ();
			d = new Stopwatch ();

			total.Start ();

			// There are two ways we can determine the max_results
			// most recent items:
			//
			// One is to instantiate Lucene documents for each of
			// the document IDs in primary_matches.  This is a
			// fairly expensive operation.
			//
			// The other is to walk through the list of all
			// document IDs in descending time order.  This is
			// a less expensive operation, but adds up over time
			// on large data sets.
			//
			// We can walk about 2.5 docs for every Document we
			// instantiate.  So what we'll do, if we have more
			// matches than available hits, is walk (m * 1.25)
			// docs to see if we can fill out the top 100 hits.
			// If not, we'll fall back to creating documents
			// for all of them.
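			//
			// For example: with 10,000 matches and max_results = 100, we
			// walk at most 10,000 * 1.25 = 12,500 timestamp-ordered doc
			// IDs looking for 100 that are matches.  At ~2.5 walked IDs
			// per instantiated Document, a failed walk costs roughly half
			// as much as instantiating all 10,000 matches directly.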

			int[] ordered_matches = null;
			if (primary_matches.TrueCount > max_results) {
				a = new Stopwatch ();
				a.Start ();

				TermDocs docs = primary_reader.TermDocs ();
				TermEnum enumerator = primary_reader.Terms (new Term ("InvertedTimestamp", ""));
				ordered_matches = new int [max_results];

				int docs_found = 0;
				int docs_walked = 0;
				int max_docs = (int) (primary_matches.TrueCount * 1.25);

				do {
					Term term = enumerator.Term ();

					if (term.Field () != "InvertedTimestamp")
						break;

					docs.Seek (enumerator);

					while (docs.Next ()
					       && docs_found < ordered_matches.Length
					       && docs_walked < max_docs) {
						int doc_id = docs.Doc ();

						if (primary_matches.Get (doc_id)) {
							ordered_matches [docs_found] = docs.Doc ();
							++docs_found;
						}

						++docs_walked;
					}
				} while (enumerator.Next ()
					 && docs_found < ordered_matches.Length
					 && docs_walked < max_docs);

				// We've found all the docs we can return in a subset!
				// Fantastic, we've probably short circuited a slow search.
				if (docs_found == max_results) {
					all_docs = new ArrayList ();
					top_docs = null;
				} else {
					// Bad luck!  Not all docs found
					// Start afresh - this time traversing all results
					ordered_matches = null;
				}

				a.Stop ();
				if (Debug) {
					Log.Debug (">>> {0}: Walked {1} items, populated an enum with {2} items in {3}", index_name, docs_walked, docs_found, a);

					if (docs_found == max_results)
						Log.Debug (">>> {0}: Successfully short circuited timestamp ordering!", index_name);
				}
			}

			int count = 0;
			b.Start ();

			IEnumerable enumerable;

			if (ordered_matches != null)
				enumerable = IterateOrderedMatches (ordered_matches);
			else
				enumerable = IterateMatches (primary_matches);

			foreach (int i in enumerable) {
				++count;

				Document doc;
				doc = primary_searcher.Doc (i);

				// Check the timestamp --- if we have already reached our
				// limit, we might be able to reject it immediately.
				string timestamp_str;
				long timestamp_num = 0;

				timestamp_str = doc.Get ("Timestamp");
				if (timestamp_str == null) {
					Logger.Log.Warn ("No timestamp on {0}!", GetUriFromDocument (doc));
				} else {
					timestamp_num = Int64.Parse (doc.Get ("Timestamp"));
					if (top_docs != null && ! top_docs.WillAccept (timestamp_num))
						continue;
				}

				// If we have a UriFilter, apply it.
				if (uri_filter != null) {
					Uri uri;
					uri = GetUriFromDocument (doc);
					if (! uri_filter (uri))
						continue;
				}

				DocAndId doc_and_id = new DocAndId ();
				doc_and_id.Doc = doc;
				doc_and_id.Id = i;

				// Add the document to the appropriate data structure.
				// We use the timestamp_num as the score, so high
				// scores correspond to more-recent timestamps.
				if (all_docs != null)
					all_docs.Add (doc_and_id);
				else
					top_docs.Add (timestamp_num, doc_and_id);
			}

			b.Stop ();

			if (Debug)
				Log.Debug (">>> {0}: Processed roughly {1} documents", index_name, count);

			c.Start ();

			ICollection final_list_of_docs;
			if (all_docs != null)
				final_list_of_docs = all_docs;
			else
				final_list_of_docs = top_docs.TopScoringObjects;

			ArrayList final_list_of_hits;
			final_list_of_hits = new ArrayList (final_list_of_docs.Count);

			// This is used only for scoring
			Hashtable hits_by_id = null;
			hits_by_id = new Hashtable ();

			// If we aren't using the secondary index, the next step is
			// very straightforward.
			if (secondary_searcher == null) {

				foreach (DocAndId doc_and_id in final_list_of_docs) {
					Hit hit;
					hit = DocumentToHit (doc_and_id.Doc);
					hits_by_id [doc_and_id.Id] = hit;
					final_list_of_hits.Add (hit);
				}

			} else {

				if (Debug)
					Logger.Log.Debug (">>> {0}: Performing cross-index Hit reunification", index_name);

				Hashtable hits_by_uri;
				hits_by_uri = UriFu.NewHashtable ();

				LuceneBitArray secondary_matches;
				secondary_matches = new LuceneBitArray (secondary_searcher);

				foreach (DocAndId doc_and_id in final_list_of_docs) {
					Hit hit;
					hit = DocumentToHit (doc_and_id.Doc);
					hits_by_id [doc_and_id.Id] = hit;
					hits_by_uri [hit.Uri] = hit;
					secondary_matches.AddUri (hit.Uri);
				}

				secondary_matches.FlushUris ();

				// Attach all of our secondary properties
				// to the hits.
				int j = 0;
				while (true) {
					int i = secondary_matches.GetNextTrueIndex (j);
					if (i >= secondary_matches.Count)
						break;
					j = i + 1;

					Document secondary_doc;
					secondary_doc = secondary_searcher.Doc (i);

					Uri uri;
					uri = GetUriFromDocument (secondary_doc);

					Hit hit;
					hit = hits_by_uri [uri] as Hit;

					AddPropertiesToHit (hit, secondary_doc, false);

					final_list_of_hits.Add (hit);
				}
			}

			ScoreHits (hits_by_id, primary_reader, query_term_list);

			c.Stop ();

			d.Start ();

			// If we used the TopScores object, we got our original
			// list of documents sorted for us.  If not, sort the
			// final list.
			if (top_docs == null)
				final_list_of_hits.Sort ();

			// If we have a hit_filter, use it now.
			if (hit_filter != null) {
				for (int i = 0; i < final_list_of_hits.Count; ++i) {
					Hit hit;
					hit = final_list_of_hits [i] as Hit;
					if (! hit_filter (hit)) {
						if (Debug)
							Logger.Log.Debug ("Filtered out {0}", hit.Uri);
						final_list_of_hits [i] = null;
					}
				}
			}

			// Before we broadcast a hit, we strip out any
			// properties in the PrivateNamespace.  We
			// manipulate the property ArrayList directly,
			// which is pretty gross... but this is safe,
			// since removing items will not change the sort
			// order.
			foreach (Hit hit in final_list_of_hits) {
				if (hit == null)
					continue;
				int i = 0;
				while (i < hit.Properties.Count) {
					Property prop = hit.Properties [i] as Property;
					if (prop.Key.StartsWith (Property.PrivateNamespace))
						hit.Properties.RemoveAt (i);
					else
						++i;
				}
			}

			result.Add (final_list_of_hits, primary_matches.TrueCount);

			d.Stop ();
			total.Stop ();

			if (Debug) {
				Logger.Log.Debug (">>> {0}: GenerateQueryResults time statistics:", index_name);
				Logger.Log.Debug (">>> {0}:   Short circuit {1,6} ({2:0.0}%)", index_name, a == null ? "N/A" : a.ToString (), a == null ? 0.0 : 100 * a.ElapsedTime / total.ElapsedTime);
				Logger.Log.Debug (">>> {0}:      First pass {1,6} ({2:0.0}%)", index_name, b, 100 * b.ElapsedTime / total.ElapsedTime);
				Logger.Log.Debug (">>> {0}:    Hit assembly {1,6} ({2:0.0}%)", index_name, c, 100 * c.ElapsedTime / total.ElapsedTime);
				Logger.Log.Debug (">>> {0}:      Final pass {1,6} ({2:0.0}%)", index_name, d, 100 * d.ElapsedTime / total.ElapsedTime);
				Logger.Log.Debug (">>> {0}:           TOTAL {1,6}", index_name, total);
			}
		}
	}
}