// LuceneQueryingDriver.cs
//
// Copyright (C) 2004-2005 Novell, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
28 using System
.Collections
;
29 using System
.Diagnostics
;
30 using System
.Globalization
;
33 using System
.Threading
;
35 using System
.Xml
.Serialization
;
37 using Lucene
.Net
.Analysis
;
38 using Lucene
.Net
.Analysis
.Standard
;
39 using Lucene
.Net
.Documents
;
40 using Lucene
.Net
.Index
;
41 using Lucene
.Net
.QueryParsers
;
42 using LNS
= Lucene
.Net
.Search
;
46 namespace Beagle
.Daemon
{
// Performs the low-level, read-only query work against a Lucene-backed
// Beagle index pair (a primary index plus an optional secondary
// property index).
// NOTE(review): this file is a corrupted extraction — stray original
// line numbers and hard line breaks are embedded in the code below and
// are preserved verbatim; only comments were added.
48 public class LuceneQueryingDriver
: LuceneCommon
{
// Debug flag for extra query logging; defaults to off.
50 static public bool Debug
= false;
// Property-name prefix marking internal-only properties; properties
// whose keys start with this prefix are stripped from hits before
// they are broadcast (see the PrivateNamespace check further down).
52 public const string PrivateNamespace
= "_private:";
// Predicate deciding whether a hit with the given Uri is kept.
54 public delegate bool UriFilter (Uri uri
);
// Presumably returns a factor applied to a hit's relevancy score;
// no caller is visible in this extraction — confirm usage.
55 public delegate double RelevancyMultiplier (Hit hit
);
// Constructs a querying driver for the named index, delegating index
// identity/versioning to the LuceneCommon base class.
// NOTE(review): the embedded original line numbers jump here
// (58 -> 60 -> 66, 69 -> 72, 74 -> 77), so the index open/create
// branching and its conditions are missing from this extraction; the
// surviving statements below are NOT a complete constructor body.
// The unconditional-looking "throw" was originally inside a branch
// for the "read-only but index does not exist" case (per its own
// comments), and the text_cache assignment was guarded by a
// not-read-only condition (per the comment above it) — confirm
// against the original file.
57 public LuceneQueryingDriver (string index_name
, int minor_version
, bool read_only
)
58 : base (index_name
, minor_version
)
60 // FIXME: Maybe the LuceneQueryingDriver should never try to create the index?
66 // We're in read-only mode, but we can't create an index.
67 // Maybe a different exception would be better? This one is caught
68 // in QueryDriver.LoadStaticQueryable ()
69 throw new InvalidOperationException ();
72 // Initialize the user text cache only if we're not in
73 // read-only mode. StaticQueryables instantiate their
74 // own text caches that are stored in a separate
77 text_cache
= TextCache
.UserCache
;
80 ////////////////////////////////////////////////////////////////
83 ////////////////////////////////////////////////////////////////
// Returns the Uris of all documents in the primary index whose
// stored property exactly matches the given property's type, key
// and value.
//
// NOTE(review): reconstructed from a corrupted extraction — the
// loop-local document fetch (hits.Doc (i)), the loop brace and the
// final "return uri_list;" were missing and have been restored from
// the visible tokens and the declared Uri[] return type.
public Uri [] PropertyQuery (Property prop)
{
	// FIXME: Should we support scanning the secondary
	// index as well?

	IndexReader primary_reader;
	LNS.IndexSearcher primary_searcher;

	primary_reader = LuceneCommon.GetReader (PrimaryStore);
	primary_searcher = new LNS.IndexSearcher (primary_reader);

	// Build an exact-match term query against the field that
	// encodes this property's type and key.
	Term term = new Term (PropertyToFieldName (prop.Type, prop.Key), prop.Value);
	LNS.TermQuery query = new LNS.TermQuery (term);
	LNS.Hits hits = primary_searcher.Search (query);

	// Materialize every matching document's Uri.
	Uri [] uri_list = new Uri [hits.Length ()];
	for (int i = 0; i < hits.Length (); i++) {
		Document doc;
		doc = hits.Doc (i);
		uri_list [i] = GetUriFromDocument (doc);
	}

	// Release the searcher and reader now that the hits have been
	// converted into plain Uris.
	primary_searcher.Close ();
	ReleaseReader (primary_reader);

	return uri_list;
}
113 ////////////////////////////////////////////////////////////////
115 // Returns the lowest matching score before the results are
// Executes the assembled Query against the primary (and, when
// present, secondary) index and generates results for the matches.
//
// NOTE(review): this span is a corrupted extraction — the embedded
// original line numbers jump in many places (e.g. 127, 156, 196,
// 310), so declarations (e.g. the Stopwatch "sw"), several
// statements and closing braces are missing.  The surviving tokens
// are preserved verbatim; only comments were added.
117 public void DoQuery (Query query
,
119 ICollection search_subset_uris
, // should be internal uris
120 UriFilter uri_filter
,
121 HitFilter hit_filter
)
124 Logger
.Log
.Debug ("###### {0}: Starting low-level queries", IndexName
);
// NOTE(review): "sw" is presumably a stopwatch declared and started
// on lines lost from this extraction; it is logged twice below.
127 sw
= new Stopwatch ();
130 // Assemble all of the parts into a bunch of Lucene queries
132 ArrayList primary_required_part_queries
= null;
133 ArrayList secondary_required_part_queries
= null;
135 LNS
.BooleanQuery primary_prohibited_part_query
= null;
136 LNS
.BooleanQuery secondary_prohibited_part_query
= null;
// Conjunction of every hit filter that applies to this query.
138 AndHitFilter all_hit_filters
;
139 all_hit_filters
= new AndHitFilter ();
140 if (hit_filter
!= null)
141 all_hit_filters
.Add (hit_filter
);
// Terms from required parts; used later for scoring (see ScoreHits).
143 ArrayList term_list
= new ArrayList ();
// Translate each QueryPart into primary/secondary Lucene queries
// plus an optional per-part hit filter.
145 foreach (QueryPart part
in query
.Parts
) {
146 LNS
.Query primary_part_query
;
147 LNS
.Query secondary_part_query
;
148 HitFilter part_hit_filter
;
149 QueryPartToQuery (part
,
150 false, // we want both primary and secondary queries
151 part
.Logic
== QueryPartLogic
.Required
? term_list
: null,
152 out primary_part_query
,
153 out secondary_part_query
,
154 out part_hit_filter
);
156 if (primary_part_query
== null)
159 switch (part
.Logic
) {
161 case QueryPartLogic
.Required
:
162 if (primary_required_part_queries
== null) {
163 primary_required_part_queries
= new ArrayList ();
164 secondary_required_part_queries
= new ArrayList ();
166 primary_required_part_queries
.Add (primary_part_query
);
167 secondary_required_part_queries
.Add (secondary_part_query
);
169 if (part_hit_filter
!= null)
170 all_hit_filters
.Add (part_hit_filter
);
// Prohibited parts are collected into one BooleanQuery per index;
// a NotHitFilter inverts any per-part hit filter.
174 case QueryPartLogic
.Prohibited
:
175 if (primary_prohibited_part_query
== null)
176 primary_prohibited_part_query
= new LNS
.BooleanQuery ();
177 primary_prohibited_part_query
.Add (primary_part_query
, false, false);
179 if (secondary_part_query
!= null) {
180 if (secondary_prohibited_part_query
== null)
181 secondary_prohibited_part_query
= new LNS
.BooleanQuery ();
182 secondary_prohibited_part_query
.Add (secondary_part_query
, false, false);
185 if (part_hit_filter
!= null) {
187 nhf
= new NotHitFilter (part_hit_filter
);
188 all_hit_filters
.Add (new HitFilter (nhf
.HitFilter
));
195 // If we have no required parts, give up.
196 if (primary_required_part_queries
== null)
200 // Now that we have all of these nice queries, let's execute them!
203 // Create the searchers that we will need.
205 IndexReader primary_reader
;
206 LNS
.IndexSearcher primary_searcher
;
207 IndexReader secondary_reader
= null;
208 LNS
.IndexSearcher secondary_searcher
= null;
210 primary_reader
= LuceneCommon
.GetReader (PrimaryStore
);
211 primary_searcher
= new LNS
.IndexSearcher (primary_reader
);
// A secondary index with zero documents is treated as absent.
213 if (SecondaryStore
!= null) {
214 secondary_reader
= LuceneCommon
.GetReader (SecondaryStore
);
215 if (secondary_reader
.NumDocs () == 0) {
216 ReleaseReader (secondary_reader
);
217 secondary_reader
= null;
221 if (secondary_reader
!= null)
222 secondary_searcher
= new LNS
.IndexSearcher (secondary_reader
);
225 // Possibly create our whitelists from the search subset.
227 LuceneBitArray primary_whitelist
= null;
228 LuceneBitArray secondary_whitelist
= null;
230 if (search_subset_uris
!= null && search_subset_uris
.Count
> 0) {
231 primary_whitelist
= new LuceneBitArray (primary_searcher
);
232 if (secondary_searcher
!= null)
233 secondary_whitelist
= new LuceneBitArray (secondary_searcher
);
235 foreach (Uri uri
in search_subset_uris
) {
236 primary_whitelist
.AddUri (uri
);
237 if (secondary_whitelist
!= null)
238 secondary_whitelist
.AddUri (uri
);
240 primary_whitelist
.FlushUris ();
241 if (secondary_whitelist
!= null)
242 secondary_whitelist
.FlushUris ();
246 // Build blacklists from our prohibited parts.
248 LuceneBitArray primary_blacklist
= null;
249 LuceneBitArray secondary_blacklist
= null;
251 if (primary_prohibited_part_query
!= null) {
252 primary_blacklist
= new LuceneBitArray (primary_searcher
,
253 primary_prohibited_part_query
);
255 if (secondary_searcher
!= null) {
256 secondary_blacklist
= new LuceneBitArray (secondary_searcher
);
257 if (secondary_prohibited_part_query
!= null)
258 secondary_blacklist
.Or (secondary_prohibited_part_query
);
// Map secondary blacklist bits onto the primary blacklist.
259 primary_blacklist
.Join (secondary_blacklist
);
264 // Combine our whitelist and blacklist into just a whitelist.
266 if (primary_blacklist
!= null) {
267 if (primary_whitelist
== null) {
268 primary_blacklist
.Not ();
269 primary_whitelist
= primary_blacklist
;
271 primary_whitelist
.AndNot (primary_blacklist
);
275 if (secondary_blacklist
!= null) {
276 if (secondary_whitelist
== null) {
277 secondary_blacklist
.Not ();
278 secondary_whitelist
= secondary_blacklist
;
280 secondary_whitelist
.AndNot (secondary_blacklist
);
// Run the required queries, via the two-index path when a
// secondary searcher exists, otherwise the single-index path.
284 BetterBitArray primary_matches
= null;
286 if (primary_required_part_queries
!= null) {
288 if (secondary_searcher
!= null)
289 primary_matches
= DoRequiredQueries_TwoIndex (primary_searcher
,
291 primary_required_part_queries
,
292 secondary_required_part_queries
,
294 secondary_whitelist
);
296 primary_matches
= DoRequiredQueries (primary_searcher
,
297 primary_required_part_queries
,
304 Logger
.Log
.Debug ("###### {0}: Finished low-level queries in {1}", IndexName
, sw
);
308 // Only generate results if we got some matches
309 if (primary_matches
!= null && primary_matches
.ContainsTrue ()) {
310 GenerateQueryResults (primary_reader
,
// NOTE(review): most GenerateQueryResults arguments (original
// lines ~311-322) are missing from this extraction.
318 new HitFilter (all_hit_filters
.HitFilter
),
323 // Finally, we clean up after ourselves.
326 primary_searcher
.Close ();
327 if (secondary_searcher
!= null)
328 secondary_searcher
.Close ();
329 ReleaseReader (primary_reader
);
330 if (secondary_reader
!= null)
331 ReleaseReader (secondary_reader
);
336 Logger
.Log
.Debug ("###### {0}: Processed query in {1}", IndexName
, sw
);
340 ////////////////////////////////////////////////////////////////
// Special logic for handling our set of required queries:
// the easy single-index case.  All of the required part queries
// are combined into one big BooleanQuery and executed against
// the primary index in a single search.
//
// NOTE(review): reconstructed from a corrupted extraction; the
// enclosing braces and the final "return matches;" (required by
// the BetterBitArray return type) were missing and have been
// restored.
private static BetterBitArray DoRequiredQueries (LNS.IndexSearcher primary_searcher,
						 ArrayList primary_queries,
						 BetterBitArray primary_whitelist)
{
	LNS.BooleanQuery combined_query;
	combined_query = new LNS.BooleanQuery ();
	foreach (LNS.Query query in primary_queries)
		combined_query.Add (query, true, false); // required=true, prohibited=false

	LuceneBitArray matches;
	matches = new LuceneBitArray (primary_searcher, combined_query);

	// When a whitelist was built (search subset and/or inverted
	// blacklist), restrict the matches to it.
	if (primary_whitelist != null)
		matches.And (primary_whitelist);

	return matches;
}
365 // This code attempts to execute N required queries in the
366 // most efficient order to minimize the amount of time spent
367 // joining between the two indexes. It returns a joined bit
368 // array of matches against the primary index.
// Bookkeeping for one required query's results against both
// indexes; comparable so the query loop can process the smallest
// candidate sets first and minimize join costs.
//
// NOTE(review): reconstructed from a corrupted extraction — the
// Join() method header, the "UpperBound = 0;" reset in RestrictBy
// (implied by the visible "RestrictBy (null); // a hack to
// initialize the UpperBound" call site) and the braces were
// missing and have been restored.
private class MatchInfo : IComparable {

	public LuceneBitArray PrimaryMatches = null;
	public LuceneBitArray SecondaryMatches = null;
	// Upper bound on the joined match count: the sum of the true
	// counts of the two (not-yet-joined) bit arrays.
	public int UpperBound = 0;

	// Fully join the secondary matches onto the primary ones.
	public void Join ()
	{
		PrimaryMatches.Join (SecondaryMatches);
	}

	// Intersect this match set with an already-joined one.
	// Passing null skips the intersection and just (re)computes
	// UpperBound.
	public void RestrictBy (MatchInfo joined)
	{
		if (joined != null) {
			this.PrimaryMatches.And (joined.PrimaryMatches);
			this.SecondaryMatches.And (joined.SecondaryMatches);
		}

		UpperBound = 0;
		UpperBound += PrimaryMatches.TrueCount;
		UpperBound += SecondaryMatches.TrueCount;
	}

	// Orders MatchInfos by ascending UpperBound (smallest first).
	public int CompareTo (object obj)
	{
		MatchInfo other = (MatchInfo) obj;
		return this.UpperBound - other.UpperBound;
	}
}
400 // Any whitelists that are passed in must be fully joined, or
401 // query results will be incorrect.
// Executes N required queries against both indexes and joins the
// results, ordering the joins from smallest candidate set to
// largest; returns the joined matches against the primary index.
//
// NOTE(review): corrupted extraction — local declarations (the
// types of pq/sq, "MatchInfo info", "int index_min", "MatchInfo
// smallest", "MatchInfo last"), the statements that actually run
// pq/sq into p_matches/s_matches, and several closing braces are
// missing.  The surviving tokens are preserved verbatim; only
// comments were added.
402 private static BetterBitArray
DoRequiredQueries_TwoIndex (LNS
.IndexSearcher primary_searcher
,
403 LNS
.IndexSearcher secondary_searcher
,
404 ArrayList primary_queries
,
405 ArrayList secondary_queries
,
406 BetterBitArray primary_whitelist
,
407 BetterBitArray secondary_whitelist
)
409 ArrayList match_info_list
;
410 match_info_list
= new ArrayList ();
412 // First, do all of the low-level queries
413 // and store them in our MatchInfo
414 for (int i
= 0; i
< primary_queries
.Count
; ++i
) {
416 pq
= primary_queries
[i
] as LNS
.Query
;
417 sq
= secondary_queries
[i
] as LNS
.Query
;
419 LuceneBitArray p_matches
= null, s_matches
= null;
420 p_matches
= new LuceneBitArray (primary_searcher
);
423 if (primary_whitelist
!= null)
424 p_matches
.And (primary_whitelist
);
427 s_matches
= new LuceneBitArray (secondary_searcher
);
430 if (secondary_whitelist
!= null)
431 s_matches
.And (secondary_whitelist
);
// Package the two per-query bit arrays into a MatchInfo; the
// RestrictBy(null) call below initializes its UpperBound.
435 info
= new MatchInfo ();
436 info
.PrimaryMatches
= p_matches
;
437 info
.SecondaryMatches
= s_matches
;
438 info
.RestrictBy (null); // a hack to initialize the UpperBound
439 match_info_list
.Add (info
);
442 // We want to be smart about the order we do this in,
443 // to minimize the expense of the Join.
444 while (match_info_list
.Count
> 1) {
446 // linear scan to find the minimum
448 for (int i
= 1; i
< match_info_list
.Count
; ++i
)
449 if (((MatchInfo
) match_info_list
[i
]).CompareTo ((MatchInfo
) match_info_list
[index_min
]) < 0)
453 smallest
= match_info_list
[index_min
] as MatchInfo
;
454 match_info_list
.RemoveAt (index_min
);
456 // We can short-circuit if our smallest set of
458 if (smallest
.UpperBound
== 0)
459 return smallest
.PrimaryMatches
; // this must be an empty array.
// Every remaining candidate set is narrowed by the smallest
// (already-joined) one before the next iteration.
463 foreach (MatchInfo info
in match_info_list
)
464 info
.RestrictBy (smallest
);
467 // For the final pair, we don't need to do a full join:
468 // mapping the secondary onto the primary is sufficient
470 last
= match_info_list
[0] as MatchInfo
;
471 last
.SecondaryMatches
.ProjectOnto (last
.PrimaryMatches
);
473 return last
.PrimaryMatches
;
476 ////////////////////////////////////////////////////////////////
// Folds tf*idf relevancy scores for each query term into the hits'
// Score fields: for every term, walk its posting list and, for each
// document id that corresponds to one of our hits, add
// tf (term frequency in that document) * idf (inverse document
// frequency of the term) to that hit's score.
//
// NOTE(review): reconstructed from a corrupted extraction — the
// "reader" parameter (used in the body and supplied by the visible
// three-argument call site), several local declarations, the hit
// null-check and the hit_count decrement were missing and have been
// restored.  The corrupted text also read "similarity.Ldf"; the
// Lucene Similarity API spells this Idf.
static private void ScoreHits (Hashtable hits_by_id,
			       IndexReader reader,
			       ICollection term_list)
{
	Stopwatch sw;
	sw = new Stopwatch ();
	sw.Start ();

	LNS.Similarity similarity;
	similarity = LNS.Similarity.GetDefault ();

	foreach (Term term in term_list) {

		double idf;
		idf = similarity.Idf (reader.DocFreq (term), reader.MaxDoc ());

		// hit_count lets us stop walking this term's posting
		// list as soon as every hit has been scored for it.
		int hit_count;
		hit_count = hits_by_id.Count;

		TermDocs term_docs;
		term_docs = reader.TermDocs (term);
		while (term_docs.Next () && hit_count > 0) {

			int id;
			id = term_docs.Doc ();

			// Posting lists cover the whole index; only score
			// documents that are among our hits.
			Hit hit;
			hit = hits_by_id [id] as Hit;
			if (hit != null) {
				double tf;
				tf = similarity.Tf (term_docs.Freq ());
				hit.Score += tf * idf;
				--hit_count;
			}
		}
	}

	sw.Stop ();
}
518 ////////////////////////////////////////////////////////////////
520 // Lame iterator methods because we use two different ones
521 // depending on which algorithm we use.
// Yields the index of each set bit in primary_matches, scanning
// from the end of the bit array toward the beginning.
//
// NOTE(review): reconstructed from a corrupted extraction — the
// loop construct, the termination test and the yield statement
// were missing; they follow the shape implied by the surviving
// statements (a GetPreviousTrueIndex walk with "j = i-1").
private static IEnumerable IterateMatches (BetterBitArray primary_matches)
{
	int j = primary_matches.Count;

	// Walk across the matches backwards, since newer
	// documents are more likely to be at the end of
	// the index.
	while (true) {
		int i = primary_matches.GetPreviousTrueIndex (j);
		if (i < 0)
			break;
		j = i-1; // This way we can't forget to adjust i

		yield return i;
	}
}
// Yields the document ids in ordered_matches, preserving the
// array's order (used when matches were pre-sorted by timestamp).
//
// NOTE(review): reconstructed from a corrupted extraction — the
// declaration of the loop variable and the braces were missing.
private static IEnumerable IterateOrderedMatches (int[] ordered_matches)
{
	int i;

	for (i = 0; i < ordered_matches.Length; i++)
		yield return ordered_matches [i];
}
// Pairs a loaded Lucene Document with its index-internal document
// id, so later stages can key hits by id without re-fetching.
// NOTE(review): the field list was missing from this corrupted
// extraction; Doc and Id are restored from their visible uses
// (doc_and_id.Doc / doc_and_id.Id) elsewhere in this file.
private class DocAndId {
	public Document Doc;
	public int Id;
}
553 // Given a set of hits, broadcast some set out as our query
557 private static void GenerateQueryResults (IndexReader primary_reader
,
558 LNS
.IndexSearcher primary_searcher
,
559 LNS
.IndexSearcher secondary_searcher
,
560 BetterBitArray primary_matches
,
562 ICollection query_term_list
,
564 UriFilter uri_filter
,
565 HitFilter hit_filter
,
568 TopScores top_docs
= null;
569 ArrayList all_docs
= null;
572 Logger
.Log
.Debug (">>> {0}: Initially handed {1} matches", index_name
, primary_matches
.TrueCount
);
574 if (primary_matches
.TrueCount
<= max_results
) {
576 Logger
.Log
.Debug (">>> {0}: Initial count is within our limit of {1}", index_name
, max_results
);
577 all_docs
= new ArrayList ();
580 Logger
.Log
.Debug (">>> {0}: Number of hits is capped at {1}", index_name
, max_results
);
581 top_docs
= new TopScores (max_results
);
584 Stopwatch total
, a
, b
, c
, d
;
585 total
= new Stopwatch ();
587 b
= new Stopwatch ();
588 c
= new Stopwatch ();
589 d
= new Stopwatch ();
593 // There are two ways we can determine the max_results
594 // most recent items:
596 // One is to instantiate Lucene documents for each of
597 // the document IDs in primary_matches. This is a
598 // fairly expensive operation.
600 // The other is to walk through the list of all
601 // document IDs in descending time order. This is
602 // a less expensive operation, but adds up over time
603 // on large data sets.
605 // We can walk about 2.5 docs for every Document we
606 // instantiate. So what we'll do, if we have more
607 // matches than available hits, is walk (m * 1.25)
608 // docs to see if we can fill out the top 100 hits.
609 // If not, we'll fall back to creating documents
612 int[] ordered_matches
= null;
613 if (primary_matches
.TrueCount
> max_results
) {
614 a
= new Stopwatch ();
617 TermDocs docs
= primary_reader
.TermDocs ();
618 TermEnum enumerator
= primary_reader
.Terms (new Term ("InvertedTimestamp", ""));
619 ordered_matches
= new int [max_results
];
622 int max_docs
= (int) (primary_matches
.TrueCount
* 1.25);
625 Term term
= enumerator
.Term ();
627 if (term
.Field () != "InvertedTimestamp")
630 docs
.Seek (enumerator
);
633 && docs_found
< ordered_matches
.Length
634 && docs_walked
< max_docs
) {
635 int doc_id
= docs
.Doc ();
637 if (primary_matches
.Get (doc_id
)) {
638 ordered_matches
[docs_found
] = docs
.Doc ();
644 } while (enumerator
.Next ()
645 && docs_found
< ordered_matches
.Length
646 && docs_walked
< max_docs
);
650 // We've found all the docs we can return in a subset!
651 // Fantastic, we've probably short circuited a slow search.
652 if (docs_found
== max_results
) {
653 all_docs
= new ArrayList ();
659 Log
.Debug (">>> {0}: Walked {1} items, populated an enum with {2} items", index_name
, docs_walked
, docs_found
, a
);
661 if (docs_found
== max_results
)
662 Log
.Debug (">>> {0}: Successfully short circuited timestamp ordering!", index_name
);
669 IEnumerable enumerable
;
671 if (ordered_matches
!= null) {
672 enumerable
= IterateOrderedMatches (ordered_matches
);
674 enumerable
= IterateMatches (primary_matches
);
676 foreach (int i
in enumerable
) {
680 doc
= primary_searcher
.Doc (i
);
682 // Check the timestamp --- if we have already reached our
683 // limit, we might be able to reject it immediately.
684 string timestamp_str
;
685 long timestamp_num
= 0;
687 timestamp_str
= doc
.Get ("Timestamp");
688 if (timestamp_str
== null) {
689 Logger
.Log
.Warn ("No timestamp on {0}!", GetUriFromDocument (doc
));
691 timestamp_num
= Int64
.Parse (doc
.Get ("Timestamp"));
692 if (top_docs
!= null && ! top_docs
.WillAccept (timestamp_num
))
696 // If we have a UriFilter, apply it.
697 if (uri_filter
!= null) {
699 uri
= GetUriFromDocument (doc
);
700 if (! uri_filter (uri
))
704 DocAndId doc_and_id
= new DocAndId ();
705 doc_and_id
.Doc
= doc
;
708 // Add the document to the appropriate data structure.
709 // We use the timestamp_num as the score, so high
710 // scores correspond to more-recent timestamps.
711 if (all_docs
!= null)
712 all_docs
.Add (doc_and_id
);
714 top_docs
.Add (timestamp_num
, doc_and_id
);
718 Log
.Debug (">>> {0}: Processed roughly {1} documents", index_name
, count
);
724 ICollection final_list_of_docs
;
725 if (all_docs
!= null)
726 final_list_of_docs
= all_docs
;
728 final_list_of_docs
= top_docs
.TopScoringObjects
;
730 ArrayList final_list_of_hits
;
731 final_list_of_hits
= new ArrayList (final_list_of_docs
.Count
);
733 // This is used only for scoring
734 Hashtable hits_by_id
= null;
735 hits_by_id
= new Hashtable ();
737 // If we aren't using the secondary index, the next step is
738 // very straightforward.
739 if (secondary_searcher
== null) {
741 foreach (DocAndId doc_and_id
in final_list_of_docs
) {
743 hit
= DocumentToHit (doc_and_id
.Doc
);
744 hits_by_id
[doc_and_id
.Id
] = hit
;
745 final_list_of_hits
.Add (hit
);
751 Logger
.Log
.Debug (">>> {0}: Performing cross-index Hit reunification", index_name
);
753 Hashtable hits_by_uri
;
754 hits_by_uri
= UriFu
.NewHashtable ();
756 LuceneBitArray secondary_matches
;
757 secondary_matches
= new LuceneBitArray (secondary_searcher
);
759 foreach (DocAndId doc_and_id
in final_list_of_docs
) {
761 hit
= DocumentToHit (doc_and_id
.Doc
);
762 hits_by_id
[doc_and_id
.Id
] = hit
;
763 hits_by_uri
[hit
.Uri
] = hit
;
764 secondary_matches
.AddUri (hit
.Uri
);
767 secondary_matches
.FlushUris ();
769 // Attach all of our secondary properties
773 int i
= secondary_matches
.GetNextTrueIndex (j
);
774 if (i
>= secondary_matches
.Count
)
778 Document secondary_doc
;
779 secondary_doc
= secondary_searcher
.Doc (i
);
782 uri
= GetUriFromDocument (secondary_doc
);
785 hit
= hits_by_uri
[uri
] as Hit
;
787 AddPropertiesToHit (hit
, secondary_doc
, false);
789 final_list_of_hits
.Add (hit
);
793 ScoreHits (hits_by_id
, primary_reader
, query_term_list
);
797 // If we used the TopScores object, we got our original
798 // list of documents sorted for us. If not, sort the
800 if (top_docs
== null)
801 final_list_of_hits
.Sort ();
805 // If we have a hit_filter, use it now.
806 if (hit_filter
!= null) {
807 for (int i
= 0; i
< final_list_of_hits
.Count
; ++i
) {
809 hit
= final_list_of_hits
[i
] as Hit
;
810 if (! hit_filter (hit
)) {
812 Logger
.Log
.Debug ("Filtered out {0}", hit
.Uri
);
813 final_list_of_hits
[i
] = null;
818 // Before we broadcast a hit, we strip out any
819 // properties in the PrivateNamespace. We
820 // manipulate the property ArrayList directory,
821 // which is pretty gross... but this is safe,
822 // since removing items will not change the sort
824 foreach (Hit hit
in final_list_of_hits
) {
828 while (i
< hit
.Properties
.Count
) {
829 Property prop
= hit
.Properties
[i
] as Property
;
830 if (prop
.Key
.StartsWith (PrivateNamespace
))
831 hit
.Properties
.RemoveAt (i
);
837 result
.Add (final_list_of_hits
);
843 Logger
.Log
.Debug (">>> {0}: GenerateQueryResults time statistics:", index_name
);
844 Logger
.Log
.Debug (">>> {0}: Short circuit {1,6} ({2:0.0}%)", index_name
, a
== null ? "N/A" : a
.ToString (), a
== null ? 0.0 : 100 * a
.ElapsedTime
/ total
.ElapsedTime
);
845 Logger
.Log
.Debug (">>> {0}: First pass {1,6} ({2:0.0}%)", index_name
, b
, 100 * b
.ElapsedTime
/ total
.ElapsedTime
);
846 Logger
.Log
.Debug (">>> {0}: Hit assembly {1,6} ({2:0.0}%)", index_name
, c
, 100 * c
.ElapsedTime
/ total
.ElapsedTime
);
847 Logger
.Log
.Debug (">>> {0}: Final pass {1,6} ({2:0.0}%)", index_name
, d
, 100 * d
.ElapsedTime
/ total
.ElapsedTime
);
848 Logger
.Log
.Debug (">>> {0}: TOTAL {1,6}", index_name
, total
);