1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
#include "components/omnibox/browser/scored_history_match.h"

#include <math.h>

#include <algorithm>
#include <string>
#include <vector>

#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_offset_string_conversions.h"
#include "base/strings/utf_string_conversions.h"
#include "components/bookmarks/browser/bookmark_utils.h"
#include "components/omnibox/browser/history_url_provider.h"
#include "components/omnibox/browser/omnibox_field_trial.h"
#include "components/omnibox/browser/url_prefix.h"
// The number of days of recency scores to precompute.
const int kDaysToPrecomputeRecencyScoresFor = 366;

// The number of raw term score buckets to use; raw term scores greater than
// this are capped at the score of the largest bucket.
const int kMaxRawTermScore = 30;

// Pre-computed information to speed up calculating recency scores.
// |days_ago_to_recency_score| is a simple array mapping how long ago a page
// was visited (in days) to the recency score we should assign it.  This
// allows easy lookups of scores without requiring math.  This is initialized
// by InitDaysAgoToRecencyScoreArray called by ScoredHistoryMatch::Init().
float days_ago_to_recency_score[kDaysToPrecomputeRecencyScoresFor];

// Pre-computed information to speed up calculating topicality scores.
// |raw_term_score_to_topicality_score| is a simple array mapping raw term
// scores (a weighted sum of the number of hits for the term, weighted by how
// important the hit is: hostname, path, etc.) to the topicality score we
// should assign it.  This allows easy lookups of scores without requiring
// math.  This is initialized by InitRawTermScoreToTopicalityScoreArray()
// called from ScoredHistoryMatch::Init().
float raw_term_score_to_topicality_score[kMaxRawTermScore];

// Precalculates raw_term_score_to_topicality_score, used in
// GetTopicalityScore().
void InitRawTermScoreToTopicalityScoreArray() {
  for (int term_score = 0; term_score < kMaxRawTermScore; ++term_score) {
    float topicality_score;
    if (term_score < 10) {
      // If the term scores less than 10 points (no full-credit hit, or
      // no combination of hits that score that well), then the topicality
      // score is linear in the term score.
      topicality_score = 0.1 * term_score;
    } else {
      // For term scores of at least ten points, pass them through a log
      // function so a score of 10 points gets a 1.0 (to meet up exactly
      // with the linear component) and increases logarithmically until
      // maxing out at 30 points, which computes to a score around 2.1.
      topicality_score = (1.0 + 2.25 * log10(0.1 * term_score));
    }
    raw_term_score_to_topicality_score[term_score] = topicality_score;
  }
}
71 // Pre-calculates days_ago_to_recency_score, used in GetRecencyScore().
72 void InitDaysAgoToRecencyScoreArray() {
73 for (int days_ago
= 0; days_ago
< kDaysToPrecomputeRecencyScoresFor
;
75 int unnormalized_recency_score
;
77 unnormalized_recency_score
= 100;
78 } else if (days_ago
<= 14) {
79 // Linearly extrapolate between 4 and 14 days so 14 days has a score
81 unnormalized_recency_score
= 70 + (14 - days_ago
) * (100 - 70) / (14 - 4);
82 } else if (days_ago
<= 31) {
83 // Linearly extrapolate between 14 and 31 days so 31 days has a score
85 unnormalized_recency_score
= 50 + (31 - days_ago
) * (70 - 50) / (31 - 14);
86 } else if (days_ago
<= 90) {
87 // Linearly extrapolate between 30 and 90 days so 90 days has a score
89 unnormalized_recency_score
= 30 + (90 - days_ago
) * (50 - 30) / (90 - 30);
91 // Linearly extrapolate between 90 and 365 days so 365 days has a score
93 unnormalized_recency_score
=
94 10 + (365 - days_ago
) * (20 - 10) / (365 - 90);
96 days_ago_to_recency_score
[days_ago
] = unnormalized_recency_score
/ 100.0;
98 DCHECK_LE(days_ago_to_recency_score
[days_ago
],
99 days_ago_to_recency_score
[days_ago
- 1]);
107 const size_t ScoredHistoryMatch::kMaxVisitsToScore
= 10;
108 bool ScoredHistoryMatch::also_do_hup_like_scoring_
= false;
109 int ScoredHistoryMatch::bookmark_value_
= 1;
110 bool ScoredHistoryMatch::fix_frequency_bugs_
= false;
111 bool ScoredHistoryMatch::allow_tld_matches_
= false;
112 bool ScoredHistoryMatch::allow_scheme_matches_
= false;
113 size_t ScoredHistoryMatch::num_title_words_to_allow_
= 10u;
114 bool ScoredHistoryMatch::hqp_experimental_scoring_enabled_
= false;
115 float ScoredHistoryMatch::topicality_threshold_
= 0.8f
;
116 // Default HQP relevance buckets. See GetFinalRelevancyScore()
117 // for more details on these numbers.
118 char ScoredHistoryMatch::hqp_relevance_buckets_str_
[] =
119 "0.0:400,1.5:600,5.0:900,10.5:1203,15.0:1300,20.0:1399";
120 std::vector
<ScoredHistoryMatch::ScoreMaxRelevance
>*
121 ScoredHistoryMatch::hqp_relevance_buckets_
= nullptr;
123 ScoredHistoryMatch::ScoredHistoryMatch()
124 : ScoredHistoryMatch(history::URLRow(),
135 ScoredHistoryMatch::ScoredHistoryMatch(
136 const history::URLRow
& row
,
137 const VisitInfoVector
& visits
,
138 const std::string
& languages
,
139 const base::string16
& lower_string
,
140 const String16Vector
& terms_vector
,
141 const WordStarts
& terms_to_word_starts_offsets
,
142 const RowWordStarts
& word_starts
,
143 bool is_url_bookmarked
,
145 : HistoryMatch(row
, 0, false, false), raw_score(0), can_inline(false) {
146 // NOTE: Call Init() before doing any validity checking to ensure that the
147 // class is always initialized after an instance has been constructed. In
148 // particular, this ensures that the class is initialized after an instance
149 // has been constructed via the no-args constructor.
150 ScoredHistoryMatch::Init();
152 GURL gurl
= row
.url();
153 if (!gurl
.is_valid())
156 // Figure out where each search term appears in the URL and/or page title
157 // so that we can score as well as provide autocomplete highlighting.
158 base::OffsetAdjuster::Adjustments adjustments
;
160 bookmarks::CleanUpUrlForMatching(gurl
, languages
, &adjustments
);
161 base::string16 title
= bookmarks::CleanUpTitleForMatching(row
.title());
163 for (const auto& term
: terms_vector
) {
164 TermMatches url_term_matches
= MatchTermInString(term
, url
, term_num
);
165 TermMatches title_term_matches
= MatchTermInString(term
, title
, term_num
);
166 if (url_term_matches
.empty() && title_term_matches
.empty()) {
167 // A term was not found in either URL or title - reject.
170 url_matches
.insert(url_matches
.end(), url_term_matches
.begin(),
171 url_term_matches
.end());
172 title_matches
.insert(title_matches
.end(), title_term_matches
.begin(),
173 title_term_matches
.end());
177 // Sort matches by offset and eliminate any which overlap.
178 // TODO(mpearson): Investigate whether this has any meaningful
179 // effect on scoring. (It's necessary at some point: removing
180 // overlaps and sorting is needed to decide what to highlight in the
181 // suggestion string. But this sort and de-overlap doesn't have to
182 // be done before scoring.)
183 url_matches
= SortAndDeoverlapMatches(url_matches
);
184 title_matches
= SortAndDeoverlapMatches(title_matches
);
186 // We can inline autocomplete a match if:
187 // 1) there is only one search term
188 // 2) AND the match begins immediately after one of the prefixes in
189 // URLPrefix such as http://www and https:// (note that one of these
190 // is the empty prefix, for cases where the user has typed the scheme)
191 // 3) AND the search string does not end in whitespace (making it look to
192 // the IMUI as though there is a single search term when actually there
193 // is a second, empty term).
194 // |best_inlineable_prefix| stores the inlineable prefix computed in
195 // clause (2) or NULL if no such prefix exists. (The URL is not inlineable.)
196 // Note that using the best prefix here means that when multiple
197 // prefixes match, we'll choose to inline following the longest one.
198 // For a URL like "http://www.washingtonmutual.com", this means
199 // typing "w" will inline "ashington..." instead of "ww.washington...".
200 if (!url_matches
.empty() && (terms_vector
.size() == 1) &&
201 !base::IsUnicodeWhitespace(*lower_string
.rbegin())) {
202 const base::string16 gurl_spec
= base::UTF8ToUTF16(gurl
.spec());
203 const URLPrefix
* best_inlineable_prefix
=
204 URLPrefix::BestURLPrefix(gurl_spec
, terms_vector
[0]);
205 if (best_inlineable_prefix
) {
206 // When inline autocompleting this match, we're going to use the part of
207 // the URL following the end of the matching text. However, it's possible
208 // that FormatUrl(), when formatting this suggestion for display,
209 // mucks with the text. We need to ensure that the text we're thinking
210 // about highlighting isn't in the middle of a mucked sequence. In
211 // particular, for the omnibox input of "x" or "xn", we may get a match
212 // in a punycoded domain name such as http://www.xn--blahblah.com/.
213 // When FormatUrl() processes the xn--blahblah part of the hostname, it'll
214 // transform the whole thing into a series of unicode characters. It's
215 // impossible to give the user an inline autocompletion of the text
216 // following "x" or "xn" in this case because those characters no longer
217 // exist in the displayed URL string.
219 best_inlineable_prefix
->prefix
.length() + terms_vector
[0].length();
220 base::OffsetAdjuster::UnadjustOffset(adjustments
, &offset
);
221 if (offset
!= base::string16::npos
) {
222 // Initialize innermost_match.
223 // The idea here is that matches that occur in the scheme or
224 // "www." are worse than matches which don't. For the URLs
225 // "http://www.google.com" and "http://wellsfargo.com", we want
226 // the omnibox input "w" to cause the latter URL to rank higher
227 // than the former. Note that this is not the same as checking
228 // whether one match's inlinable prefix has more components than
229 // the other match's, since in this example, both matches would
230 // have an inlinable prefix of "http://", which is one component.
232 // Instead, we look for the overall best (i.e., most components)
233 // prefix of the current URL, and then check whether the inlinable
234 // prefix has that many components. If it does, this is an
235 // "innermost" match, and should be boosted. In the example
236 // above, the best prefixes for the two URLs have two and one
237 // components respectively, while the inlinable prefixes each
238 // have one component; this means the first match is not innermost
239 // and the second match is innermost, resulting in us boosting the
242 // Now, the code that implements this.
243 // The deepest prefix for this URL regardless of where the match is.
244 const URLPrefix
* best_prefix
=
245 URLPrefix::BestURLPrefix(gurl_spec
, base::string16());
247 // If the URL is inlineable, we must have a match. Note the prefix that
248 // makes it inlineable may be empty.
250 innermost_match
= (best_inlineable_prefix
->num_components
==
251 best_prefix
->num_components
);
256 const float topicality_score
= GetTopicalityScore(
257 terms_vector
.size(), url
, terms_to_word_starts_offsets
, word_starts
);
258 const float frequency_score
= GetFrequency(now
, is_url_bookmarked
, visits
);
259 raw_score
= base::saturated_cast
<int>(GetFinalRelevancyScore(
260 topicality_score
, frequency_score
, *hqp_relevance_buckets_
));
262 if (also_do_hup_like_scoring_
&& can_inline
) {
263 // HistoryURL-provider-like scoring gives any match that is
264 // capable of being inlined a certain minimum score. Some of these
265 // are given a higher score that lets them be shown in inline.
266 // This test here derives from the test in
267 // HistoryURLProvider::PromoteMatchForInlineAutocomplete().
268 const bool promote_to_inline
=
269 (row
.typed_count() > 1) || (IsHostOnly() && (row
.typed_count() == 1));
272 ? HistoryURLProvider::kScoreForBestInlineableResult
273 : HistoryURLProvider::kBaseScoreForNonInlineableResult
;
275 // Also, if the user types the hostname of a host with a typed
276 // visit, then everything from that host get given inlineable scores
277 // (because the URL-that-you-typed will go first and everything
278 // else will be assigned one minus the previous score, as coded
279 // at the end of HistoryURLProvider::DoAutocomplete().
280 if (base::UTF8ToUTF16(gurl
.host()) == terms_vector
[0])
281 hup_like_score
= HistoryURLProvider::kScoreForBestInlineableResult
;
283 // HistoryURLProvider has the function PromoteOrCreateShorterSuggestion()
284 // that's meant to promote prefixes of the best match (if they've
285 // been visited enough related to the best match) or
286 // create/promote host-only suggestions (even if they've never
287 // been typed). The code is complicated and we don't try to
288 // duplicate the logic here. Instead, we handle a simple case: in
289 // low-typed-count ranges, give host-only matches (i.e.,
290 // http://www.foo.com/ vs. http://www.foo.com/bar.html) a boost so
291 // that the host-only match outscores all the other matches that
292 // would normally have the same base score. This behavior is not
293 // identical to what happens in HistoryURLProvider even in these
294 // low typed count ranges--sometimes it will create/promote when
295 // this test does not (indeed, we cannot create matches like HUP
296 // can) and vice versa--but the underlying philosophy is similar.
297 if (!promote_to_inline
&& IsHostOnly())
300 // All the other logic to goes into hup-like-scoring happens in
301 // the tie-breaker case of MatchScoreGreater().
303 // Incorporate hup_like_score into raw_score.
304 raw_score
= std::max(raw_score
, hup_like_score
);
307 // Now that we're done processing this entry, correct the offsets of the
308 // matches in |url_matches| so they point to offsets in the original URL
309 // spec, not the cleaned-up URL string that we used for matching.
310 std::vector
<size_t> offsets
= OffsetsFromTermMatches(url_matches
);
311 base::OffsetAdjuster::UnadjustOffsets(adjustments
, &offsets
);
312 url_matches
= ReplaceOffsetsInTermMatches(url_matches
, offsets
);
315 ScoredHistoryMatch::~ScoredHistoryMatch() {
318 // Comparison function for sorting ScoredMatches by their scores with
319 // intelligent tie-breaking.
320 bool ScoredHistoryMatch::MatchScoreGreater(const ScoredHistoryMatch
& m1
,
321 const ScoredHistoryMatch
& m2
) {
322 if (m1
.raw_score
!= m2
.raw_score
)
323 return m1
.raw_score
> m2
.raw_score
;
325 // This tie-breaking logic is inspired by / largely copied from the
326 // ordering logic in history_url_provider.cc CompareHistoryMatch().
328 // A URL that has been typed at all is better than one that has never been
329 // typed. (Note "!"s on each side.)
330 if (!m1
.url_info
.typed_count() != !m2
.url_info
.typed_count())
331 return m1
.url_info
.typed_count() > m2
.url_info
.typed_count();
333 // Innermost matches (matches after any scheme or "www.") are better than
334 // non-innermost matches.
335 if (m1
.innermost_match
!= m2
.innermost_match
)
336 return m1
.innermost_match
;
338 // URLs that have been typed more often are better.
339 if (m1
.url_info
.typed_count() != m2
.url_info
.typed_count())
340 return m1
.url_info
.typed_count() > m2
.url_info
.typed_count();
342 // For URLs that have each been typed once, a host (alone) is better
343 // than a page inside.
344 if (m1
.url_info
.typed_count() == 1) {
345 if (m1
.IsHostOnly() != m2
.IsHostOnly())
346 return m1
.IsHostOnly();
349 // URLs that have been visited more often are better.
350 if (m1
.url_info
.visit_count() != m2
.url_info
.visit_count())
351 return m1
.url_info
.visit_count() > m2
.url_info
.visit_count();
353 // URLs that have been visited more recently are better.
354 return m1
.url_info
.last_visit() > m2
.url_info
.last_visit();
358 TermMatches
ScoredHistoryMatch::FilterTermMatchesByWordStarts(
359 const TermMatches
& term_matches
,
360 const WordStarts
& terms_to_word_starts_offsets
,
361 const WordStarts
& word_starts
,
364 // Return early if no filtering is needed.
365 if (start_pos
== std::string::npos
)
367 TermMatches filtered_matches
;
368 WordStarts::const_iterator next_word_starts
= word_starts
.begin();
369 WordStarts::const_iterator end_word_starts
= word_starts
.end();
370 for (const auto& term_match
: term_matches
) {
371 const size_t term_offset
=
372 terms_to_word_starts_offsets
[term_match
.term_num
];
373 // Advance next_word_starts until it's >= the position of the term we're
374 // considering (adjusted for where the word begins within the term).
375 while ((next_word_starts
!= end_word_starts
) &&
376 (*next_word_starts
< (term_match
.offset
+ term_offset
)))
378 // Add the match if it's before the position we start filtering at or
379 // after the position we stop filtering at (assuming we have a position
380 // to stop filtering at) or if it's at a word boundary.
381 if ((term_match
.offset
< start_pos
) ||
382 ((end_pos
!= std::string::npos
) && (term_match
.offset
>= end_pos
)) ||
383 ((next_word_starts
!= end_word_starts
) &&
384 (*next_word_starts
== term_match
.offset
+ term_offset
)))
385 filtered_matches
.push_back(term_match
);
387 return filtered_matches
;
391 void ScoredHistoryMatch::Init() {
392 static bool initialized
= false;
398 also_do_hup_like_scoring_
= OmniboxFieldTrial::HQPAlsoDoHUPLikeScoring();
399 bookmark_value_
= OmniboxFieldTrial::HQPBookmarkValue();
400 fix_frequency_bugs_
= OmniboxFieldTrial::HQPFixFrequencyScoringBugs();
401 allow_tld_matches_
= OmniboxFieldTrial::HQPAllowMatchInTLDValue();
402 allow_scheme_matches_
= OmniboxFieldTrial::HQPAllowMatchInSchemeValue();
403 num_title_words_to_allow_
= OmniboxFieldTrial::HQPNumTitleWordsToAllow();
405 InitRawTermScoreToTopicalityScoreArray();
406 InitDaysAgoToRecencyScoreArray();
407 InitHQPExperimentalParams();
410 float ScoredHistoryMatch::GetTopicalityScore(
412 const base::string16
& url
,
413 const WordStarts
& terms_to_word_starts_offsets
,
414 const RowWordStarts
& word_starts
) {
415 // A vector that accumulates per-term scores. The strongest match--a
416 // match in the hostname at a word boundary--is worth 10 points.
417 // Everything else is less. In general, a match that's not at a word
418 // boundary is worth about 1/4th or 1/5th of a match at the word boundary
419 // in the same part of the URL/title.
420 DCHECK_GT(num_terms
, 0);
421 std::vector
<int> term_scores(num_terms
, 0);
422 WordStarts::const_iterator next_word_starts
=
423 word_starts
.url_word_starts_
.begin();
424 WordStarts::const_iterator end_word_starts
=
425 word_starts
.url_word_starts_
.end();
426 const size_t question_mark_pos
= url
.find('?');
427 const size_t colon_pos
= url
.find(':');
428 // The + 3 skips the // that probably appears in the protocol
429 // after the colon. If the protocol doesn't have two slashes after
430 // the colon, that's okay--all this ends up doing is starting our
431 // search for the next / a few characters into the hostname. The
432 // only times this can cause problems is if we have a protocol without
433 // a // after the colon and the hostname is only one or two characters.
434 // This isn't worth worrying about.
435 const size_t end_of_hostname_pos
= (colon_pos
!= std::string::npos
)
436 ? url
.find('/', colon_pos
+ 3)
438 size_t last_part_of_hostname_pos
= (end_of_hostname_pos
!= std::string::npos
)
439 ? url
.rfind('.', end_of_hostname_pos
)
441 // Loop through all URL matches and score them appropriately.
442 // First, filter all matches not at a word boundary and in the path (or
444 url_matches
= FilterTermMatchesByWordStarts(
445 url_matches
, terms_to_word_starts_offsets
, word_starts
.url_word_starts_
,
446 end_of_hostname_pos
, std::string::npos
);
447 if (colon_pos
!= std::string::npos
) {
448 // Also filter matches not at a word boundary and in the scheme.
449 url_matches
= FilterTermMatchesByWordStarts(
450 url_matches
, terms_to_word_starts_offsets
, word_starts
.url_word_starts_
,
453 for (const auto& url_match
: url_matches
) {
454 const size_t term_offset
= terms_to_word_starts_offsets
[url_match
.term_num
];
455 // Advance next_word_starts until it's >= the position of the term we're
456 // considering (adjusted for where the word begins within the term).
457 while ((next_word_starts
!= end_word_starts
) &&
458 (*next_word_starts
< (url_match
.offset
+ term_offset
))) {
461 const bool at_word_boundary
=
462 (next_word_starts
!= end_word_starts
) &&
463 (*next_word_starts
== url_match
.offset
+ term_offset
);
464 if ((question_mark_pos
!= std::string::npos
) &&
465 (url_match
.offset
> question_mark_pos
)) {
466 // The match is in a CGI ?... fragment.
467 DCHECK(at_word_boundary
);
468 term_scores
[url_match
.term_num
] += 5;
469 } else if ((end_of_hostname_pos
!= std::string::npos
) &&
470 (url_match
.offset
> end_of_hostname_pos
)) {
471 // The match is in the path.
472 DCHECK(at_word_boundary
);
473 term_scores
[url_match
.term_num
] += 8;
474 } else if ((colon_pos
== std::string::npos
) ||
475 (url_match
.offset
> colon_pos
)) {
476 // The match is in the hostname.
477 if ((last_part_of_hostname_pos
== std::string::npos
) ||
478 (url_match
.offset
< last_part_of_hostname_pos
)) {
479 // Either there are no dots in the hostname or this match isn't
480 // the last dotted component.
481 term_scores
[url_match
.term_num
] += at_word_boundary
? 10 : 2;
483 // The match is in the last part of a dotted hostname (usually this
484 // is the top-level domain .com, .net, etc.).
485 if (allow_tld_matches_
)
486 term_scores
[url_match
.term_num
] += at_word_boundary
? 10 : 0;
489 // The match is in the protocol (a.k.a. scheme).
490 // Matches not at a word boundary should have been filtered already.
491 DCHECK(at_word_boundary
);
492 match_in_scheme
= true;
493 if (allow_scheme_matches_
)
494 term_scores
[url_match
.term_num
] += 10;
497 // Now do the analogous loop over all matches in the title.
498 next_word_starts
= word_starts
.title_word_starts_
.begin();
499 end_word_starts
= word_starts
.title_word_starts_
.end();
501 title_matches
= FilterTermMatchesByWordStarts(
502 title_matches
, terms_to_word_starts_offsets
,
503 word_starts
.title_word_starts_
, 0, std::string::npos
);
504 for (const auto& title_match
: title_matches
) {
505 const size_t term_offset
=
506 terms_to_word_starts_offsets
[title_match
.term_num
];
507 // Advance next_word_starts until it's >= the position of the term we're
508 // considering (adjusted for where the word begins within the term).
509 while ((next_word_starts
!= end_word_starts
) &&
510 (*next_word_starts
< (title_match
.offset
+ term_offset
))) {
514 if (word_num
>= num_title_words_to_allow_
)
515 break; // only count the first ten words
516 DCHECK(next_word_starts
!= end_word_starts
);
517 DCHECK_EQ(*next_word_starts
, title_match
.offset
+ term_offset
)
518 << "not at word boundary";
519 term_scores
[title_match
.term_num
] += 8;
521 // TODO(mpearson): Restore logic for penalizing out-of-order matches.
522 // (Perhaps discount them by 0.8?)
523 // TODO(mpearson): Consider: if the earliest match occurs late in the string,
524 // should we discount it?
525 // TODO(mpearson): Consider: do we want to score based on how much of the
526 // input string the input covers? (I'm leaning toward no.)
528 // Compute the topicality_score as the sum of transformed term_scores.
529 float topicality_score
= 0;
530 for (int term_score
: term_scores
) {
531 // Drop this URL if it seems like a term didn't appear or, more precisely,
532 // didn't appear in a part of the URL or title that we trust enough
533 // to give it credit for. For instance, terms that appear in the middle
534 // of a CGI parameter get no credit. Almost all the matches dropped
535 // due to this test would look stupid if shown to the user.
538 topicality_score
+= raw_term_score_to_topicality_score
[std::min(
539 term_score
, kMaxRawTermScore
- 1)];
541 // TODO(mpearson): If there are multiple terms, consider taking the
542 // geometric mean of per-term scores rather than the arithmetic mean.
544 const float final_topicality_score
= topicality_score
/ num_terms
;
546 // Demote the URL if the topicality score is less than threshold.
547 if (final_topicality_score
< topicality_threshold_
) {
551 return final_topicality_score
;
554 float ScoredHistoryMatch::GetRecencyScore(int last_visit_days_ago
) const {
555 // Lookup the score in days_ago_to_recency_score, treating
556 // everything older than what we've precomputed as the oldest thing
557 // we've precomputed. The std::max is to protect against corruption
558 // in the database (in case last_visit_days_ago is negative).
559 return days_ago_to_recency_score
[std::max(
560 std::min(last_visit_days_ago
, kDaysToPrecomputeRecencyScoresFor
- 1), 0)];
563 float ScoredHistoryMatch::GetFrequency(const base::Time
& now
,
564 const bool bookmarked
,
565 const VisitInfoVector
& visits
) const {
566 // Compute the weighted average |value_of_transition| over the last at
567 // most kMaxVisitsToScore visits, where each visit is weighted using
568 // GetRecencyScore() based on how many days ago it happened. Use
569 // kMaxVisitsToScore as the denominator for the average regardless of
570 // how many visits there were in order to penalize a match that has
571 // fewer visits than kMaxVisitsToScore.
572 float summed_visit_points
= 0;
573 const size_t max_visit_to_score
=
574 std::min(visits
.size(), ScoredHistoryMatch::kMaxVisitsToScore
);
575 for (size_t i
= 0; i
< max_visit_to_score
; ++i
) {
576 const ui::PageTransition page_transition
= fix_frequency_bugs_
?
577 ui::PageTransitionStripQualifier(visits
[i
].second
) : visits
[i
].second
;
578 int value_of_transition
=
579 (page_transition
== ui::PAGE_TRANSITION_TYPED
) ? 20 : 1;
581 value_of_transition
= std::max(value_of_transition
, bookmark_value_
);
582 const float bucket_weight
=
583 GetRecencyScore((now
- visits
[i
].first
).InDays());
584 summed_visit_points
+= (value_of_transition
* bucket_weight
);
586 if (fix_frequency_bugs_
)
587 return summed_visit_points
/ ScoredHistoryMatch::kMaxVisitsToScore
;
588 return visits
.size() * summed_visit_points
/
589 ScoredHistoryMatch::kMaxVisitsToScore
;
593 float ScoredHistoryMatch::GetFinalRelevancyScore(
594 float topicality_score
,
595 float frequency_score
,
596 const std::vector
<ScoreMaxRelevance
>& hqp_relevance_buckets
) {
597 DCHECK(hqp_relevance_buckets
.size() > 0);
598 DCHECK_EQ(hqp_relevance_buckets
[0].first
, 0.0);
600 if (topicality_score
== 0)
602 // Here's how to interpret intermediate_score: Suppose the omnibox
603 // has one input term. Suppose we have a URL for which the omnibox
604 // input term has a single URL hostname hit at a word boundary. (This
605 // implies topicality_score = 1.0.). Then the intermediate_score for
606 // this URL will depend entirely on the frequency_score with
607 // this interpretation:
608 // - a single typed visit more than three months ago, no other visits -> 0.2
609 // - a visit every three days, no typed visits -> 0.706
610 // - a visit every day, no typed visits -> 0.916
611 // - a single typed visit yesterday, no other visits -> 2.0
612 // - a typed visit once a week -> 11.77
613 // - a typed visit every three days -> 14.12
614 // - at least ten typed visits today -> 20.0 (maximum score)
616 // The below code maps intermediate_score to the range [0, 1399].
618 // HQP default scoring buckets: "0.0:400,1.5:600,12.0:1300,20.0:1399"
619 // We will linearly interpolate the scores between:
620 // 0 to 1.5 --> 400 to 600
621 // 1.5 to 12.0 --> 600 to 1300
622 // 12.0 to 20.0 --> 1300 to 1399
625 // The score maxes out at 1399 (i.e., cannot beat a good inlineable result
626 // from HistoryURL provider).
627 const float intermediate_score
= topicality_score
* frequency_score
;
629 // Find the threshold where intermediate score is greater than bucket.
631 for (; i
< hqp_relevance_buckets
.size(); ++i
) {
632 const ScoreMaxRelevance
& hqp_bucket
= hqp_relevance_buckets
[i
];
633 if (intermediate_score
>= hqp_bucket
.first
) {
636 const ScoreMaxRelevance
& previous_bucket
= hqp_relevance_buckets
[i
- 1];
637 const float slope
= ((hqp_bucket
.second
- previous_bucket
.second
) /
638 (hqp_bucket
.first
- previous_bucket
.first
));
639 return (previous_bucket
.second
+
640 (slope
* (intermediate_score
- previous_bucket
.first
)));
642 // It will reach this stage when the score is > highest bucket score.
643 // Return the highest bucket score.
644 return hqp_relevance_buckets
[i
- 1].second
;
648 void ScoredHistoryMatch::InitHQPExperimentalParams() {
649 // These are default HQP relevance scoring buckets.
650 // See GetFinalRelevancyScore() for details.
651 std::string hqp_relevance_buckets_str
= std::string(
652 hqp_relevance_buckets_str_
);
654 // Fetch the experiment params if they are any.
655 hqp_experimental_scoring_enabled_
=
656 OmniboxFieldTrial::HQPExperimentalScoringEnabled();
658 if (hqp_experimental_scoring_enabled_
) {
659 // Add the topicality threshold from experiment params.
660 float hqp_experimental_topicality_threhold
=
661 OmniboxFieldTrial::HQPExperimentalTopicalityThreshold();
662 topicality_threshold_
= hqp_experimental_topicality_threhold
;
664 // Add the HQP experimental scoring buckets.
665 std::string hqp_experimental_scoring_buckets
=
666 OmniboxFieldTrial::HQPExperimentalScoringBuckets();
667 if (!hqp_experimental_scoring_buckets
.empty())
668 hqp_relevance_buckets_str
= hqp_experimental_scoring_buckets
;
671 // Parse the hqp_relevance_buckets_str string once and store them in vector
672 // which is easy to access.
673 hqp_relevance_buckets_
=
674 new std::vector
<ScoredHistoryMatch::ScoreMaxRelevance
>();
676 bool is_valid_bucket_str
= GetHQPBucketsFromString(hqp_relevance_buckets_str
,
677 hqp_relevance_buckets_
);
678 DCHECK(is_valid_bucket_str
);
682 bool ScoredHistoryMatch::GetHQPBucketsFromString(
683 const std::string
& buckets_str
,
684 std::vector
<ScoreMaxRelevance
>* hqp_buckets
) {
685 DCHECK(hqp_buckets
!= NULL
);
686 DCHECK(!buckets_str
.empty());
688 base::StringPairs kv_pairs
;
689 if (base::SplitStringIntoKeyValuePairs(buckets_str
, ':', ',', &kv_pairs
)) {
690 for (base::StringPairs::const_iterator it
= kv_pairs
.begin();
691 it
!= kv_pairs
.end(); ++it
) {
692 ScoreMaxRelevance bucket
;
693 bool is_valid_intermediate_score
=
694 base::StringToDouble(it
->first
, &bucket
.first
);
695 DCHECK(is_valid_intermediate_score
);
696 bool is_valid_hqp_score
= base::StringToInt(it
->second
, &bucket
.second
);
697 DCHECK(is_valid_hqp_score
);
698 hqp_buckets
->push_back(bucket
);