NoiseFilter: Don't drop last word of apparent hostnames. Too many non-hostnames can...
[beagle.git] / beagled / NoiseFilter.cs
blob 14e3822d5345d4d579cb4ed98910a6edaa5b1580
//
// NoiseFilter.cs
//
// Copyright (C) 2006 Debajyoti Bera <dbera.web@gmail.com>
// Copyright (C) 2004-2005 Novell, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
using System;
using System.Collections;

using Lucene.Net.Analysis;
using LNSA = Lucene.Net.Analysis.Standard;
namespace Beagle.Daemon {

	// TokenFilter which does several fancy things
	// 1. Removes words which are potential noise like dhyhy8ju7q9
	// 2. Splits email addresses into meaningful tokens
	// 3. Splits hostnames into subparts
	class NoiseEmailHostFilter : TokenFilter {
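
		// Running totals of tokens seen and tokens dropped as noise;
		// only read by the (currently disabled) debug logging in Next ().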
		static int total_count = 0;
		static int noise_count = 0;

		TokenStream token_stream;

		public NoiseEmailHostFilter (TokenStream input) : base (input)
		{
			token_stream = input;
		}

		// FIXME: we should add some heuristics that are stricter
		// but explicitly try to avoid filtering out dates,
		// phone numbers, etc.
		private static bool IsNoise (string text)
		{
			// Anything really long is almost certainly noise.
			if (text.Length > 30)
				return true;

			// Look at how often we switch between numbers and letters.
			// Scoring:
			// <letter> <digit>   1
			// <digit> <letter>   1
			// <x> <punct>+ <x>   1
			// <x> <punct>+ <y>   2
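			//
			// e.g. "dhyhy8ju7q9" switches between letters and digits often
			// enough to hit the cutoff below and is treated as noise, while
			// something like "2006-01-02" stays under the cutoff.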
			const int transitions_cutoff = 4;
			int last_type = -1, last_non_punct_type = -1, first_type = -1;
			bool has_letter = false, has_digit = false, has_punctuation = false;
			int transitions = 0;
			for (int i = 0; i < text.Length && transitions < transitions_cutoff; ++i) {
				char c = text [i];
				int type = -1;
				if (Char.IsLetter (c)) {
					type = 1;
					has_letter = true;
				} else if (Char.IsDigit (c)) {
					type = 2;
					has_digit = true;
				} else if (Char.IsPunctuation (c)) {
					type = 3;
					has_punctuation = true;
				}

				if (type != -1) {

					if (type != last_type) {
						if (last_type == 3) {
							if (type != last_non_punct_type)
								++transitions;
						} else {
							++transitions;
						}
					}

					if (first_type == -1)
						first_type = type;

					last_type = type;
					if (type != 3)
						last_non_punct_type = type;
				}
			}

			// If we make too many transitions, it must be noise.
			if (transitions >= transitions_cutoff)
				return true;

			// If we consist of nothing but digits and punctuation, treat it
			// as noise if it is too long.
			if (transitions == 1 && first_type != 1 && text.Length > 10)
				return true;

			// We are very suspicious of long things that make lots of
			// transitions.
			if (transitions > 3 && text.Length > 10)
				return true;

			// Beware of anything long that contains a little of everything.
			if (has_letter && has_digit && has_punctuation && text.Length > 10)
				return true;

			//Logger.Log.Debug ("BeagleNoiseFilter accepted '{0}'", text);
			return false;
		}

		// Don't scan these tokens for additional noise.
		// Someone might like to search for emails, hostnames and
		// phone numbers (which fall under type NUM).
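		// The strings below are the image strings that StandardTokenizer
		// reports through Token.Type (), which ProcessToken compares against.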
		private static readonly string tokentype_email
			= LNSA.StandardTokenizerConstants.tokenImage [LNSA.StandardTokenizerConstants.EMAIL];
		private static readonly string tokentype_host
			= LNSA.StandardTokenizerConstants.tokenImage [LNSA.StandardTokenizerConstants.HOST];
		private static readonly string tokentype_number
			= LNSA.StandardTokenizerConstants.tokenImage [LNSA.StandardTokenizerConstants.NUM];

		private bool ProcessToken (Lucene.Net.Analysis.Token token)
		{
			string type = token.Type ();

			if (type == tokentype_email) {
				ProcessEmailToken (token);
				return true;
			} else if (type == tokentype_host) {
				ProcessURLToken (token);
				return true;
			} else if (type == tokentype_number)
				// nobody will remember more than 10 digits
				return (token.TermText ().Length <= 10);
			else
				return false;
		}
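
		// Sub-tokens split off from an email or hostname token, waiting to
		// be returned one at a time by Next ().  'token' holds the token
		// most recently read from the wrapped stream.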
		private Queue parts = new Queue ();
		private Lucene.Net.Analysis.Token token;

		public override Lucene.Net.Analysis.Token Next ()
		{
			if (parts.Count != 0) {
				string part = (string) parts.Dequeue ();
				Lucene.Net.Analysis.Token part_token;
				// FIXME: Searching for google.com will not match www.google.com.
				// If we decide to allow google-style "abcd.1234", which means
				// "abcd 1234" as a consecutive phrase, then adjusting
				// the startOffset and endOffset would enable matching
				// google.com to www.google.com.
				part_token = new Lucene.Net.Analysis.Token (part,
									    token.StartOffset (),
									    token.EndOffset (),
									    token.Type ());
				part_token.SetPositionIncrement (0);
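				// A zero position increment stacks this sub-token at the
				// same position as the token returned just before it.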
				return part_token;
			}

			while ( (token = token_stream.Next ()) != null) {
				//Console.WriteLine ("Found token: [{0}]", token.TermText ());
#if false
				if (total_count > 0 && total_count % 5000 == 0)
					Logger.Log.Debug ("BeagleNoiseFilter filtered {0} of {1} ({2:0.0}%)",
							  noise_count, total_count, 100.0 * noise_count / total_count);
#endif
				++total_count;
				if (ProcessToken (token))
					return token;
				if (IsNoise (token.TermText ())) {
					++noise_count;
					continue;
				}
				return token;
			}
			return null;
		}
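
		// Characters on which an email address is split into sub-tokens.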
		char[] replace_array = { '@', '.', '-', '_', '+' };
		private void ProcessEmailToken (Lucene.Net.Analysis.Token token)
		{
			string email = token.TermText ();
			string[] tmp = email.Split (replace_array);
			int l = tmp.Length;

			// store username part as a large token
			int index_at = email.IndexOf ('@');
			tmp [l-1] = email.Substring (0, index_at);
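
			// e.g. "foo.bar@example.com" ends up as the parts "foo", "bar",
			// "example" and "foo.bar"; the last piece of the split (the TLD)
			// is overwritten by the full username.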
			foreach (string s in tmp)
				parts.Enqueue (s);
		}

		private void ProcessURLToken (Lucene.Net.Analysis.Token token)
		{
			string hostname = token.TermText ();
			string[] host_parts = hostname.Split ('.');

			// remove initial www
			int begin_index = (host_parts [0] == "www" ? 1 : 0);
			// FIXME: Remove the final TLD.
			// Any string of the form "(<alnum> '.')+ <alnum>" has type HOST,
			// so removing the last token might remove important words from
			// non-host strings of that form. To fix that, we would need to
			// match against the huge list of TLDs.
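			// e.g. "www.google.com" enqueues "google" and "com"; the last
			// part is deliberately kept, since the token may not really be
			// a hostname.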
			for (int i = begin_index; i < host_parts.Length; ++i)
				parts.Enqueue (host_parts [i]);
		}
	}
}