// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Implements a custom word iterator used for our spellchecker.

#include "chrome/renderer/spellchecker/spellcheck_worditerator.h"

#include <map>
#include <string>

#include "base/basictypes.h"
#include "base/i18n/break_iterator.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "chrome/renderer/spellchecker/spellcheck.h"
#include "third_party/icu/source/common/unicode/normlzr.h"
#include "third_party/icu/source/common/unicode/schriter.h"
#include "third_party/icu/source/common/unicode/uscript.h"
#include "third_party/icu/source/i18n/unicode/ulocdata.h"

using base::i18n::BreakIterator;

// SpellcheckCharAttribute implementation:

SpellcheckCharAttribute::SpellcheckCharAttribute()
    : script_code_(USCRIPT_LATIN) {
}

SpellcheckCharAttribute::~SpellcheckCharAttribute() {
}

void SpellcheckCharAttribute::SetDefaultLanguage(const std::string& language) {
  CreateRuleSets(language);
}

base::string16 SpellcheckCharAttribute::GetRuleSet(
    bool allow_contraction) const {
  return allow_contraction ?
      ruleset_allow_contraction_ : ruleset_disallow_contraction_;
}

void SpellcheckCharAttribute::CreateRuleSets(const std::string& language) {
  // The template for our custom rule sets, which is based on the word-break
  // rules of ICU 4.0:
  // <http://source.icu-project.org/repos/icu/icu/tags/release-4-0/source/data/brkitr/word.txt>.
  // The major differences from the original rules are listed below:
  // * It discards the comments in the original rules.
  // * It discards characters not needed by our spellchecker (e.g. numbers,
  //   punctuation characters, Hiragana, Katakana, CJK ideographs, and so on).
  // * It allows customization of the $ALetter value (i.e. word characters).
  // * It allows customization of the $ALetterPlus value (i.e. whether or not
  //   to use the dictionary data).
  // * It allows choosing whether or not to split a text at contraction
  //   characters.
  // This template only changes the forward-iteration rules, so calling
  // ubrk_prev() returns the same results as the original rules.
  static const char kRuleTemplate[] =
      "!!chain;"
      "$CR = [\\p{Word_Break = CR}];"
      "$LF = [\\p{Word_Break = LF}];"
      "$Newline = [\\p{Word_Break = Newline}];"
      "$Extend = [\\p{Word_Break = Extend}];"
      "$Format = [\\p{Word_Break = Format}];"
      "$Katakana = [\\p{Word_Break = Katakana}];"
      // Not all the characters in a given script are ALetter.
      // For instance, U+05F4 is MidLetter. So, this may be
      // better, but it leads to an empty set error in Thai.
      // "$ALetter = [[\\p{script=%s}] & [\\p{Word_Break = ALetter}]];"
      "$ALetter = [\\p{script=%s}%s];"
      // U+0027 (single quote/apostrophe) is not in MidNumLet any more
      // in UAX 29 rev 21 or later. For our purpose, U+0027
      // has to be treated as MidNumLet. ( http://crbug.com/364072 )
      "$MidNumLet = [\\p{Word_Break = MidNumLet} \\u0027];"
      "$MidLetter = [\\p{Word_Break = MidLetter}%s];"
      "$MidNum = [\\p{Word_Break = MidNum}];"
      "$Numeric = [\\p{Word_Break = Numeric}];"
      "$ExtendNumLet = [\\p{Word_Break = ExtendNumLet}];"

      "$Control = [\\p{Grapheme_Cluster_Break = Control}]; "
      "%s"  // ALetterPlus

      "$KatakanaEx = $Katakana ($Extend | $Format)*;"
      "$ALetterEx = $ALetterPlus ($Extend | $Format)*;"
      "$MidNumLetEx = $MidNumLet ($Extend | $Format)*;"
      "$MidLetterEx = $MidLetter ($Extend | $Format)*;"
      "$MidNumEx = $MidNum ($Extend | $Format)*;"
      "$NumericEx = $Numeric ($Extend | $Format)*;"
      "$ExtendNumLetEx = $ExtendNumLet ($Extend | $Format)*;"

      "$Hiragana = [\\p{script=Hiragana}];"
      "$Ideographic = [\\p{Ideographic}];"
      "$HiraganaEx = $Hiragana ($Extend | $Format)*;"
      "$IdeographicEx = $Ideographic ($Extend | $Format)*;"

      "!!forward;"
      "$CR $LF;"
      "[^$CR $LF $Newline]? ($Extend | $Format)+;"
      "$ALetterEx {200};"
      "$ALetterEx $ALetterEx {200};"
      "%s"  // (Allow|Disallow) Contraction

      "!!reverse;"
      "$BackALetterEx = ($Format | $Extend)* $ALetterPlus;"
      "$BackMidNumLetEx = ($Format | $Extend)* $MidNumLet;"
      "$BackNumericEx = ($Format | $Extend)* $Numeric;"
      "$BackMidNumEx = ($Format | $Extend)* $MidNum;"
      "$BackMidLetterEx = ($Format | $Extend)* $MidLetter;"
      "$BackKatakanaEx = ($Format | $Extend)* $Katakana;"
      "$BackExtendNumLetEx = ($Format | $Extend)* $ExtendNumLet;"
      "$LF $CR;"
      "($Format | $Extend)* [^$CR $LF $Newline]?;"
      "$BackALetterEx $BackALetterEx;"
      "$BackALetterEx ($BackMidLetterEx | $BackMidNumLetEx) $BackALetterEx;"
      "$BackNumericEx $BackNumericEx;"
      "$BackNumericEx $BackALetterEx;"
      "$BackALetterEx $BackNumericEx;"
      "$BackNumericEx ($BackMidNumEx | $BackMidNumLetEx) $BackNumericEx;"
      "$BackKatakanaEx $BackKatakanaEx;"
      "$BackExtendNumLetEx ($BackALetterEx | $BackNumericEx |"
      " $BackKatakanaEx | $BackExtendNumLetEx);"
      "($BackALetterEx | $BackNumericEx | $BackKatakanaEx)"
      " $BackExtendNumLetEx;"

      "!!safe_reverse;"
      "($Extend | $Format)+ .?;"
      "($MidLetter | $MidNumLet) $BackALetterEx;"
      "($MidNum | $MidNumLet) $BackNumericEx;"

      "!!safe_forward;"
      "($Extend | $Format)+ .?;"
      "($MidLetterEx | $MidNumLetEx) $ALetterEx;"
      "($MidNumEx | $MidNumLetEx) $NumericEx;";

  // Retrieve the script codes used by the given language from ICU. When the
  // given language consists of two or more scripts, we just use the first
  // script. The number of returned script codes is always < 8, so an array of
  // size 8 is large enough to receive all of them without buffer-overflow
  // errors.
  UErrorCode error = U_ZERO_ERROR;
  UScriptCode script_code[8];
  int scripts = uscript_getCode(language.c_str(), script_code,
                                arraysize(script_code), &error);
  if (U_SUCCESS(error) && scripts >= 1)
    script_code_ = script_code[0];

  // Retrieve the values for $ALetter and $ALetterPlus. We use the dictionary
  // only for the languages that need it (i.e. Korean, Thai, Lao, and Khmer) to
  // prevent ICU from returning dictionary words (e.g. Korean or Thai words)
  // for languages that do not need them.
  const char* aletter = uscript_getName(script_code_);
  if (!aletter)
    aletter = "Latin";

  const char kWithDictionary[] =
      "$dictionary = [:LineBreak = Complex_Context:];"
      "$ALetterPlus = [$ALetter [$dictionary-$Extend-$Control]];";
  const char kWithoutDictionary[] = "$ALetterPlus = $ALetter;";
  const char* aletter_plus = kWithoutDictionary;
  if (script_code_ == USCRIPT_HANGUL || script_code_ == USCRIPT_THAI ||
      script_code_ == USCRIPT_LAO || script_code_ == USCRIPT_KHMER)
    aletter_plus = kWithDictionary;

  // Treat numbers as word characters except for Arabic and Hebrew.
  const char* aletter_extra = " [0123456789]";
  if (script_code_ == USCRIPT_HEBREW || script_code_ == USCRIPT_ARABIC)
    aletter_extra = "";

  const char kMidLetterExtra[] = "";
  // For Hebrew, treat single/double quotation marks as MidLetter.
  const char kMidLetterExtraHebrew[] = "\"'";
  const char* midletter_extra = kMidLetterExtra;
  if (script_code_ == USCRIPT_HEBREW)
    midletter_extra = kMidLetterExtraHebrew;

  // Create two custom rule sets: one that allows contraction and one that
  // does not. We save these strings in UTF-16 so we can use them without
  // conversions. (ICU needs UTF-16 strings.)
  const char kAllowContraction[] =
      "$ALetterEx ($MidLetterEx | $MidNumLetEx) $ALetterEx {200};";
  const char kDisallowContraction[] = "";

  ruleset_allow_contraction_ = base::ASCIIToUTF16(
      base::StringPrintf(kRuleTemplate,
                         aletter,
                         aletter_extra,
                         midletter_extra,
                         aletter_plus,
                         kAllowContraction));
  ruleset_disallow_contraction_ = base::ASCIIToUTF16(
      base::StringPrintf(kRuleTemplate,
                         aletter,
                         aletter_extra,
                         midletter_extra,
                         aletter_plus,
                         kDisallowContraction));
}

bool SpellcheckCharAttribute::OutputChar(UChar c,
                                         base::string16* output) const {
  // Call the language-specific function if necessary.
  // Otherwise, we call the default one.
  switch (script_code_) {
    case USCRIPT_ARABIC:
      return OutputArabic(c, output);

    case USCRIPT_HANGUL:
      return OutputHangul(c, output);

    case USCRIPT_HEBREW:
      return OutputHebrew(c, output);

    default:
      return OutputDefault(c, output);
  }
}

bool SpellcheckCharAttribute::OutputArabic(UChar c,
                                           base::string16* output) const {
  // Discard characters that are not Arabic letters. We also discard the Arabic
  // vowel marks (Damma, Fatha, Kasra, etc.) to prevent our Arabic dictionary
  // from marking an Arabic word that includes vowel marks as misspelled. (We
  // need to check these vowel marks manually and filter them out since their
  // script code is USCRIPT_ARABIC.)
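  // (U+0621 is ARABIC LETTER HAMZA; the accepted range ends at U+064D, so the
  // vowel marks from U+064E ARABIC FATHA onward fall outside it and are
  // dropped.)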
  if (0x0621 <= c && c <= 0x064D)
    output->push_back(c);
  return true;
}

bool SpellcheckCharAttribute::OutputHangul(UChar c,
                                           base::string16* output) const {
  // Decompose a Hangul character into the Hangul vowel and consonants used by
  // our spellchecker. A Unicode Hangul character is a ligature consisting of a
  // Hangul vowel and consonants, e.g. U+AC01 "Gag" consists of U+1100 "G",
  // U+1161 "a", and U+11A8 "g". That is, we can treat each Hangul character as
  // a point in a three-dimensional space of (first consonant, vowel, last
  // consonant). Therefore, we can compose a Hangul character from a vowel and
  // two consonants with linear composition:
  //   character = 0xAC00 +
  //               (first consonant - 0x1100) * 28 * 21 +
  //               (vowel - 0x1161) * 28 +
  //               (last consonant - 0x11A7);
  // We can also decompose a Hangul character with linear decomposition:
  //   first consonant = (character - 0xAC00) / 28 / 21;
  //   vowel = (character - 0xAC00) / 28 % 21;
  //   last consonant = (character - 0xAC00) % 28;
  // This code is copied from Unicode Standard Annex #15
  // <http://unicode.org/reports/tr15>, with some comments added.
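  // For example, applying the decomposition above to U+AC01 "Gag":
  //   index = 0xAC01 - 0xAC00 = 1;
  //   first consonant = 0x1100 + 1 / (21 * 28) = 0x1100 ("G");
  //   vowel           = 0x1161 + (1 % (21 * 28)) / 28 = 0x1161 ("a");
  //   last consonant  = 0x11A7 + 1 % 28 = 0x11A8 ("g").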
  const int kSBase = 0xAC00;  // U+AC00: the start of Hangul syllables.
  const int kLBase = 0x1100;  // U+1100: the start of Hangul first consonants.
  const int kVBase = 0x1161;  // U+1161: the start of Hangul vowels.
  const int kTBase = 0x11A7;  // U+11A7: the base of Hangul last consonants.
  const int kLCount = 19;     // The number of Hangul first consonants.
  const int kVCount = 21;     // The number of Hangul vowels.
  const int kTCount = 28;     // The number of Hangul last consonants.
  const int kNCount = kVCount * kTCount;
  const int kSCount = kLCount * kNCount;

  int index = c - kSBase;
  if (index < 0 || index >= kSCount) {
    // This is not a Hangul syllable. Defer to the default output function,
    // which outputs the character only when it belongs to the script of the
    // spellchecker language.
    return OutputDefault(c, output);
  }

  // This is a Hangul character. Decompose this character into Hangul vowels
  // and consonants.
  int l = kLBase + index / kNCount;
  int v = kVBase + (index % kNCount) / kTCount;
  int t = kTBase + index % kTCount;
  output->push_back(l);
  output->push_back(v);
  if (t != kTBase)
    output->push_back(t);
  return true;
}

bool SpellcheckCharAttribute::OutputHebrew(UChar c,
                                           base::string16* output) const {
  // Discard characters that are not Hebrew letters. We also discard Hebrew
  // niqquds to prevent our Hebrew dictionary from marking a Hebrew word that
  // includes niqquds as misspelled. (As with the Arabic vowel marks, we need
  // to check niqquds manually and filter them out since their script code is
  // USCRIPT_HEBREW.)
  // Pass through ASCII single/double quotation marks and the Hebrew Geresh and
  // Gershayim.
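  // (U+05D0..U+05EA are the Hebrew letters Alef through Tav; U+05F3 and U+05F4
  // are the Hebrew punctuation marks Geresh and Gershayim.)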
  if ((0x05D0 <= c && c <= 0x05EA) || c == 0x22 || c == 0x27 ||
      c == 0x05F4 || c == 0x05F3)
    output->push_back(c);
  return true;
}

bool SpellcheckCharAttribute::OutputDefault(UChar c,
                                            base::string16* output) const {
  // Check the script code of this character and output it only if it matches
  // the one used by the spellchecker language.
  UErrorCode status = U_ZERO_ERROR;
  UScriptCode script_code = uscript_getScript(c, &status);
  if (script_code == script_code_ || script_code == USCRIPT_COMMON)
    output->push_back(c);
  return true;
}

// SpellcheckWordIterator implementation:
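//
// A caller might drive this iterator roughly as follows (a sketch only; the
// |text| variable and the surrounding error handling are illustrative, not
// part of this file):
//
//   SpellcheckCharAttribute attribute;
//   attribute.SetDefaultLanguage("en-US");
//   SpellcheckWordIterator iterator;
//   if (iterator.Initialize(&attribute, true) &&
//       iterator.SetText(text.c_str(), text.length())) {
//     base::string16 word;
//     int offset = 0;
//     int length = 0;
//     while (iterator.GetNextWord(&word, &offset, &length) !=
//            SpellcheckWordIterator::IS_END_OF_TEXT) {
//       // Spellcheck |word| when the status is IS_WORD; an IS_SKIPPABLE
//       // result is left to the caller to handle.
//     }
//   }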

SpellcheckWordIterator::SpellcheckWordIterator()
    : text_(NULL),
      attribute_(NULL),
      iterator_() {
}

SpellcheckWordIterator::~SpellcheckWordIterator() {
  Reset();
}

bool SpellcheckWordIterator::Initialize(
    const SpellcheckCharAttribute* attribute,
    bool allow_contraction) {
  // Create a custom ICU break iterator with empty text for use in this object.
  // (We allow setting the text later so we can reuse this iterator.)
  DCHECK(attribute);
  const base::string16 rule(attribute->GetRuleSet(allow_contraction));

  // If there is no rule set, the attributes were invalid.
  if (rule.empty())
    return false;

  scoped_ptr<BreakIterator> iterator(new BreakIterator(base::string16(), rule));
  if (!iterator->Init()) {
    // Since we're not passing in any text, the only reason this could fail
    // is if we fail to parse the rules. Since the rules are hardcoded,
    // that would be a bug in this class.
    NOTREACHED() << "failed to open iterator (broken rules)";
    return false;
  }
  iterator_ = iterator.Pass();

  // Set the character attributes so we can normalize the words extracted by
  // this iterator.
  attribute_ = attribute;
  return true;
}

bool SpellcheckWordIterator::IsInitialized() const {
  // Return true iff we have an iterator.
  return !!iterator_;
}

bool SpellcheckWordIterator::SetText(const base::char16* text, size_t length) {
  DCHECK(!!iterator_);

  // Set the text to be split by this iterator.
  if (!iterator_->SetText(text, length)) {
    LOG(ERROR) << "failed to set text";
    return false;
  }

  text_ = text;
  return true;
}

SpellcheckWordIterator::WordIteratorStatus SpellcheckWordIterator::GetNextWord(
    base::string16* word_string,
    int* word_start,
    int* word_length) {
  DCHECK(!!text_);

  word_string->clear();
  *word_start = 0;
  *word_length = 0;

  if (!text_) {
    return IS_END_OF_TEXT;
  }

  // Find a word that can be checked for spelling or a character that can be
  // skipped over. Rather than moving past a skippable character this returns
  // IS_SKIPPABLE and defers handling the character to the calling function.
  while (iterator_->Advance()) {
    const size_t start = iterator_->prev();
    const size_t length = iterator_->pos() - start;
    switch (iterator_->GetWordBreakStatus()) {
      case BreakIterator::IS_WORD_BREAK: {
        if (Normalize(start, length, word_string)) {
          *word_start = start;
          *word_length = length;
          return IS_WORD;
        }
        break;
      }
      case BreakIterator::IS_SKIPPABLE_WORD: {
        *word_string = iterator_->GetString();
        *word_start = start;
        *word_length = length;
        return IS_SKIPPABLE;
      }
      // |iterator_| is RULE_BASED so the break status should never be
      // IS_LINE_OR_CHAR_BREAK.
      case BreakIterator::IS_LINE_OR_CHAR_BREAK: {
        NOTREACHED();
        break;
      }
    }
  }

  // There aren't any more words in the given text.
  return IS_END_OF_TEXT;
}

void SpellcheckWordIterator::Reset() {
  iterator_.reset();
}

bool SpellcheckWordIterator::Normalize(int input_start,
                                       int input_length,
                                       base::string16* output_string) const {
  // We use NFKC (Normalization Form, Compatible decomposition, followed by
  // canonical Composition) defined in Unicode Standard Annex #15 to normalize
  // this token because it is the most suitable normalization algorithm for our
  // spellchecker. Nevertheless, it is not a perfect algorithm for our
  // spellchecker and we need manual normalization as well. The normalized
  // text does not have to be NUL-terminated since its characters are copied to
  // a string16, which adds a NUL character when needed.
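  // (For example, NFKC folds the full-width letter U+FF21 into the ASCII
  // letter U+0041 'A'; the language-specific output functions above handle
  // what NFKC does not, such as discarding Arabic vowel marks and Hebrew
  // niqquds.)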
  icu::UnicodeString input(FALSE, &text_[input_start], input_length);
  UErrorCode status = U_ZERO_ERROR;
  icu::UnicodeString output;
  icu::Normalizer::normalize(input, UNORM_NFKC, 0, output, status);
  if (status != U_ZERO_ERROR && status != U_STRING_NOT_TERMINATED_WARNING)
    return false;

  // Copy the normalized text to the output.
  icu::StringCharacterIterator it(output);
  for (UChar c = it.first(); c != icu::CharacterIterator::DONE; c = it.next())
    attribute_->OutputChar(c, output_string);

  return !output_string->empty();
}