2 * Copyright © 2011,2012 Google, Inc.
4 * This is part of HarfBuzz, a text shaping library.
6 * Permission is hereby granted, without written agreement and without
7 * license or royalty fees, to use, copy, modify, and distribute this
8 * software and its documentation for any purpose, provided that the
9 * above copyright notice and the following two paragraphs appear in
10 * all copies of this software.
12 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
13 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
14 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
15 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
16 * DAMAGE.
18 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
19 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
20 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
21 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
22 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
24 * Google Author(s): Behdad Esfahbod
27 #include "hb-ot-shape-normalize-private.hh"
28 #include "hb-ot-shape-complex-private.hh"
29 #include "hb-ot-shape-private.hh"
35 * This file exports one main function: _hb_ot_shape_normalize().
37 * This function closely reflects the Unicode Normalization Algorithm,
40 * Each shaper specifies whether it prefers decomposed (NFD) or composed (NFC).
41 * The logic however tries to use whatever the font can support.
43 * In general what happens is that: each grapheme is decomposed in a chain
44 * of 1:2 decompositions, marks reordered, and then recomposed if desired,
45 * so far it's like Unicode Normalization. However, the decomposition and
46 * recomposition only happens if the font supports the resulting characters.
50 * - Try to render all canonically equivalent strings similarly. To really
51 * achieve this we have to always do the full decomposition and then
52 * selectively recompose from there. It's kinda too expensive though, so
53 * we skip some cases. For example, if composed is desired, we simply
54 * don't touch 1-character clusters that are supported by the font, even
55 * though their NFC may be different.
57 * - When a font has a precomposed character for a sequence but the 'ccmp'
58 * feature in the font is not adequate, use the precomposed character
59 * which typically has better mark positioning.
61 * - When a font does not support a combining mark, but supports it precomposed
62 * with previous base, use that. This needs the itemizer to have this
63 * knowledge too. We need to provide assistance to the itemizer.
65 * - When a font does not support a character but supports its decomposition,
66 * well, use the decomposition (preferring the canonical decomposition, but
67 * falling back to the compatibility decomposition if necessary). The
68 * compatibility decomposition is really nice to have, for characters like
69 * ellipsis, or various-sized space characters.
71 * - The complex shapers can customize the compose and decompose functions to
72 * offload some of their requirements to the normalizer. For example, the
73 * Indic shaper may want to disallow recomposing of two matras.
75 * - We try compatibility decomposition if decomposing through canonical
76 * decomposition alone failed to find a sequence that the font supports.
77 * We don't try compatibility decomposition recursively during the canonical
78 * decomposition phase. This has minimal impact. There are only a handful
79 * of Greek letters that have canonical decompositions that include characters
80 * with compatibility decomposition. Those can be found using this command:
82 * egrep "`echo -n ';('; grep ';<' UnicodeData.txt | cut -d';' -f1 | tr '\n' '|'; echo ') '`" UnicodeData.txt
86 decompose_unicode (const hb_ot_shape_normalize_context_t
*c
,
91 return c
->unicode
->decompose (ab
, a
, b
);
95 compose_unicode (const hb_ot_shape_normalize_context_t
*c
,
100 return c
->unicode
->compose (a
, b
, ab
);
104 set_glyph (hb_glyph_info_t
&info
, hb_font_t
*font
)
106 font
->get_glyph (info
.codepoint
, 0, &info
.glyph_index());
110 output_char (hb_buffer_t
*buffer
, hb_codepoint_t unichar
, hb_codepoint_t glyph
)
112 buffer
->cur().glyph_index() = glyph
;
113 buffer
->output_glyph (unichar
);
114 _hb_glyph_info_set_unicode_props (&buffer
->prev(), buffer
->unicode
);
118 next_char (hb_buffer_t
*buffer
, hb_codepoint_t glyph
)
120 buffer
->cur().glyph_index() = glyph
;
121 buffer
->next_glyph ();
125 skip_char (hb_buffer_t
*buffer
)
127 buffer
->skip_glyph ();
130 /* Returns 0 if didn't decompose, number of resulting characters otherwise. */
131 static inline unsigned int
132 decompose (const hb_ot_shape_normalize_context_t
*c
, bool shortest
, hb_codepoint_t ab
)
134 hb_codepoint_t a
, b
, a_glyph
, b_glyph
;
136 if (!c
->decompose (c
, ab
, &a
, &b
) ||
137 (b
&& !c
->font
->get_glyph (b
, 0, &b_glyph
)))
140 bool has_a
= c
->font
->get_glyph (a
, 0, &a_glyph
);
141 if (shortest
&& has_a
) {
143 output_char (c
->buffer
, a
, a_glyph
);
145 output_char (c
->buffer
, b
, b_glyph
);
152 if ((ret
= decompose (c
, shortest
, a
))) {
154 output_char (c
->buffer
, b
, b_glyph
);
161 output_char (c
->buffer
, a
, a_glyph
);
163 output_char (c
->buffer
, b
, b_glyph
);
172 /* Returns 0 if didn't decompose, number of resulting characters otherwise. */
173 static inline unsigned int
174 decompose_compatibility (const hb_ot_shape_normalize_context_t
*c
, hb_codepoint_t u
)
177 hb_codepoint_t decomposed
[HB_UNICODE_MAX_DECOMPOSITION_LEN
];
178 hb_codepoint_t glyphs
[HB_UNICODE_MAX_DECOMPOSITION_LEN
];
180 len
= c
->buffer
->unicode
->decompose_compatibility (u
, decomposed
);
184 for (i
= 0; i
< len
; i
++)
185 if (!c
->font
->get_glyph (decomposed
[i
], 0, &glyphs
[i
]))
188 for (i
= 0; i
< len
; i
++)
189 output_char (c
->buffer
, decomposed
[i
], glyphs
[i
]);
195 decompose_current_character (const hb_ot_shape_normalize_context_t
*c
, bool shortest
)
197 hb_buffer_t
* const buffer
= c
->buffer
;
198 hb_codepoint_t glyph
;
200 /* Kind of a cute waterfall here... */
201 if (shortest
&& c
->font
->get_glyph (buffer
->cur().codepoint
, 0, &glyph
))
202 next_char (buffer
, glyph
);
203 else if (decompose (c
, shortest
, buffer
->cur().codepoint
))
205 else if (!shortest
&& c
->font
->get_glyph (buffer
->cur().codepoint
, 0, &glyph
))
206 next_char (buffer
, glyph
);
207 else if (decompose_compatibility (c
, buffer
->cur().codepoint
))
210 next_char (buffer
, glyph
); /* glyph is initialized in earlier branches. */
214 handle_variation_selector_cluster (const hb_ot_shape_normalize_context_t
*c
, unsigned int end
)
216 hb_buffer_t
* const buffer
= c
->buffer
;
217 for (; buffer
->idx
< end
- 1;) {
218 if (unlikely (buffer
->unicode
->is_variation_selector (buffer
->cur(+1).codepoint
))) {
219 /* The next two lines are some ugly lines... But work. */
220 c
->font
->get_glyph (buffer
->cur().codepoint
, buffer
->cur(+1).codepoint
, &buffer
->cur().glyph_index());
221 buffer
->replace_glyphs (2, 1, &buffer
->cur().codepoint
);
223 set_glyph (buffer
->cur(), c
->font
);
224 buffer
->next_glyph ();
227 if (likely (buffer
->idx
< end
)) {
228 set_glyph (buffer
->cur(), c
->font
);
229 buffer
->next_glyph ();
234 decompose_multi_char_cluster (const hb_ot_shape_normalize_context_t
*c
, unsigned int end
)
236 hb_buffer_t
* const buffer
= c
->buffer
;
237 /* TODO Currently if there's a variation-selector we give-up, it's just too hard. */
238 for (unsigned int i
= buffer
->idx
; i
< end
; i
++)
239 if (unlikely (buffer
->unicode
->is_variation_selector (buffer
->info
[i
].codepoint
))) {
240 handle_variation_selector_cluster (c
, end
);
244 while (buffer
->idx
< end
)
245 decompose_current_character (c
, false);
249 decompose_cluster (const hb_ot_shape_normalize_context_t
*c
, bool short_circuit
, unsigned int end
)
251 if (likely (c
->buffer
->idx
+ 1 == end
))
252 decompose_current_character (c
, short_circuit
);
254 decompose_multi_char_cluster (c
, end
);
259 compare_combining_class (const hb_glyph_info_t
*pa
, const hb_glyph_info_t
*pb
)
261 unsigned int a
= _hb_glyph_info_get_modified_combining_class (pa
);
262 unsigned int b
= _hb_glyph_info_get_modified_combining_class (pb
);
264 return a
< b
? -1 : a
== b
? 0 : +1;
269 _hb_ot_shape_normalize (const hb_ot_shape_plan_t
*plan
,
273 hb_ot_shape_normalization_mode_t mode
= plan
->shaper
->normalization_preference
?
274 plan
->shaper
->normalization_preference (&buffer
->props
) :
275 HB_OT_SHAPE_NORMALIZATION_MODE_DEFAULT
;
276 const hb_ot_shape_normalize_context_t c
= {
281 plan
->shaper
->decompose
? plan
->shaper
->decompose
: decompose_unicode
,
282 plan
->shaper
->compose
? plan
->shaper
->compose
: compose_unicode
285 bool short_circuit
= mode
!= HB_OT_SHAPE_NORMALIZATION_MODE_DECOMPOSED
&&
286 mode
!= HB_OT_SHAPE_NORMALIZATION_MODE_COMPOSED_DIACRITICS_NO_SHORT_CIRCUIT
;
289 /* We do a fairly straightforward yet custom normalization process in three
290 * separate rounds: decompose, reorder, recompose (if desired). Currently
291 * this makes two buffer swaps. We can make it faster by moving the last
292 * two rounds into the inner loop for the first round, but it's more readable
296 /* First round, decompose */
298 buffer
->clear_output ();
300 for (buffer
->idx
= 0; buffer
->idx
< count
;)
303 for (end
= buffer
->idx
+ 1; end
< count
; end
++)
304 if (buffer
->cur().cluster
!= buffer
->info
[end
].cluster
)
307 decompose_cluster (&c
, short_circuit
, end
);
309 buffer
->swap_buffers ();
312 /* Second round, reorder (inplace) */
315 for (unsigned int i
= 0; i
< count
; i
++)
317 if (_hb_glyph_info_get_modified_combining_class (&buffer
->info
[i
]) == 0)
321 for (end
= i
+ 1; end
< count
; end
++)
322 if (_hb_glyph_info_get_modified_combining_class (&buffer
->info
[end
]) == 0)
325 /* We are going to do a bubble-sort. Only do this if the
326 * sequence is short. Doing it on long sequences can result
327 * in an O(n^2) DoS. */
333 hb_bubble_sort (buffer
->info
+ i
, end
- i
, compare_combining_class
);
339 if (mode
== HB_OT_SHAPE_NORMALIZATION_MODE_DECOMPOSED
)
342 /* Third round, recompose */
344 /* As noted in the comment earlier, we don't try to combine
345 * ccc=0 chars with their previous Starter. */
347 buffer
->clear_output ();
349 unsigned int starter
= 0;
350 buffer
->next_glyph ();
351 while (buffer
->idx
< count
)
353 hb_codepoint_t composed
, glyph
;
354 if (/* We don't try to compose a non-mark character with it's preceding starter.
355 * This is both an optimization to avoid trying to compose every two neighboring
356 * glyphs in most scripts AND a desired feature for Hangul. Apparently Hangul
357 * fonts are not designed to mix-and-match pre-composed syllables and Jamo. */
358 HB_UNICODE_GENERAL_CATEGORY_IS_MARK (_hb_glyph_info_get_general_category (&buffer
->cur())) &&
359 /* If there's anything between the starter and this char, they should have CCC
360 * smaller than this character's. */
361 (starter
== buffer
->out_len
- 1 ||
362 _hb_glyph_info_get_modified_combining_class (&buffer
->prev()) < _hb_glyph_info_get_modified_combining_class (&buffer
->cur())) &&
365 buffer
->out_info
[starter
].codepoint
,
366 buffer
->cur().codepoint
,
368 /* And the font has glyph for the composite. */
369 font
->get_glyph (composed
, 0, &glyph
))
372 buffer
->next_glyph (); /* Copy to out-buffer. */
373 if (unlikely (buffer
->in_error
))
375 buffer
->merge_out_clusters (starter
, buffer
->out_len
);
376 buffer
->out_len
--; /* Remove the second composable. */
377 buffer
->out_info
[starter
].codepoint
= composed
; /* Modify starter and carry on. */
378 set_glyph (buffer
->out_info
[starter
], font
);
379 _hb_glyph_info_set_unicode_props (&buffer
->out_info
[starter
], buffer
->unicode
);
384 /* Blocked, or doesn't compose. */
385 buffer
->next_glyph ();
387 if (_hb_glyph_info_get_modified_combining_class (&buffer
->prev()) == 0)
388 starter
= buffer
->out_len
- 1;
390 buffer
->swap_buffers ();