/*
 * Copyright © 2009,2010  Red Hat, Inc.
3 * Copyright © 2010,2011,2012,2013 Google, Inc.
5 * This is part of HarfBuzz, a text shaping library.
7 * Permission is hereby granted, without written agreement and without
8 * license or royalty fees, to use, copy, modify, and distribute this
9 * software and its documentation for any purpose, provided that the
10 * above copyright notice and the following two paragraphs appear in
11 * all copies of this software.
13 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
14 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
15 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
19 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
20 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
21 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
22 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
23 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
25 * Red Hat Author(s): Behdad Esfahbod
 * Google Author(s): Behdad Esfahbod
 */
29 #ifndef HB_OT_MAP_PRIVATE_HH
30 #define HB_OT_MAP_PRIVATE_HH
32 #include "hb-buffer-private.hh"
35 struct hb_ot_shape_plan_t
;
37 static const hb_tag_t table_tags
[2] = {HB_OT_TAG_GSUB
, HB_OT_TAG_GPOS
};
41 friend struct hb_ot_map_builder_t
;
45 struct feature_map_t
{
46 hb_tag_t tag
; /* should be first for our bsearch to work */
47 unsigned int index
[2]; /* GSUB/GPOS */
48 unsigned int stage
[2]; /* GSUB/GPOS */
51 hb_mask_t _1_mask
; /* mask for value=1, for quick access */
52 unsigned int needs_fallback
: 1;
53 unsigned int auto_zwj
: 1;
55 static int cmp (const feature_map_t
*a
, const feature_map_t
*b
)
56 { return a
->tag
< b
->tag
? -1 : a
->tag
> b
->tag
? 1 : 0; }
61 unsigned short auto_zwj
: 1;
64 static int cmp (const lookup_map_t
*a
, const lookup_map_t
*b
)
65 { return a
->index
< b
->index
? -1 : a
->index
> b
->index
? 1 : 0; }
68 typedef void (*pause_func_t
) (const struct hb_ot_shape_plan_t
*plan
, hb_font_t
*font
, hb_buffer_t
*buffer
);
71 unsigned int last_lookup
; /* Cumulative */
72 pause_func_t pause_func
;
76 hb_ot_map_t (void) { memset (this, 0, sizeof (*this)); }
78 inline hb_mask_t
get_global_mask (void) const { return global_mask
; }
80 inline hb_mask_t
get_mask (hb_tag_t feature_tag
, unsigned int *shift
= NULL
) const {
81 const feature_map_t
*map
= features
.bsearch (&feature_tag
);
82 if (shift
) *shift
= map
? map
->shift
: 0;
83 return map
? map
->mask
: 0;
86 inline bool needs_fallback (hb_tag_t feature_tag
) const {
87 const feature_map_t
*map
= features
.bsearch (&feature_tag
);
88 return map
? map
->needs_fallback
: false;
91 inline hb_mask_t
get_1_mask (hb_tag_t feature_tag
) const {
92 const feature_map_t
*map
= features
.bsearch (&feature_tag
);
93 return map
? map
->_1_mask
: 0;
96 inline unsigned int get_feature_index (unsigned int table_index
, hb_tag_t feature_tag
) const {
97 const feature_map_t
*map
= features
.bsearch (&feature_tag
);
98 return map
? map
->index
[table_index
] : HB_OT_LAYOUT_NO_FEATURE_INDEX
;
101 inline unsigned int get_feature_stage (unsigned int table_index
, hb_tag_t feature_tag
) const {
102 const feature_map_t
*map
= features
.bsearch (&feature_tag
);
103 return map
? map
->stage
[table_index
] : (unsigned int) -1;
106 inline void get_stage_lookups (unsigned int table_index
, unsigned int stage
,
107 const struct lookup_map_t
**plookups
, unsigned int *lookup_count
) const {
108 if (unlikely (stage
== (unsigned int) -1)) {
113 assert (stage
<= stages
[table_index
].len
);
114 unsigned int start
= stage
? stages
[table_index
][stage
- 1].last_lookup
: 0;
115 unsigned int end
= stage
< stages
[table_index
].len
? stages
[table_index
][stage
].last_lookup
: lookups
[table_index
].len
;
116 *plookups
= &lookups
[table_index
][start
];
117 *lookup_count
= end
- start
;
120 HB_INTERNAL
void collect_lookups (unsigned int table_index
, hb_set_t
*lookups
) const;
121 template <typename Proxy
>
122 HB_INTERNAL
inline void apply (const Proxy
&proxy
,
123 const struct hb_ot_shape_plan_t
*plan
, hb_font_t
*font
, hb_buffer_t
*buffer
) const;
124 HB_INTERNAL
void substitute (const struct hb_ot_shape_plan_t
*plan
, hb_font_t
*font
, hb_buffer_t
*buffer
) const;
125 HB_INTERNAL
void position (const struct hb_ot_shape_plan_t
*plan
, hb_font_t
*font
, hb_buffer_t
*buffer
) const;
127 inline void finish (void) {
129 for (unsigned int table_index
= 0; table_index
< 2; table_index
++)
131 lookups
[table_index
].finish ();
132 stages
[table_index
].finish ();
137 hb_tag_t chosen_script
[2];
138 bool found_script
[2];
142 HB_INTERNAL
void add_lookups (hb_face_t
*face
,
143 unsigned int table_index
,
144 unsigned int feature_index
,
148 hb_mask_t global_mask
;
150 hb_prealloced_array_t
<feature_map_t
, 8> features
;
151 hb_prealloced_array_t
<lookup_map_t
, 32> lookups
[2]; /* GSUB/GPOS */
152 hb_prealloced_array_t
<stage_map_t
, 4> stages
[2]; /* GSUB/GPOS */
/* Flags controlling how a requested feature is compiled.
 * NOTE(review): F_NONE / F_GLOBAL were lost in extraction and are
 * reconstructed (F_GLOBAL is referenced by add_global_bool_feature() below);
 * confirm the exact values. */
enum hb_ot_map_feature_flags_t {
  F_NONE	 = 0x0000,
  F_GLOBAL	 = 0x0001,
  F_HAS_FALLBACK = 0x0002,
  F_MANUAL_ZWJ	 = 0x0004
};
161 /* Macro version for where const is desired. */
162 #define F_COMBINE(l,r) (hb_ot_map_feature_flags_t ((unsigned int) (l) | (unsigned int) (r)))
163 inline hb_ot_map_feature_flags_t
164 operator | (hb_ot_map_feature_flags_t l
, hb_ot_map_feature_flags_t r
)
165 { return hb_ot_map_feature_flags_t ((unsigned int) l
| (unsigned int) r
); }
166 inline hb_ot_map_feature_flags_t
167 operator & (hb_ot_map_feature_flags_t l
, hb_ot_map_feature_flags_t r
)
168 { return hb_ot_map_feature_flags_t ((unsigned int) l
& (unsigned int) r
); }
169 inline hb_ot_map_feature_flags_t
170 operator ~ (hb_ot_map_feature_flags_t r
)
171 { return hb_ot_map_feature_flags_t (~(unsigned int) r
); }
172 inline hb_ot_map_feature_flags_t
&
173 operator |= (hb_ot_map_feature_flags_t
&l
, hb_ot_map_feature_flags_t r
)
174 { l
= l
| r
; return l
; }
175 inline hb_ot_map_feature_flags_t
&
176 operator &= (hb_ot_map_feature_flags_t
& l
, hb_ot_map_feature_flags_t r
)
177 { l
= l
& r
; return l
; }
180 struct hb_ot_map_builder_t
184 HB_INTERNAL
hb_ot_map_builder_t (hb_face_t
*face_
,
185 const hb_segment_properties_t
*props_
);
187 HB_INTERNAL
void add_feature (hb_tag_t tag
, unsigned int value
,
188 hb_ot_map_feature_flags_t flags
);
190 inline void add_global_bool_feature (hb_tag_t tag
)
191 { add_feature (tag
, 1, F_GLOBAL
); }
193 inline void add_gsub_pause (hb_ot_map_t::pause_func_t pause_func
)
194 { add_pause (0, pause_func
); }
195 inline void add_gpos_pause (hb_ot_map_t::pause_func_t pause_func
)
196 { add_pause (1, pause_func
); }
198 HB_INTERNAL
void compile (struct hb_ot_map_t
&m
);
200 inline void finish (void) {
201 feature_infos
.finish ();
202 for (unsigned int table_index
= 0; table_index
< 2; table_index
++)
204 stages
[table_index
].finish ();
210 struct feature_info_t
{
212 unsigned int seq
; /* sequence#, used for stable sorting only */
213 unsigned int max_value
;
214 hb_ot_map_feature_flags_t flags
;
215 unsigned int default_value
; /* for non-global features, what should the unset glyphs take */
216 unsigned int stage
[2]; /* GSUB/GPOS */
218 static int cmp (const feature_info_t
*a
, const feature_info_t
*b
)
219 { return (a
->tag
!= b
->tag
) ? (a
->tag
< b
->tag
? -1 : 1) : (a
->seq
< b
->seq
? -1 : 1); }
222 struct stage_info_t
{
224 hb_ot_map_t::pause_func_t pause_func
;
227 HB_INTERNAL
void add_pause (unsigned int table_index
, hb_ot_map_t::pause_func_t pause_func
);
232 hb_segment_properties_t props
;
234 hb_tag_t chosen_script
[2];
235 bool found_script
[2];
236 unsigned int script_index
[2], language_index
[2];
240 unsigned int current_stage
[2]; /* GSUB/GPOS */
241 hb_prealloced_array_t
<feature_info_t
, 32> feature_infos
;
242 hb_prealloced_array_t
<stage_info_t
, 8> stages
[2]; /* GSUB/GPOS */
247 #endif /* HB_OT_MAP_PRIVATE_HH */