# -*- coding: utf-8 -*-
#
# This script builds unaccent.rules on standard output when given the
# contents of UnicodeData.txt [1] and Latin-ASCII.xml [2] as arguments.
# Ligature expansion and use of the Unicode CLDR Latin-ASCII transliterator
# are enabled by default; both can be disabled with the
# "--no-ligatures-expansion" command line option.
#
# The approach is to use the Unicode decomposition data to identify
# precomposed codepoints that are equivalent to a ligature of several
# letters, or a base letter with any number of diacritical marks.
#
# This approach handles most letters with diacritical marks and some
# ligatures.  However, several characters (notably a majority of
# ligatures) have no decomposition.  To handle all these cases, one can
# use a standard Unicode transliterator available in the Common Locale Data
# Repository (CLDR): Latin-ASCII.  This transliterator maps Unicode
# characters to ASCII-range equivalents.  Unless the "--no-ligatures-expansion"
# option is given, the XML file of this transliterator [2] -- supplied as a
# command line argument -- will be parsed and used.
#
# Ideally you should use the latest release of each data set.  This
# script is compatible with at least CLDR release 29.
#
# [1] https://www.unicode.org/Public/${UNICODE_VERSION}/ucd/UnicodeData.txt
# [2] https://raw.githubusercontent.com/unicode-org/cldr/${TAG}/common/transforms/Latin-ASCII.xml
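#
# Example invocation (illustrative only; the script file name and the data
# file locations are assumptions -- point them at wherever the downloaded
# files actually live):
#
#   python generate_unaccent_rules.py --unicode-data-file UnicodeData.txt \
#       --latin-ascii-file Latin-ASCII.xml > unaccent.rules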

import argparse
import codecs
import re
import sys
import xml.etree.ElementTree as ET

sys.stdout = codecs.getwriter('utf8')(sys.stdout.buffer)

# The ranges of Unicode characters that we consider to be "plain letters".
# For now we are being conservative by including only Latin and Greek.  This
# could be extended in future based on feedback from people with relevant
# language knowledge.
PLAIN_LETTER_RANGES = ((ord('a'), ord('z')),  # Latin lower case
                       (ord('A'), ord('Z')),  # Latin upper case
                       (0x03b1, 0x03c9),      # GREEK SMALL LETTER ALPHA, GREEK SMALL LETTER OMEGA
                       (0x0391, 0x03a9))      # GREEK CAPITAL LETTER ALPHA, GREEK CAPITAL LETTER OMEGA

# Combining marks follow a "base" character, and result in a composite
# character.  Example: "U&'A\0300'" produces "À".  There are three types of
# combining marks: enclosing (Me), non-spacing combining (Mn), spacing
# combining (Mc).  We identify the ranges of marks we feel safe removing.
# https://en.wikipedia.org/wiki/Combining_character
# https://www.unicode.org/charts/PDF/U0300.pdf
# https://www.unicode.org/charts/PDF/U20D0.pdf
COMBINING_MARK_RANGES = ((0x0300, 0x0362),   # Mn: Accents, IPA
                         (0x20dd, 0x20E0),   # Me: Symbols
                         (0x20e2, 0x20e4),)  # Me: Screen, keycap, triangle
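
# Each rule written to the output is a single line: the source character, a
# tab, and its replacement; for combining marks that are simply removed, only
# the source character is emitted (see print_record below).  As an
# illustration (assuming standard UnicodeData.txt contents), one generated
# line maps U+00C0 "À" to plain "A".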


def print_record(codepoint, letter):
    if letter:
        # If the letter contains whitespace or double quotes, escape the
        # double quotes and wrap the whole replacement in double quotes.
        if (' ' in letter) or ('"' in letter):
            letter = '"' + letter.replace('"', '""') + '"'
        output = chr(codepoint) + "\t" + letter
    else:
        output = chr(codepoint)

    print(output)


class Codepoint:
    def __init__(self, id, general_category, combining_ids):
        self.id = id
        self.general_category = general_category
        self.combining_ids = combining_ids


def is_mark_to_remove(codepoint):
    """Return true if this is a combining mark to remove."""
    if not is_mark(codepoint):
        return False

    for begin, end in COMBINING_MARK_RANGES:
        if codepoint.id >= begin and codepoint.id <= end:
            return True
    return False


def is_plain_letter(codepoint):
    """Return true if codepoint represents a "plain letter"."""
    for begin, end in PLAIN_LETTER_RANGES:
        if codepoint.id >= begin and codepoint.id <= end:
            return True
    return False


def is_mark(codepoint):
    """Returns true for diacritical marks (combining codepoints)."""
    return codepoint.general_category in ("Mn", "Me", "Mc")


def is_letter_with_marks(codepoint, table):
    """Returns true for letters combined with one or more marks."""
    # See https://www.unicode.org/reports/tr44/tr44-14.html#General_Category_Values

    # A letter may have no combining characters, in which case it has no
    # marks to strip.
    if len(codepoint.combining_ids) == 1:
        return False

    # If none of the trailing codepoints is a mark, this is not a letter
    # with diacritical marks.
    if any(is_mark(table[i]) for i in codepoint.combining_ids[1:]) is False:
        return False

    # The base letter must itself be a plain letter or a letter with marks.
    codepoint_base = codepoint.combining_ids[0]
    if is_plain_letter(table[codepoint_base]) is False and \
       is_letter_with_marks(table[codepoint_base], table) is False:
        return False

    return True
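
# As an example of the logic above (assuming standard UnicodeData.txt
# contents): U+00C0 "À" decomposes to U+0041 "A" followed by U+0300
# (combining grave accent), so is_letter_with_marks() returns True for it.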


def is_letter(codepoint, table):
    """Return true for letter with or without diacritical marks."""
    return is_plain_letter(codepoint) or is_letter_with_marks(codepoint, table)


def get_plain_letter(codepoint, table):
    """Return the base codepoint without marks. If this codepoint has more
    than one combining character, do a recursive lookup on the table to
    find out its plain base letter."""
    if is_letter_with_marks(codepoint, table):
        if len(table[codepoint.combining_ids[0]].combining_ids) > 1:
            return get_plain_letter(table[codepoint.combining_ids[0]], table)
        elif is_plain_letter(table[codepoint.combining_ids[0]]):
            return table[codepoint.combining_ids[0]]

        # Should not come here
        assert False, 'Codepoint U+%0.2X' % codepoint.id
    elif is_plain_letter(codepoint):
        return codepoint

    # Should not come here
    assert False, 'Codepoint U+%0.2X' % codepoint.id
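
# Example of the recursion above (assuming standard UnicodeData.txt contents):
# U+1E09 "ḉ" decomposes to U+00E7 "ç" + U+0301, and U+00E7 in turn decomposes
# to U+0063 "c" + U+0327, so get_plain_letter() resolves U+1E09 to plain "c".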


def is_ligature(codepoint, table):
    """Return true for letters combined with letters."""
    return all(is_letter(table[i], table) for i in codepoint.combining_ids)


def get_plain_letters(codepoint, table):
    """Return a list of plain letters from a ligature."""
    assert(is_ligature(codepoint, table))
    return [get_plain_letter(table[id], table) for id in codepoint.combining_ids]
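
# For instance (assuming standard UnicodeData.txt contents), U+0132 "Ĳ" has
# the compatibility decomposition "0049 004A"; both components are plain
# letters, so is_ligature() holds and get_plain_letters() yields the
# codepoints for "I" and "J".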


def parse_cldr_latin_ascii_transliterator(latinAsciiFilePath):
    """Parse the XML file and return a set of tuples (src, trg), where "src"
    is the original character and "trg" the substitute."""
    charactersSet = set()

    # RegEx to parse rules
    rulePattern = re.compile(r'^(?:(.)|(\\u[0-9a-fA-F]{4})) \u2192 (?:\'(.+)\'|(.+)) ;')
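
    # Illustrative rule lines this pattern is meant to match (not necessarily
    # verbatim lines from Latin-ASCII.xml):
    #   "Æ → AE ;"         matches groups 1 and 4
    #   "\u00C6 → 'AE' ;"  matches groups 2 and 3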

    # construct tree from XML
    transliterationTree = ET.parse(latinAsciiFilePath)
    transliterationTreeRoot = transliterationTree.getroot()

    # Fetch all the transliteration rules.  Since release 29 of Latin-ASCII.xml
    # all the transliteration rules are located in a single tRule block, with
    # the rules separated into individual lines.
    blockRules = transliterationTreeRoot.findall("./transforms/transform/tRule")
    assert(len(blockRules) == 1)

    # Split the block of rules into one element per line.
    rules = blockRules[0].text.splitlines()

    # And finish the processing of each individual rule.
    for rule in rules:
        matches = rulePattern.search(rule)

        # The regular expression captures four groups corresponding
        # to the source and target characters.
        #
        # Group 1: plain "src" char.  Empty if group 2 is not.
        # Group 2: unicode-escaped "src" char (e.g. "\u0110").  Empty if group 1 is not.
        #
        # Group 3: "trg" char between quotes.  Empty if group 4 is not.
        # Group 4: plain "trg" char.  Empty if group 3 is not.
        if matches is not None:
            src = matches.group(1) if matches.group(1) is not None else bytes(matches.group(2), 'UTF-8').decode('unicode-escape')
            trg = matches.group(3) if matches.group(3) is not None else matches.group(4)

            # "'" and '"' are escaped
            trg = trg.replace("\\'", "'").replace('\\"', '"')

            # the parser of unaccent only accepts non-whitespace characters
            # for "src" and "trg" (see unaccent.c)
            if not src.isspace() and not trg.isspace():
                charactersSet.add((ord(src), trg))

    return charactersSet
209 """Returns the special cases which are not handled by other methods"""
210 charactersSet = set()
213 charactersSet.add((0x0401, "\u0415
")) # CYRILLIC CAPITAL LETTER IO
214 charactersSet.add((0x0451, "\u0435
")) # CYRILLIC SMALL LETTER IO
216 # Symbols of "Letterlike Symbols
" Unicode Block (U+2100 to U+214F)
217 charactersSet.add((0x2103, "\xb0C
")) # DEGREE CELSIUS
218 charactersSet.add((0x2109, "\xb0F
")) # DEGREE FAHRENHEIT


def main(args):
    # https://www.unicode.org/reports/tr44/tr44-14.html#Character_Decomposition_Mappings
    decomposition_type_pattern = re.compile(" *<[^>]*> *")

    table = {}
    all = []

    # unordered set to ensure uniqueness
    charactersSet = set()

    # read file UnicodeData.txt
    with codecs.open(
            args.unicodeDataFilePath, mode='r', encoding='UTF-8',
    ) as unicodeDataFile:
        # read everything we need into memory
        for line in unicodeDataFile:
            fields = line.split(";")
            if len(fields) > 5:
                # https://www.unicode.org/reports/tr44/tr44-14.html#UnicodeData.txt
                general_category = fields[2]
                decomposition = fields[5]
                decomposition = re.sub(decomposition_type_pattern, ' ', decomposition)
                id = int(fields[0], 16)
                combining_ids = [int(s, 16) for s in decomposition.split(" ") if s != ""]
                codepoint = Codepoint(id, general_category, combining_ids)
                table[id] = codepoint
                all.append(codepoint)
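
    # As an illustration (abridged, not a verbatim UnicodeData.txt line),
    # "00C0;LATIN CAPITAL LETTER A WITH GRAVE;Lu;...;0041 0300;..." yields a
    # Codepoint with id 0x00C0, general_category "Lu" and
    # combining_ids [0x0041, 0x0300].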

    # walk through all the codepoints looking for interesting mappings
    for codepoint in all:
        if codepoint.general_category.startswith('L') and \
           len(codepoint.combining_ids) > 1:
            if is_letter_with_marks(codepoint, table):
                charactersSet.add((codepoint.id,
                                   chr(get_plain_letter(codepoint, table).id)))
            elif args.noLigaturesExpansion is False and is_ligature(codepoint, table):
                charactersSet.add((codepoint.id,
                                   "".join(chr(combining_codepoint.id)
                                           for combining_codepoint
                                           in get_plain_letters(codepoint, table))))
        elif is_mark_to_remove(codepoint):
            charactersSet.add((codepoint.id, None))

    # add CLDR Latin-ASCII characters
    if not args.noLigaturesExpansion:
        charactersSet |= parse_cldr_latin_ascii_transliterator(args.latinAsciiFilePath)
        charactersSet |= special_cases()

    # sort for more convenient display
    charactersList = sorted(charactersSet, key=lambda characterPair: characterPair[0])

    for characterPair in charactersList:
        print_record(characterPair[0], characterPair[1])


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='This script builds unaccent.rules on standard output when given the contents of UnicodeData.txt and Latin-ASCII.xml as arguments.')
    parser.add_argument("--unicode-data-file", help="Path to formatted text file corresponding to UnicodeData.txt.", type=str, required=True, dest='unicodeDataFilePath')
    parser.add_argument("--latin-ascii-file", help="Path to XML file from Unicode Common Locale Data Repository (CLDR) corresponding to the Latin-ASCII transliterator (Latin-ASCII.xml).", type=str, dest='latinAsciiFilePath')
    parser.add_argument("--no-ligatures-expansion", help="Do not expand ligatures and do not use the Unicode CLDR Latin-ASCII transliterator. By default, this option is not enabled and the \"--latin-ascii-file\" argument is required. If this option is enabled, the \"--latin-ascii-file\" argument is optional and ignored.", action="store_true", dest='noLigaturesExpansion')
    args = parser.parse_args()

    if args.noLigaturesExpansion is False and args.latinAsciiFilePath is None:
        sys.stderr.write('You must specify the path to the Latin-ASCII transliterator file with the "--latin-ascii-file" option or use the "--no-ligatures-expansion" option. Use "-h" for help.\n')
        sys.exit(1)

    main(args)