# Secret Labs' Regular Expression Engine
# convert template to internal format
# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
# See the sre.py file for information on usage and redistribution.

import _sre, sre_parse
from sre_constants import *

assert _sre.MAGIC == MAGIC, "SRE module mismatch"

def _compile(code, pattern, flags):
    # internal: compile a (sub)pattern
    emit = code.append
    for op, av in pattern:
        if op in (LITERAL, NOT_LITERAL):
            if flags & SRE_FLAG_IGNORECASE:
                emit(OPCODES[OP_IGNORE[op]])
                emit(_sre.getlower(av, flags))
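                # (Illustrative note: a case-insensitive literal is thus
                # stored as the *_IGNORE form of the opcode followed by the
                # lowercased character code.)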
        elif op is IN:
            if flags & SRE_FLAG_IGNORECASE:
                emit(OPCODES[OP_IGNORE[op]])
                def fixup(literal, flags=flags):
                    return _sre.getlower(literal, flags)
            skip = len(code); emit(0)
            _compile_charset(av, flags, code, fixup)
            code[skip] = len(code) - skip
        elif op is ANY:
            if flags & SRE_FLAG_DOTALL:
                emit(OPCODES[ANY_ALL])
        elif op in (REPEAT, MIN_REPEAT, MAX_REPEAT):
            if flags & SRE_FLAG_TEMPLATE:
                raise error, "internal: unsupported template operator"
                skip = len(code); emit(0)
                _compile(code, av[2], flags)
                emit(OPCODES[SUCCESS])
                code[skip] = len(code) - skip
            elif _simple(av) and op == MAX_REPEAT:
                emit(OPCODES[REPEAT_ONE])
                skip = len(code); emit(0)
                _compile(code, av[2], flags)
                emit(OPCODES[SUCCESS])
                code[skip] = len(code) - skip
            else:
                skip = len(code); emit(0)
                _compile(code, av[2], flags)
                code[skip] = len(code) - skip
                if op == MAX_REPEAT:
                    emit(OPCODES[MAX_UNTIL])
                else:
                    emit(OPCODES[MIN_UNTIL])
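            # (Illustrative note: a "simple" maximizing repeat such as a* is
            # bracketed by REPEAT_ONE ... SUCCESS with the skip word patched
            # to jump past the body; other repeats use the general form
            # terminated by MAX_UNTIL or MIN_UNTIL.)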
        elif op is SUBPATTERN:
            # _compile_info(code, av[1], flags)
            _compile(code, av[1], flags)
        elif op in (SUCCESS, FAILURE):
            emit(OPCODES[op])
        elif op in (ASSERT, ASSERT_NOT):
            emit(OPCODES[op])
            skip = len(code); emit(0)
            lo, hi = av[1].getwidth()
            if lo != hi:
                raise error, "look-behind requires fixed-width pattern"
            emit(lo) # look behind
            _compile(code, av[1], flags)
            emit(OPCODES[SUCCESS])
            code[skip] = len(code) - skip
        elif op is CALL:
            emit(OPCODES[op])
            skip = len(code); emit(0)
            _compile(code, av, flags)
            emit(OPCODES[SUCCESS])
            code[skip] = len(code) - skip
        elif op is AT:
            emit(OPCODES[op])
            if flags & SRE_FLAG_MULTILINE:
                av = AT_MULTILINE.get(av, av)
            if flags & SRE_FLAG_LOCALE:
                av = AT_LOCALE.get(av, av)
            elif flags & SRE_FLAG_UNICODE:
                av = AT_UNICODE.get(av, av)
            emit(ATCODES[av])
        elif op is BRANCH:
            emit(OPCODES[op])
            tail = []
            for av in av[1]:
                skip = len(code); emit(0)
                # _compile_info(code, av, flags)
                _compile(code, av, flags)
                emit(OPCODES[JUMP])
                tail.append(len(code)); emit(0)
                code[skip] = len(code) - skip
            emit(0) # end of branch
            for tail in tail:
                code[tail] = len(code) - tail
        elif op is CATEGORY:
            emit(OPCODES[op])
            if flags & SRE_FLAG_LOCALE:
                emit(CHCODES[CH_LOCALE[av]])
            elif flags & SRE_FLAG_UNICODE:
                emit(CHCODES[CH_UNICODE[av]])
        elif op is GROUPREF:
            if flags & SRE_FLAG_IGNORECASE:
                emit(OPCODES[OP_IGNORE[op]])
        else:
            raise ValueError, ("unsupported operand type", op)

def _compile_charset(charset, flags, code, fixup=None):
    # compile charset subprogram
    emit = code.append
    for op, av in _optimize_charset(charset, fixup):
        elif op is BIGCHARSET:
            code.extend(av)
        elif op is CATEGORY:
            if flags & SRE_FLAG_LOCALE:
                emit(CHCODES[CH_LOCALE[av]])
            elif flags & SRE_FLAG_UNICODE:
                emit(CHCODES[CH_UNICODE[av]])
        else:
            raise error, "internal: unsupported set operator"
    emit(OPCODES[FAILURE])
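
# For instance (illustrative only): a small class such as [abc] is reduced by
# _optimize_charset below to a single RANGE item, so its charset subprogram
# amounts to that one RANGE entry followed by the terminating FAILURE marker.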

def _optimize_charset(charset, fixup):
    # internal: optimize character set
    out = []
    charmap = [0]*256
    try:
        for op, av in charset:
            if op is LITERAL:
                charmap[fixup(av)] = 1
            elif op is RANGE:
                for i in range(fixup(av[0]), fixup(av[1])+1):
                    charmap[i] = 1
            elif op is CATEGORY:
                # XXX: could append to charmap tail
                return charset # cannot compress
    except IndexError:
        # character set contains unicode characters
        return _optimize_unicode(charset, fixup)
    # compress character map
    out.append((LITERAL, p))
    out.append((RANGE, (p, p+n-1)))
    if len(out) < len(charset):
        return out
    data = _mk_bitmap(charmap)
    out.append((CHARSET, data))
    return out
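    # For example (illustrative, assuming no case fixup): the class [abc]
    # arrives here as three LITERAL items; the optimizer can replace them
    # with the single item (RANGE, (97, 99)), which is shorter than the
    # original set and is therefore returned instead of a bitmap.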

def _mk_bitmap(bits):

# To represent a big charset, first a bitmap of all characters in the
# set is constructed. Then, this bitmap is sliced into chunks of 256
# characters, duplicate chunks are eliminated, and each chunk is
# given a number. In the compiled expression, the charset is
# represented by a 16-bit word sequence, consisting of one word for
# the number of different chunks, a sequence of 256 bytes (128 words)
# of chunk numbers indexed by their original chunk position, and a
# sequence of chunks (16 words each).
#
# Compression is normally good: in a typical charset, large ranges of
# Unicode will be either completely excluded (e.g. if only Cyrillic
# letters are to be matched), or completely included (e.g. if large
# subranges of Kanji match). These ranges will be represented by
# chunks of all one-bits or all zero-bits.
#
# Matching can also be done efficiently: the more significant byte of
# the Unicode character is an index into the chunk numbers, and the
# less significant byte is a bit index in the chunk (just like the
# CHARSET matching).
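
# A rough sketch of the chunking step described above, for illustration only:
# the helper name _sketch_chunks is made up and is not used by the engine.
# It slices a 65536-entry character map into 256 chunks of 256 entries,
# keeps one copy of each distinct chunk, and records which stored chunk
# every chunk position refers to.
def _sketch_chunks(charmap):
    comps = {}    # chunk contents -> chunk number
    mapping = []  # chunk position -> chunk number
    chunks = []   # distinct chunks, in first-seen order
    for i in range(256):
        chunk = tuple(charmap[i*256:(i+1)*256])
        num = comps.setdefault(chunk, len(chunks))
        if num == len(chunks): # first occurrence of this chunk
            chunks.append(chunk)
        mapping.append(num)
    return mapping, chunks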

def _optimize_unicode(charset, fixup):
    charmap = [0]*65536
    for op, av in charset:
        if op is LITERAL:
            charmap[fixup(av)] = 1
        elif op is RANGE:
            for i in range(fixup(av[0]), fixup(av[1])+1):
                charmap[i] = 1
        elif op is CATEGORY:
            # XXX: could expand category
            return charset # cannot compress
    # invert the character map (used for negated sets)
    for i in range(65536):
        charmap[i] = not charmap[i]
    # slice the map into 256-character chunks and number them
    for i in range(256):
        chunk = tuple(charmap[i*256:(i+1)*256])
        new = comps.setdefault(chunk, block)
        data += _mk_bitmap(chunk)
    assert MAXCODE == 65535
    # pack two 8-bit chunk numbers into each 16-bit header word
    for i in range(128):
        header.append(mapping[2*i]+256*mapping[2*i+1])
    return [(BIGCHARSET, data)]

def _simple(av):
    # check if av is a "simple" operator
    lo, hi = av[2].getwidth()
    if lo == 0 and hi == MAXREPEAT:
        raise error, "nothing to repeat"
    return lo == hi == 1 and av[2][0][0] != SUBPATTERN
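
# For example (illustrative): the repeat body in "a*" is a single-width
# literal, so _simple() is true and _compile uses REPEAT_ONE for it, while
# "(a)*" has a SUBPATTERN body and takes the general REPEAT path instead.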

def _compile_info(code, pattern, flags):
    # internal: compile an info block. in the current version,
    # this contains min/max pattern width, and an optional literal
    # prefix or a character map
    lo, hi = pattern.getwidth()
    if lo == 0:
        return # not worth it
    # look for a literal prefix
    prefix = []
    prefix_skip = 0
    charset = [] # not used
    if not (flags & SRE_FLAG_IGNORECASE):
        # look for literal prefix
        for op, av in pattern.data:
            if op is LITERAL:
                if len(prefix) == prefix_skip:
                    prefix_skip = prefix_skip + 1
                prefix.append(av)
            elif op is SUBPATTERN and len(av[1]) == 1:
    # if no prefix, look for charset prefix
    if not prefix and pattern.data:
        op, av = pattern.data[0]
        if op is SUBPATTERN and av[1]:
            op, av = av[1][0]
            if op is LITERAL:
                charset.append((op, av))
## print "*** PREFIX", prefix, prefix_skip
## print "*** CHARSET", charset
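    # For instance (illustrative): for the pattern "foo\d+" compiled without
    # IGNORECASE, the literal prefix is [102, 111, 111] ("foo") with
    # prefix_skip 3, and no charset prefix is collected.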
    emit = code.append
    skip = len(code); emit(0)
    mask = 0
    if prefix:
        mask = SRE_INFO_PREFIX
        if len(prefix) == prefix_skip == len(pattern.data):
            mask = mask + SRE_INFO_LITERAL
    elif charset:
        mask = mask + SRE_INFO_CHARSET
    emit(mask)
    prefix = prefix[:MAXCODE]
    emit(len(prefix)) # length
    emit(prefix_skip) # skip
    # generate overlap table
    table = [-1] + ([0]*len(prefix))
    for i in range(len(prefix)):
        table[i+1] = table[i]+1
        while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
            table[i+1] = table[table[i+1]-1]+1
    code.extend(table[1:]) # don't store first entry
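    # (Illustrative note: table[k] is the length of the longest proper prefix
    # of prefix[:k] that is also a suffix of it, i.e. the classic KMP failure
    # function; the prefix "abab", for example, yields [0, 0, 1, 2] once the
    # first entry is dropped.)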
    _compile_charset(charset, 0, code)
    code[skip] = len(code) - skip

STRING_TYPES = [type("")]
STRING_TYPES.append(type(unicode("")))

def _code(p, flags):

    flags = p.pattern.flags | flags
    code = []

    # compile info block
    _compile_info(code, p, flags)

    # compile the pattern
    _compile(code, p.data, flags)

    code.append(OPCODES[SUCCESS])

    return code

def compile(p, flags=0):
    # internal: convert pattern list to internal format

    if type(p) in STRING_TYPES:
        pattern = p
        p = sre_parse.parse(p, flags)
    else:
        pattern = None

    code = _code(p, flags)

    # XXX: <fl> get rid of this limitation!
    assert p.pattern.groups <= 100,\
           "sorry, but this version only supports 100 named groups"

    # map in either direction
    groupindex = p.pattern.groupdict
    indexgroup = [None] * p.pattern.groups
    for k, i in groupindex.items():
        indexgroup[i] = k

    return _sre.compile(
        pattern, flags, code,
        groupindex, indexgroup
        )