cython.declare(sys=object, os=object, copy=object,
               Builtin=object, error=object, warning=object, Naming=object, PyrexTypes=object,
               py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object,
               StructOrUnionScope=object, PyClassScope=object,
               CppClassScope=object, UtilityCode=object, EncodedString=object,
               absolute_path_length=cython.Py_ssize_t)
import sys, os, copy
from itertools import chain

import Builtin
import Options
import DebugFlags
import Naming
import PyrexTypes
import TypeSlots
from Errors import error, warning, InternalError, CompileError
from PyrexTypes import py_object_type, error_type
from Symtab import (ModuleScope, LocalScope, ClosureScope,
                    StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope)
from Code import UtilityCode
from StringEncoding import EncodedString, escape_byte_string, split_string_literal
from Cython.Utils import cached_function

absolute_path_length = 0
def relative_position(pos):
    """
    We embed the relative filename in the generated C file, since we
    don't want to have to regenerate and compile all the source code
    whenever the Python install directory moves (which could happen,
    e.g., when distributing binaries.)

    Takes a position tuple -- (absolute filename, line number, column
    position) -- and returns the filename relative to the current working
    directory together with the line number.
    """
    global absolute_path_length
    if absolute_path_length == 0:
        absolute_path_length = len(os.path.abspath(os.getcwd()))
    return (pos[0].get_filenametable_entry()[absolute_path_length+1:], pos[1])
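# Example (illustrative file names): with a working directory of
# /home/user/project, a position whose filename table entry is
# "/home/user/project/pkg/mod.pyx" at line 12 comes back as
# ("pkg/mod.pyx", 12).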
def embed_position(pos, docstring):
    if not Options.embed_pos_in_docstring:
        return docstring
    pos_line = u'File: %s (starting at line %s)' % relative_position(pos)
    if docstring is None:
        # unicode string
        return EncodedString(pos_line)

    # make sure we can encode the filename in the docstring encoding
    # otherwise make the docstring a unicode string
    encoding = docstring.encoding
    if encoding is not None:
        try:
            pos_line.encode(encoding)
        except UnicodeEncodeError:
            encoding = None

    if not docstring:
        # reuse the string encoding of the original docstring
        doc = EncodedString(pos_line)
    else:
        doc = EncodedString(pos_line + u'\n' + docstring)
    doc.encoding = encoding
    return doc
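# Example (illustrative): with Options.embed_pos_in_docstring enabled, a
# docstring u"Frobnicate." for a function starting at pkg/mod.pyx line 12
# becomes
#     u"File: pkg/mod.pyx (starting at line 12)\nFrobnicate."
# (the file name and docstring here are hypothetical).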
def write_func_call(func, codewriter_class):
    def f(*args, **kwds):
        if len(args) > 1 and isinstance(args[1], codewriter_class):
            # here we annotate the code with this function call
            # but only if new code is generated
            node, code = args[:2]
            marker = ' /* %s -> %s.%s %s */' % (
                ' ' * code.call_level,
                node.__class__.__name__,
                func.__name__,
                node.pos[1:])
            pristine = code.buffer.stream.tell()
            code.putln(marker)
            start = code.buffer.stream.tell()
            code.call_level += 4
            res = func(*args, **kwds)
            code.call_level -= 4
            if start == code.buffer.stream.tell():
                # no new code was written; roll back the marker
                code.buffer.stream.seek(pristine)
            else:
                marker = marker.replace('->', '<-')
                code.putln(marker)
            return res
        else:
            return func(*args, **kwds)
    return f
class VerboseCodeWriter(type):
    # Set this as a metaclass to trace function calls in code.
    # This slows down code generation and makes much larger files.
    def __new__(cls, name, bases, attrs):
        from types import FunctionType
        from Code import CCodeWriter
        for mname, m in attrs.items():
            if isinstance(m, FunctionType):
                attrs[mname] = write_func_call(m, CCodeWriter)
        return super(VerboseCodeWriter, cls).__new__(cls, name, bases, attrs)
class CheckAnalysers(type):
    """Metaclass to check that type analysis functions return a node.
    """
    methods = set(['analyse_types',
                   'analyse_expressions',
                   'analyse_target_types'])

    def __new__(cls, name, bases, attrs):
        from types import FunctionType
        def check(name, func):
            def call(*args, **kwargs):
                retval = func(*args, **kwargs)
                if retval is None:
                    print name, args, kwargs
                return retval
            return call

        for mname, m in attrs.items():
            if isinstance(m, FunctionType) and mname in cls.methods:
                attrs[mname] = check(mname, m)
        return super(CheckAnalysers, cls).__new__(cls, name, bases, attrs)
class Node(object):
    #  pos         (string, int, int)   Source file position
    #  is_name     boolean              Is a NameNode
    #  is_literal  boolean              Is a ConstNode

    #__metaclass__ = CheckAnalysers
    if DebugFlags.debug_trace_code_generation:
        __metaclass__ = VerboseCodeWriter

    # All descendants should set child_attrs to a list of the attributes
    # containing nodes considered "children" in the tree. Each such attribute
    # can either contain a single node or a list of nodes. See Visitor.py.
    child_attrs = None
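    # For example, a statement node with a list of clause nodes and a single
    # optional child node would declare
    #     child_attrs = ["if_clauses", "else_clause"]
    # (attribute names here are illustrative; the concrete nodes below show
    # real uses).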
    # This may be an additional (or 'actual') type that will be checked when
    # this node is coerced to another type. This could be useful to set when
    # the actual type to which it can coerce is known, but you want to leave
    # the type a py_object_type
    coercion_type = None

    def __init__(self, pos, **kw):
        self.pos = pos
        self.__dict__.update(kw)

    gil_message = "Operation"
    def gil_error(self, env=None):
        error(self.pos, "%s not allowed without gil" % self.gil_message)

    cpp_message = "Operation"

    def cpp_check(self, env):
        if not env.is_cpp():
            error(self.pos, "%s only allowed in c++" % self.cpp_message)

    def clone_node(self):
        """Clone the node. This is defined as a shallow copy, except for member lists
        amongst the child attributes (from get_child_accessors) which are also
        copied. Lists containing child nodes are thus seen as a way for the node
        to hold multiple children directly; the list is not treated as a separate
        level in the tree."""
        result = copy.copy(self)
        for attrname in result.child_attrs:
            value = getattr(result, attrname)
            if isinstance(value, list):
                setattr(result, attrname, [x for x in value])
        return result
    #  There are 3 phases of parse tree processing, applied in order to
    #  all the statements in a given scope-block:
    #
    #  (0) analyse_declarations
    #        Make symbol table entries for all declarations at the current
    #        level, both explicit (def, cdef, etc.) and implicit (assignment
    #        to an otherwise undeclared name).
    #
    #  (1) analyse_expressions
    #        Determine the result types of expressions and fill in the
    #        'type' attribute of each ExprNode. Insert coercion nodes into the
    #        tree where needed to convert to and from Python objects.
    #        Allocate temporary locals for intermediate results. Fill
    #        in the 'result_code' attribute of each ExprNode with a C code
    #        fragment.
    #
    #  (2) generate_code
    #        Emit C code for all declarations, statements and expressions.
    #        Recursively applies the 3 processing phases to the bodies of
    #        functions.
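    #  A minimal sketch of the protocol (hypothetical node, for illustration
    #  only; the real nodes below follow the same shape):
    #
    #      class NoOpStatNode(StatNode):
    #          child_attrs = []
    #
    #          def analyse_declarations(self, env):
    #              pass                   # nothing to add to the symbol table
    #
    #          def analyse_expressions(self, env):
    #              return self            # no expressions to type or coerce
    #
    #          def generate_execution_code(self, code):
    #              pass                   # emits no C code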
    def analyse_declarations(self, env):
        pass

    def analyse_expressions(self, env):
        raise InternalError("analyse_expressions not implemented for %s" % \
            self.__class__.__name__)

    def generate_code(self, code):
        raise InternalError("generate_code not implemented for %s" % \
            self.__class__.__name__)

    def annotate(self, code):
        # mro does the wrong thing
        if isinstance(self, BlockNode):
            self.body.annotate(code)

    def end_pos(self):
        try:
            return self._end_pos
        except AttributeError:
            pos = self.pos
            if not self.child_attrs:
                self._end_pos = pos
                return pos
            for attr in self.child_attrs:
                child = getattr(self, attr)
                # Sometimes lists, sometimes nodes
                if child is None:
                    pass
                elif isinstance(child, list):
                    for c in child:
                        pos = max(pos, c.end_pos())
                else:
                    pos = max(pos, child.end_pos())
            self._end_pos = pos
            return pos
    def dump(self, level=0, filter_out=("pos",), cutoff=100, encountered=None):
        """Debug helper method that returns a recursive string representation of this node.
        """
        if cutoff == 0:
            return "<...nesting level cutoff...>"
        if encountered is None:
            encountered = set()
        if id(self) in encountered:
            return "<%s (0x%x) -- already output>" % (self.__class__.__name__, id(self))
        encountered.add(id(self))

        def dump_child(x, level):
            if isinstance(x, Node):
                return x.dump(level, filter_out, cutoff-1, encountered)
            elif isinstance(x, list):
                return "[%s]" % ", ".join([dump_child(item, level) for item in x])
            else:
                return repr(x)

        attrs = [(key, value) for key, value in self.__dict__.items() if key not in filter_out]
        if len(attrs) == 0:
            return "<%s (0x%x)>" % (self.__class__.__name__, id(self))
        else:
            indent = "  " * level
            res = "<%s (0x%x)\n" % (self.__class__.__name__, id(self))
            for key, value in attrs:
                res += "%s %s: %s\n" % (indent, key, dump_child(value, level + 1))
            res += "%s>" % indent
            return res
    def dump_pos(self, mark_column=False, marker='(#)'):
        """Debug helper method that returns the source code context of this node as a string.
        """
        if not self.pos:
            return u''
        source_desc, line, col = self.pos
        contents = source_desc.get_lines(encoding='ASCII',
                                         error_handling='ignore')
        # line numbers start at 1
        lines = contents[max(0, line-3):line]
        current = lines[-1]
        if mark_column:
            current = current[:col] + marker + current[col:]
        lines[-1] = current.rstrip() + u' # <<<<<<<<<<<<<<\n'
        lines += contents[line:line+2]
        return u'"%s":%d:%d\n%s\n' % (
            source_desc.get_escaped_description(), line, col, u''.join(lines))
class CompilerDirectivesNode(Node):
    """
    Sets compiler directives for the children nodes
    """
    #  directives     {string:value}  A dictionary holding the right value for
    #                                 *all* possible directives.
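    #  For example (illustrative values), a block compiled under
    #  "# cython: boundscheck=False" would carry a dictionary like
    #      {'boundscheck': False, 'wraparound': True, ...}
    #  mapping every known directive name to its effective value.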
    #  body           Node
    child_attrs = ["body"]

    def analyse_declarations(self, env):
        old = env.directives
        env.directives = self.directives
        self.body.analyse_declarations(env)
        env.directives = old

    def analyse_expressions(self, env):
        old = env.directives
        env.directives = self.directives
        self.body = self.body.analyse_expressions(env)
        env.directives = old
        return self

    def generate_function_definitions(self, env, code):
        env_old = env.directives
        code_old = code.globalstate.directives
        code.globalstate.directives = self.directives
        self.body.generate_function_definitions(env, code)
        env.directives = env_old
        code.globalstate.directives = code_old

    def generate_execution_code(self, code):
        old = code.globalstate.directives
        code.globalstate.directives = self.directives
        self.body.generate_execution_code(code)
        code.globalstate.directives = old

    def annotate(self, code):
        old = code.globalstate.directives
        code.globalstate.directives = self.directives
        self.body.annotate(code)
        code.globalstate.directives = old
class BlockNode(object):
    #  Mixin class for nodes representing a declaration block.

    def generate_cached_builtins_decls(self, env, code):
        entries = env.global_scope().undeclared_cached_builtins
        for entry in entries:
            code.globalstate.add_cached_builtin_decl(entry)

    def generate_lambda_definitions(self, env, code):
        for node in env.lambda_defs:
            node.generate_function_definitions(env, code)
class StatListNode(Node):
    #  stats     a list of StatNode

    child_attrs = ["stats"]

    def create_analysed(pos, env, *args, **kw):
        node = StatListNode(pos, *args, **kw)
        return node  # No node-specific analysis necessary
    create_analysed = staticmethod(create_analysed)

    def analyse_declarations(self, env):
        #print "StatListNode.analyse_declarations" ###
        for stat in self.stats:
            stat.analyse_declarations(env)

    def analyse_expressions(self, env):
        #print "StatListNode.analyse_expressions" ###
        self.stats = [ stat.analyse_expressions(env)
                       for stat in self.stats ]
        return self

    def generate_function_definitions(self, env, code):
        #print "StatListNode.generate_function_definitions" ###
        for stat in self.stats:
            stat.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        #print "StatListNode.generate_execution_code" ###
        for stat in self.stats:
            code.mark_pos(stat.pos)
            stat.generate_execution_code(code)

    def annotate(self, code):
        for stat in self.stats:
            stat.annotate(code)
class StatNode(Node):
    #
    #  Code generation for statements is split into the following subphases:
    #
    #  (1) generate_function_definitions
    #        Emit C code for the definitions of any structs,
    #        unions, enums and functions defined in the current
    #        scope-block.
    #
    #  (2) generate_execution_code
    #        Emit C code for executable statements.
    #
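    #  Illustrative sketch (hypothetical statement node, not part of the
    #  compiler) showing how the two subphases divide the work:
    #
    #      class CCommentStatNode(StatNode):
    #          child_attrs = []
    #
    #          def generate_function_definitions(self, env, code):
    #              pass                               # nothing to define at C level
    #
    #          def generate_execution_code(self, code):
    #              code.putln("/* %s */" % self.text)  # executable C output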
    def generate_function_definitions(self, env, code):
        pass

    def generate_execution_code(self, code):
        raise InternalError("generate_execution_code not implemented for %s" % \
            self.__class__.__name__)
class CDefExternNode(StatNode):
    #  include_file   string or None
    #  body           StatNode

    child_attrs = ["body"]

    def analyse_declarations(self, env):
        if self.include_file:
            env.add_include_file(self.include_file)
        old_cinclude_flag = env.in_cinclude
        env.in_cinclude = 1
        self.body.analyse_declarations(env)
        env.in_cinclude = old_cinclude_flag

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        pass

    def annotate(self, code):
        self.body.annotate(code)
class CDeclaratorNode(Node):
    #  Part of a C declaration.
    #
    #  Processing during analyse_declarations phase:
    #    analyse
    #      Returns (name, type) pair where name is the
    #      CNameDeclaratorNode of the name being declared
    #      and type is the type it is being declared as.
    #
    #  calling_convention  string   Calling convention of CFuncDeclaratorNode
    #                               for which this is a base

    child_attrs = []

    calling_convention = ""

    def analyse_templates(self):
        # Only C++ functions have templates.
        return None
class CNameDeclaratorNode(CDeclaratorNode):
    #  name    string             The Cython name being declared
    #  cname   string or None     C name, if specified
    #  default ExprNode or None   the value assigned on declaration

    child_attrs = ['default']

    default = None

    def analyse(self, base_type, env, nonempty = 0):
        if nonempty and self.name == '':
            # May have mistaken the name for the type.
            if base_type.is_ptr or base_type.is_array or base_type.is_buffer:
                error(self.pos, "Missing argument name")
            elif base_type.is_void:
                error(self.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
            else:
                self.name = base_type.declaration_code("", for_display=1, pyrex=1)
                base_type = py_object_type

        if base_type.is_fused and env.fused_to_specific:
            base_type = base_type.specialize(env.fused_to_specific)

        self.type = base_type
        return self, base_type
class CPtrDeclaratorNode(CDeclaratorNode):
    #  base     CDeclaratorNode

    child_attrs = ["base"]

    def analyse(self, base_type, env, nonempty = 0):
        if base_type.is_pyobject:
            error(self.pos,
                  "Pointer base type cannot be a Python object")
        ptr_type = PyrexTypes.c_ptr_type(base_type)
        return self.base.analyse(ptr_type, env, nonempty = nonempty)
class CReferenceDeclaratorNode(CDeclaratorNode):
    #  base     CDeclaratorNode

    child_attrs = ["base"]

    def analyse(self, base_type, env, nonempty = 0):
        if base_type.is_pyobject:
            error(self.pos,
                  "Reference base type cannot be a Python object")
        ref_type = PyrexTypes.c_ref_type(base_type)
        return self.base.analyse(ref_type, env, nonempty = nonempty)
class CArrayDeclaratorNode(CDeclaratorNode):
    #  base        CDeclaratorNode
    #  dimension   ExprNode

    child_attrs = ["base", "dimension"]

    def analyse(self, base_type, env, nonempty = 0):
        if base_type.is_cpp_class or base_type.is_cfunction:
            from ExprNodes import TupleNode
            if isinstance(self.dimension, TupleNode):
                args = self.dimension.args
            else:
                args = self.dimension,
            values = [v.analyse_as_type(env) for v in args]
            if None in values:
                ix = values.index(None)
                error(args[ix].pos, "Template parameter not a type")
                base_type = error_type
            else:
                base_type = base_type.specialize_here(self.pos, values)
            return self.base.analyse(base_type, env, nonempty = nonempty)
        if self.dimension:
            self.dimension = self.dimension.analyse_const_expression(env)
            if not self.dimension.type.is_int:
                error(self.dimension.pos, "Array dimension not integer")
            size = self.dimension.get_constant_c_result_code()
        else:
            size = None
        if not base_type.is_complete():
            error(self.pos,
                  "Array element type '%s' is incomplete" % base_type)
        if base_type.is_pyobject:
            error(self.pos,
                  "Array element cannot be a Python object")
        if base_type.is_cfunction:
            error(self.pos,
                  "Array element cannot be a function")
        array_type = PyrexTypes.c_array_type(base_type, size)
        return self.base.analyse(array_type, env, nonempty = nonempty)
class CFuncDeclaratorNode(CDeclaratorNode):
    #  base             CDeclaratorNode
    #  args             [CArgDeclNode]
    #  templates        [TemplatePlaceholderType]
    #  has_varargs      boolean
    #  exception_value  ConstNode
    #  exception_check  boolean    True if PyErr_Occurred check needed
    #  nogil            boolean    Can be called without gil
    #  with_gil         boolean    Acquire gil around function body
    #  is_const_method  boolean    Whether this is a const method

    child_attrs = ["base", "args", "exception_value"]

    optional_arg_count = 0

    def analyse_templates(self):
        if isinstance(self.base, CArrayDeclaratorNode):
            from ExprNodes import TupleNode, NameNode
            template_node = self.base.dimension
            if isinstance(template_node, TupleNode):
                template_nodes = template_node.args
            elif isinstance(template_node, NameNode):
                template_nodes = [template_node]
            else:
                error(template_node.pos, "Template arguments must be a list of names")
                return None
            self.templates = []
            for template in template_nodes:
                if isinstance(template, NameNode):
                    self.templates.append(PyrexTypes.TemplatePlaceholderType(template.name))
                else:
                    error(template.pos, "Template arguments must be a list of names")
            self.base = self.base.base
            return self.templates
        else:
            return None
, return_type
, env
, nonempty
= 0, directive_locals
= {}):
614 for i
, arg_node
in enumerate(self
.args
):
615 name_declarator
, type = arg_node
.analyse(env
, nonempty
= nonempty
,
616 is_self_arg
= (i
== 0 and env
.is_c_class_scope
))
617 name
= name_declarator
.name
618 if name
in directive_locals
:
619 type_node
= directive_locals
[name
]
620 other_type
= type_node
.analyse_as_type(env
)
621 if other_type
is None:
622 error(type_node
.pos
, "Not a type")
623 elif (type is not PyrexTypes
.py_object_type
624 and not type.same_as(other_type
)):
625 error(self
.base
.pos
, "Signature does not agree with previous declaration")
626 error(type_node
.pos
, "Previous declaration here")
629 if name_declarator
.cname
:
631 "Function argument cannot have C name specification")
632 if i
==0 and env
.is_c_class_scope
and type.is_unspecified
:
633 # fix the type of self
634 type = env
.parent_type
635 # Turn *[] argument into **
637 type = PyrexTypes
.c_ptr_type(type.base_type
)
638 # Catch attempted C-style func(void) decl
640 error(arg_node
.pos
, "Use spam() rather than spam(void) to declare a function with no arguments.")
641 func_type_args
.append(
642 PyrexTypes
.CFuncTypeArg(name
, type, arg_node
.pos
))
644 self
.optional_arg_count
+= 1
645 elif self
.optional_arg_count
:
646 error(self
.pos
, "Non-default argument follows default argument")
650 if self
.exception_check
== '+':
651 env
.add_include_file('ios') # for std::ios_base::failure
652 env
.add_include_file('new') # for std::bad_alloc
653 env
.add_include_file('stdexcept')
654 env
.add_include_file('typeinfo') # for std::bad_cast
655 if (return_type
.is_pyobject
656 and (self
.exception_value
or self
.exception_check
)
657 and self
.exception_check
!= '+'):
659 "Exception clause not allowed for function returning Python object")
661 if self
.exception_value
:
662 self
.exception_value
= self
.exception_value
.analyse_const_expression(env
)
663 if self
.exception_check
== '+':
664 exc_val_type
= self
.exception_value
.type
665 if (not exc_val_type
.is_error
666 and not exc_val_type
.is_pyobject
667 and not (exc_val_type
.is_cfunction
668 and not exc_val_type
.return_type
.is_pyobject
669 and not exc_val_type
.args
)):
670 error(self
.exception_value
.pos
,
671 "Exception value must be a Python exception or cdef function with no arguments.")
672 exc_val
= self
.exception_value
674 self
.exception_value
= self
.exception_value
.coerce_to(
675 return_type
, env
).analyse_const_expression(env
)
676 exc_val
= self
.exception_value
.get_constant_c_result_code()
679 "get_constant_c_result_code not implemented for %s" %
680 self
.exception_value
.__class
__.__name
__)
681 if not return_type
.assignable_from(self
.exception_value
.type):
682 error(self
.exception_value
.pos
,
683 "Exception value incompatible with function return type")
684 exc_check
= self
.exception_check
685 if return_type
.is_cfunction
:
687 "Function cannot return a function")
688 func_type
= PyrexTypes
.CFuncType(
689 return_type
, func_type_args
, self
.has_varargs
,
690 optional_arg_count
= self
.optional_arg_count
,
691 exception_value
= exc_val
, exception_check
= exc_check
,
692 calling_convention
= self
.base
.calling_convention
,
693 nogil
= self
.nogil
, with_gil
= self
.with_gil
, is_overridable
= self
.overridable
,
694 is_const_method
= self
.is_const_method
,
695 templates
= self
.templates
)
697 if self
.optional_arg_count
:
698 if func_type
.is_fused
:
699 # This is a bit of a hack... When we need to create specialized CFuncTypes
700 # on the fly because the cdef is defined in a pxd, we need to declare the specialized optional arg
702 def declare_opt_arg_struct(func_type
, fused_cname
):
703 self
.declare_optional_arg_struct(func_type
, env
, fused_cname
)
705 func_type
.declare_opt_arg_struct
= declare_opt_arg_struct
707 self
.declare_optional_arg_struct(func_type
, env
)
709 callspec
= env
.directives
['callspec']
711 current
= func_type
.calling_convention
712 if current
and current
!= callspec
:
713 error(self
.pos
, "cannot have both '%s' and '%s' "
714 "calling conventions" % (current
, callspec
))
715 func_type
.calling_convention
= callspec
716 return self
.base
.analyse(func_type
, env
)
    def declare_optional_arg_struct(self, func_type, env, fused_cname=None):
        """
        Declares the optional argument struct (the struct used to hold the
        values for optional arguments). For fused cdef functions, this is
        deferred as analyse_declarations is called only once (on the fused
        cdef function).
        """
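        # Illustrative shape of the generated struct for a declaration like
        # "cdef int f(int a, int b=1, int c=2)" (names are schematic; the real
        # cnames come from Naming.pyrex_prefix and env.mangle below):
        #
        #     typedef struct {
        #         int __pyx_n;   /* how many optional arguments were passed */
        #         int b;
        #         int c;
        #     } __pyx_opt_args_f;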
        scope = StructOrUnionScope()
        arg_count_member = '%sn' % Naming.pyrex_prefix
        scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos)

        for arg in func_type.args[len(func_type.args)-self.optional_arg_count:]:
            scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject = 1)

        struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name)

        if fused_cname is not None:
            struct_cname = PyrexTypes.get_fused_cname(fused_cname, struct_cname)

        op_args_struct = env.global_scope().declare_struct_or_union(
            name = struct_cname,
            kind = 'struct',
            scope = scope,
            typedef_flag = 0,
            pos = self.pos,
            cname = struct_cname)

        op_args_struct.defined_in_pxd = 1
        op_args_struct.used = 1

        func_type.op_arg_struct = PyrexTypes.c_ptr_type(op_args_struct.type)
class CConstDeclaratorNode(CDeclaratorNode):
    #  base     CDeclaratorNode

    child_attrs = ["base"]

    def analyse(self, base_type, env, nonempty = 0):
        if base_type.is_pyobject:
            error(self.pos,
                  "Const base type cannot be a Python object")
        const = PyrexTypes.c_const_type(base_type)
        return self.base.analyse(const, env, nonempty = nonempty)
class CArgDeclNode(Node):
    #  Item in a function declaration argument list.
    #
    #  base_type      CBaseTypeNode
    #  declarator     CDeclaratorNode
    #  not_none       boolean            Tagged with 'not None'
    #  or_none        boolean            Tagged with 'or None'
    #  accept_none    boolean            Resolved boolean for not_none/or_none
    #  default        ExprNode or None
    #  default_value  PyObjectConst      constant for default value
    #  annotation     ExprNode or None   Py3 function arg annotation
    #  is_self_arg    boolean            Is the "self" arg of an extension type method
    #  is_type_arg    boolean            Is the "class" arg of an extension type classmethod
    #  is_kw_only     boolean            Is a keyword-only argument
    #  is_dynamic     boolean            Non-literal arg stored inside CyFunction

    child_attrs = ["base_type", "declarator", "default", "annotation"]
    name_declarator = None

    def analyse(self, env, nonempty = 0, is_self_arg = False):
        if is_self_arg:
            self.base_type.is_self_arg = self.is_self_arg = True
        if self.type is None:
            # The parser may misinterpret names as types. We fix that here.
            if isinstance(self.declarator, CNameDeclaratorNode) and self.declarator.name == '':
                if nonempty:
                    if self.base_type.is_basic_c_type:
                        # char, short, long called "int"
                        type = self.base_type.analyse(env, could_be_name = True)
                        arg_name = type.declaration_code("")
                    else:
                        arg_name = self.base_type.name
                    self.declarator.name = EncodedString(arg_name)
                    self.base_type.name = None
                    self.base_type.is_basic_c_type = False
                could_be_name = True
            else:
                could_be_name = False
            self.base_type.is_arg = True
            base_type = self.base_type.analyse(env, could_be_name = could_be_name)
            if hasattr(self.base_type, 'arg_name') and self.base_type.arg_name:
                self.declarator.name = self.base_type.arg_name
            # The parser is unable to resolve the ambiguity of [] as part of the
            # type (e.g. in buffers) or empty declarator (as with arrays).
            # This only arises for empty multi-dimensional arrays.
            if (base_type.is_array
                    and isinstance(self.base_type, TemplatedTypeNode)
                    and isinstance(self.declarator, CArrayDeclaratorNode)):
                declarator = self.declarator
                while isinstance(declarator.base, CArrayDeclaratorNode):
                    declarator = declarator.base
                declarator.base = self.base_type.array_declarator
                base_type = base_type.base_type
            return self.declarator.analyse(base_type, env, nonempty = nonempty)

        return self.name_declarator, self.type
    def calculate_default_value_code(self, code):
        if self.default_value is None:
            if self.default:
                if self.default.is_literal:
                    # will not output any code, just assign the result_code
                    self.default.generate_evaluation_code(code)
                    return self.type.cast_code(self.default.result())
                self.default_value = code.get_argument_default_const(self.type)
        return self.default_value
, code
):
844 self
.default
.annotate(code
)
846 def generate_assignment_code(self
, code
, target
=None):
847 default
= self
.default
848 if default
is None or default
.is_literal
:
851 target
= self
.calculate_default_value_code(code
)
852 default
.generate_evaluation_code(code
)
853 default
.make_owned_reference(code
)
854 result
= default
.result_as(self
.type)
855 code
.putln("%s = %s;" % (target
, result
))
856 if self
.type.is_pyobject
:
857 code
.put_giveref(default
.result())
858 default
.generate_post_assignment_code(code
)
859 default
.free_temps(code
)
class CBaseTypeNode(Node):
    #  Abstract base class for C base type nodes.
    #
    #  Processing during analyse_declarations phase:
    #    analyse
    #      Returns the type.

    def analyse_as_type(self, env):
        return self.analyse(env)
class CAnalysedBaseTypeNode(Node):
    #  type            type

    child_attrs = []

    def analyse(self, env, could_be_name = False):
        return self.type
class CSimpleBaseTypeNode(CBaseTypeNode):
    #  name             string
    #  module_path      [string]    Qualifying name components
    #  is_basic_c_type  boolean
    #  signed           boolean
    #  longness         integer
    #  complex          boolean
    #  is_self_arg      boolean     Is self argument of C method
    #  ##is_type_arg    boolean     Is type argument of class method

    child_attrs = []
    arg_name = None   # in case the argument name was interpreted as a type

    is_basic_c_type = False

    def analyse(self, env, could_be_name = False):
        # Return type descriptor.
        #print "CSimpleBaseTypeNode.analyse: is_self_arg =", self.is_self_arg ###
        type = None
        if self.is_basic_c_type:
            type = PyrexTypes.simple_c_type(self.signed, self.longness, self.name)
            if not type:
                error(self.pos, "Unrecognised type modifier combination")
        elif self.name == "object" and not self.module_path:
            type = py_object_type
        elif self.name is None:
            if self.is_self_arg and env.is_c_class_scope:
                #print "CSimpleBaseTypeNode.analyse: defaulting to parent type" ###
                type = env.parent_type
            ## elif self.is_type_arg and env.is_c_class_scope:
            ##     type = Builtin.type_type
            else:
                type = py_object_type
        else:
            if self.module_path:
                # Maybe it's a nested C++ class.
                scope = env
                for item in self.module_path:
                    entry = scope.lookup(item)
                    if entry is not None and entry.is_cpp_class:
                        scope = entry.type.scope
                    else:
                        scope = None
                        break

                if scope is None:
                    # Maybe it's a cimport.
                    scope = env.find_imported_module(self.module_path, self.pos)
                    if scope:
                        scope.fused_to_specific = env.fused_to_specific
            else:
                scope = env

            if scope:
                if scope.is_c_class_scope:
                    scope = scope.global_scope()

                type = scope.lookup_type(self.name)
                if type is not None:
                    pass
                elif could_be_name:
                    if self.is_self_arg and env.is_c_class_scope:
                        type = env.parent_type
                    ## elif self.is_type_arg and env.is_c_class_scope:
                    ##     type = Builtin.type_type
                    else:
                        type = py_object_type
                    self.arg_name = EncodedString(self.name)
                else:
                    if self.templates:
                        if not self.name in self.templates:
                            error(self.pos, "'%s' is not a type identifier" % self.name)
                        type = PyrexTypes.TemplatePlaceholderType(self.name)
                    else:
                        error(self.pos, "'%s' is not a type identifier" % self.name)

        if self.complex:
            if not type.is_numeric or type.is_complex:
                error(self.pos, "can only complexify c numeric types")
            type = PyrexTypes.CComplexType(type)
            type.create_declaration_utility_code(env)
        elif type is Builtin.complex_type:
            # Special case: optimise builtin complex type into C's
            # double complex. The parser cannot do this (as for the
            # normal scalar types) as the user may have redeclared the
            # 'complex' type. Testing for the exact type here works.
            type = PyrexTypes.c_double_complex_type
            type.create_declaration_utility_code(env)

        if type:
            return type
        else:
            return PyrexTypes.error_type
class MemoryViewSliceTypeNode(CBaseTypeNode):

    name = 'memoryview'
    child_attrs = ['base_type_node', 'axes']

    def analyse(self, env, could_be_name = False):

        base_type = self.base_type_node.analyse(env)
        if base_type.is_error: return base_type

        import MemoryView

        try:
            axes_specs = MemoryView.get_axes_specs(env, self.axes)
        except CompileError, e:
            error(e.position, e.message_only)
            self.type = PyrexTypes.ErrorType()
            return self.type

        if not MemoryView.validate_axes(self.pos, axes_specs):
            self.type = error_type
        else:
            MemoryView.validate_memslice_dtype(self.pos, base_type)
            self.type = PyrexTypes.MemoryViewSliceType(base_type, axes_specs)
            self.use_memview_utilities(env)

        return self.type

    def use_memview_utilities(self, env):
        import MemoryView
        env.use_utility_code(MemoryView.view_utility_code)
class CNestedBaseTypeNode(CBaseTypeNode):
    #  For C++ classes that live inside other C++ classes.
    #
    #  base_type    CBaseTypeNode

    child_attrs = ['base_type']

    def analyse(self, env, could_be_name = None):
        base_type = self.base_type.analyse(env)
        if base_type is PyrexTypes.error_type:
            return PyrexTypes.error_type
        if not base_type.is_cpp_class:
            error(self.pos, "'%s' is not a valid type scope" % base_type)
            return PyrexTypes.error_type
        type_entry = base_type.scope.lookup_here(self.name)
        if not type_entry or not type_entry.is_type:
            error(self.pos, "'%s.%s' is not a type identifier" % (base_type, self.name))
            return PyrexTypes.error_type
        return type_entry.type
class TemplatedTypeNode(CBaseTypeNode):
    #  positional_args  [ExprNode]        List of positional arguments
    #  keyword_args     DictNode          Keyword arguments
    #  base_type_node   CBaseTypeNode
    #
    #  type             PyrexTypes.BufferType or PyrexTypes.CppClassType  ...containing the right options

    child_attrs = ["base_type_node", "positional_args",
                   "keyword_args", "dtype_node"]

    dtype_node = None

    name = None

    def analyse(self, env, could_be_name = False, base_type = None):
        if base_type is None:
            base_type = self.base_type_node.analyse(env)
        if base_type.is_error: return base_type

        if base_type.is_cpp_class:
            # Templated class
            if self.keyword_args and self.keyword_args.key_value_pairs:
                error(self.pos, "c++ templates cannot take keyword arguments")
                self.type = PyrexTypes.error_type
            else:
                template_types = []
                for template_node in self.positional_args:
                    type = template_node.analyse_as_type(env)
                    if type is None:
                        error(template_node.pos, "unknown type in template argument")
                        type = error_type
                    template_types.append(type)
                self.type = base_type.specialize_here(self.pos, template_types)

        elif base_type.is_pyobject:
            # Buffer
            import Buffer

            options = Buffer.analyse_buffer_options(
                self.pos,
                env,
                self.positional_args,
                self.keyword_args,
                base_type.buffer_defaults)

            if sys.version_info[0] < 3:
                # Py 2.x enforces byte strings as keyword arguments ...
                options = dict([ (name.encode('ASCII'), value)
                                 for name, value in options.items() ])

            self.type = PyrexTypes.BufferType(base_type, **options)

        else:
            # Array
            empty_declarator = CNameDeclaratorNode(self.pos, name="", cname=None)
            if len(self.positional_args) > 1 or self.keyword_args.key_value_pairs:
                error(self.pos, "invalid array declaration")
                self.type = PyrexTypes.error_type
            else:
                # It would be nice to merge this class with CArrayDeclaratorNode,
                # but arrays are part of the declaration, not the type...
                if not self.positional_args:
                    dimension = None
                else:
                    dimension = self.positional_args[0]
                self.array_declarator = CArrayDeclaratorNode(self.pos,
                    base = empty_declarator,
                    dimension = dimension)
                self.type = self.array_declarator.analyse(base_type, env)[1]

        if self.type.is_fused and env.fused_to_specific:
            self.type = self.type.specialize(env.fused_to_specific)

        return self.type
class CComplexBaseTypeNode(CBaseTypeNode):
    #  base_type   CBaseTypeNode
    #  declarator  CDeclaratorNode

    child_attrs = ["base_type", "declarator"]

    def analyse(self, env, could_be_name = False):
        base = self.base_type.analyse(env, could_be_name)
        _, type = self.declarator.analyse(base, env)
        return type
class FusedTypeNode(CBaseTypeNode):
    """
    Represents a fused type in a ctypedef statement:

        ctypedef cython.fused_type(int, long, long long) integral

    name            str                     name of this fused type
    types           [CSimpleBaseTypeNode]   is the list of types to be fused
    """

    child_attrs = []

    def analyse_declarations(self, env):
        type = self.analyse(env)
        entry = env.declare_typedef(self.name, type, self.pos)

        # Omit the typedef declaration that self.declarator would produce
        entry.in_cinclude = True

    def analyse(self, env):
        types = []
        for type_node in self.types:
            type = type_node.analyse_as_type(env)

            if not type:
                error(type_node.pos, "Not a type")
                continue

            if type in types:
                error(type_node.pos, "Type specified multiple times")
            elif type.is_fused:
                error(type_node.pos, "Cannot fuse a fused type")
            else:
                types.append(type)

        # if len(self.types) == 1:
        #     return types[0]

        return PyrexTypes.FusedType(types, name=self.name)
class CConstTypeNode(CBaseTypeNode):
    #  base_type     CBaseTypeNode

    child_attrs = ["base_type"]

    def analyse(self, env, could_be_name = False):
        base = self.base_type.analyse(env, could_be_name)
        if base.is_pyobject:
            error(self.pos,
                  "Const base type cannot be a Python object")
        return PyrexTypes.c_const_type(base)
class CVarDefNode(StatNode):
    #  C variable definition or forward/extern function declaration.
    #
    #  visibility    'private' or 'public' or 'extern'
    #  base_type     CBaseTypeNode
    #  declarators   [CDeclaratorNode]
    #  overridable   boolean        whether it is a cpdef
    #  modifiers     ['inline']
    #
    #  decorators    [cython.locals(...)] or None
    #  directive_locals { string : NameNode } locals defined by cython.locals(...)

    child_attrs = ["base_type", "declarators"]

    directive_locals = None

    def analyse_declarations(self, env, dest_scope = None):
        if self.directive_locals is None:
            self.directive_locals = {}
        if not dest_scope:
            dest_scope = env
        self.dest_scope = dest_scope

        if self.declarators:
            templates = self.declarators[0].analyse_templates()
        else:
            templates = None
        if templates is not None:
            if self.visibility != 'extern':
                error(self.pos, "Only extern functions allowed")
            if len(self.declarators) > 1:
                error(self.declarators[1].pos, "Can't multiply declare template types")
            env = TemplateScope('func_template', env)
            env.directives = env.outer_scope.directives
            for template_param in templates:
                env.declare_type(template_param.name, template_param, self.pos)

        base_type = self.base_type.analyse(env)

        if base_type.is_fused and not self.in_pxd and (env.is_c_class_scope or
                                                       env.is_module_scope):
            error(self.pos, "Fused types not allowed here")

        visibility = self.visibility

        for declarator in self.declarators:

            if (len(self.declarators) > 1
                    and not isinstance(declarator, CNameDeclaratorNode)
                    and env.directives['warn.multiple_declarators']):
                warning(declarator.pos,
                        "Non-trivial type declarators in shared declaration (e.g. mix of pointers and values). " +
                        "Each pointer declaration should be on its own line.", 1)

            if isinstance(declarator, CFuncDeclaratorNode):
                name_declarator, type = declarator.analyse(base_type, env, directive_locals=self.directive_locals)
            else:
                name_declarator, type = declarator.analyse(base_type, env)
            if not type.is_complete():
                if not (self.visibility == 'extern' and type.is_array or type.is_memoryviewslice):
                    error(declarator.pos,
                          "Variable type '%s' is incomplete" % type)
            if self.visibility == 'extern' and type.is_pyobject:
                error(declarator.pos,
                      "Python object cannot be declared extern")
            name = name_declarator.name
            cname = name_declarator.cname
            if name == '':
                error(declarator.pos, "Missing name in declaration.")
                return
            if type.is_cfunction:
                self.entry = dest_scope.declare_cfunction(name, type, declarator.pos,
                    cname = cname, visibility = self.visibility, in_pxd = self.in_pxd,
                    api = self.api, modifiers = self.modifiers)
                if self.entry is not None:
                    self.entry.is_overridable = self.overridable
                    self.entry.directive_locals = copy.copy(self.directive_locals)
            else:
                if self.directive_locals:
                    error(self.pos, "Decorators can only be followed by functions")
                self.entry = dest_scope.declare_var(name, type, declarator.pos,
                    cname=cname, visibility=visibility, in_pxd=self.in_pxd,
                    api=self.api, is_cdef=1)
                if Options.docstrings:
                    self.entry.doc = embed_position(self.pos, self.doc)
class CStructOrUnionDefNode(StatNode):
    #  name          string
    #  cname         string or None
    #  kind          "struct" or "union"
    #  typedef_flag  boolean
    #  visibility    "public" or "private"
    #  packed        boolean
    #  attributes    [CVarDefNode] or None

    child_attrs = ["attributes"]

    def declare(self, env, scope=None):
        if self.visibility == 'extern' and self.packed and not scope:
            error(self.pos, "Cannot declare extern struct as 'packed'")
        self.entry = env.declare_struct_or_union(
            self.name, self.kind, scope, self.typedef_flag, self.pos,
            self.cname, visibility = self.visibility, api = self.api,
            packed = self.packed)

    def analyse_declarations(self, env):
        scope = None
        if self.attributes is not None:
            scope = StructOrUnionScope(self.name)
        self.declare(env, scope)
        if self.attributes is not None:
            if self.in_pxd and not env.in_cinclude:
                self.entry.defined_in_pxd = 1
            for attr in self.attributes:
                attr.analyse_declarations(env, scope)
            if self.visibility != 'extern':
                for attr in scope.var_entries:
                    type = attr.type
                    while type.is_array:
                        type = type.base_type
                    if type == self.entry.type:
                        error(attr.pos, "Struct cannot contain itself as a member.")

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        pass
class CppClassNode(CStructOrUnionDefNode, BlockNode):
    #  cname         string or None
    #  visibility    "extern"
    #  attributes    [CVarDefNode] or None
    #  base_classes  [CBaseTypeNode]
    #  templates     [string] or None

    def declare(self, env):
        if self.templates is None:
            template_types = None
        else:
            template_types = [PyrexTypes.TemplatePlaceholderType(template_name) for template_name in self.templates]
        self.entry = env.declare_cpp_class(
            self.name, None, self.pos,
            self.cname, base_classes = [], visibility = self.visibility, templates = template_types)

    def analyse_declarations(self, env):
        scope = None
        if self.attributes is not None:
            scope = CppClassScope(self.name, env, templates = self.templates)
        def base_ok(base_class):
            if base_class.is_cpp_class or base_class.is_struct:
                return True
            else:
                error(self.pos, "Base class '%s' not a struct or class." % base_class)
        base_class_types = filter(base_ok, [b.analyse(scope or env) for b in self.base_classes])
        if self.templates is None:
            template_types = None
        else:
            template_types = [PyrexTypes.TemplatePlaceholderType(template_name) for template_name in self.templates]
        self.entry = env.declare_cpp_class(
            self.name, scope, self.pos,
            self.cname, base_class_types, visibility = self.visibility, templates = template_types)
        if self.entry is None:
            return
        self.entry.is_cpp_class = 1
        if scope is not None:
            scope.type = self.entry.type
        defined_funcs = []
        if self.attributes is not None:
            if self.in_pxd and not env.in_cinclude:
                self.entry.defined_in_pxd = 1
            for attr in self.attributes:
                attr.analyse_declarations(scope)
                if isinstance(attr, CFuncDefNode):
                    defined_funcs.append(attr)
                    if self.templates is not None:
                        attr.template_declaration = "template <typename %s>" % ", typename ".join(self.templates)
        self.body = StatListNode(self.pos, stats=defined_funcs)

    def analyse_expressions(self, env):
        self.body = self.body.analyse_expressions(self.entry.type.scope)
        return self

    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(self.entry.type.scope, code)

    def generate_execution_code(self, code):
        self.body.generate_execution_code(code)

    def annotate(self, code):
        self.body.annotate(code)
class CEnumDefNode(StatNode):
    #  name           string or None
    #  cname          string or None
    #  items          [CEnumDefItemNode]
    #  typedef_flag   boolean
    #  visibility     "public" or "private"

    child_attrs = ["items"]

    def declare(self, env):
        self.entry = env.declare_enum(self.name, self.pos,
            cname = self.cname, typedef_flag = self.typedef_flag,
            visibility = self.visibility, api = self.api)

    def analyse_declarations(self, env):
        if self.items is not None:
            if self.in_pxd and not env.in_cinclude:
                self.entry.defined_in_pxd = 1
            for item in self.items:
                item.analyse_declarations(env, self.entry)

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        if self.visibility == 'public' or self.api:
            temp = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
            for item in self.entry.enum_values:
                code.putln("%s = PyInt_FromLong(%s); %s" % (
                    temp,
                    item.cname,
                    code.error_goto_if_null(temp, item.pos)))
                code.put_gotref(temp)
                code.putln('if (PyDict_SetItemString(%s, "%s", %s) < 0) %s' % (
                    Naming.moddict_cname,
                    item.name,
                    temp,
                    code.error_goto(item.pos)))
                code.put_decref_clear(temp, PyrexTypes.py_object_type)
            code.funcstate.release_temp(temp)
class CEnumDefItemNode(StatNode):
    #  name     string
    #  cname    string or None
    #  value    ExprNode or None

    child_attrs = ["value"]

    def analyse_declarations(self, env, enum_entry):
        if self.value:
            self.value = self.value.analyse_const_expression(env)
            if not self.value.type.is_int:
                self.value = self.value.coerce_to(PyrexTypes.c_int_type, env)
                self.value = self.value.analyse_const_expression(env)
        entry = env.declare_const(self.name, enum_entry.type,
            self.value, self.pos, cname = self.cname,
            visibility = enum_entry.visibility, api = enum_entry.api)
        enum_entry.enum_values.append(entry)
class CTypeDefNode(StatNode):
    #  base_type    CBaseTypeNode
    #  declarator   CDeclaratorNode
    #  visibility   "public" or "private"

    child_attrs = ["base_type", "declarator"]

    def analyse_declarations(self, env):
        base = self.base_type.analyse(env)
        name_declarator, type = self.declarator.analyse(base, env)
        name = name_declarator.name
        cname = name_declarator.cname

        entry = env.declare_typedef(name, type, self.pos,
            cname = cname, visibility = self.visibility, api = self.api)

        if type.is_fused:
            entry.in_cinclude = True

        if self.in_pxd and not env.in_cinclude:
            entry.defined_in_pxd = 1

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        pass
, BlockNode
):
1479 # Base class for function definition nodes.
1481 # return_type PyrexType
1482 # #filename string C name of filename string const
1483 # entry Symtab.Entry
1484 # needs_closure boolean Whether or not this function has inner functions/classes/yield
1485 # needs_outer_scope boolean Whether or not this function requires outer scope
1486 # pymethdef_required boolean Force Python method struct generation
1487 # directive_locals { string : ExprNode } locals defined by cython.locals(...)
1488 # directive_returns [ExprNode] type defined by cython.returns(...)
1489 # star_arg PyArgDeclNode or None * argument
1490 # starstar_arg PyArgDeclNode or None ** argument
1492 # has_fused_arguments boolean
1493 # Whether this cdef function has fused parameters. This is needed
1494 # by AnalyseDeclarationsTransform, so it can replace CFuncDefNodes
1495 # with fused argument types with a FusedCFuncDefNode
1498 needs_closure
= False
1499 needs_outer_scope
= False
1500 pymethdef_required
= False
1501 is_generator
= False
1502 is_generator_body
= False
1504 has_fused_arguments
= False
1507 is_cyfunction
= False
    def analyse_default_values(self, env):
        default_seen = 0
        for arg in self.args:
            if arg.default:
                default_seen = 1
                if arg.is_generic:
                    arg.default = arg.default.analyse_types(env)
                    arg.default = arg.default.coerce_to(arg.type, env)
                else:
                    error(arg.pos,
                          "This argument cannot have a default value")
                    arg.default = None
            elif arg.kw_only:
                default_seen = 1
            elif default_seen:
                error(arg.pos, "Non-default argument following default argument")
    def align_argument_type(self, env, arg):
        directive_locals = self.directive_locals
        type = arg.type
        if arg.name in directive_locals:
            type_node = directive_locals[arg.name]
            other_type = type_node.analyse_as_type(env)
            if other_type is None:
                error(type_node.pos, "Not a type")
            elif (type is not PyrexTypes.py_object_type
                  and not type.same_as(other_type)):
                error(arg.base_type.pos, "Signature does not agree with previous declaration")
                error(type_node.pos, "Previous declaration here")
            else:
                arg.type = other_type
        return arg
    def need_gil_acquisition(self, lenv):
        return 0

    def create_local_scope(self, env):
        genv = env
        while genv.is_py_class_scope or genv.is_c_class_scope:
            genv = genv.outer_scope
        if self.needs_closure:
            lenv = ClosureScope(name=self.entry.name,
                                outer_scope=genv,
                                parent_scope=env,
                                scope_name=self.entry.cname)
        else:
            lenv = LocalScope(name=self.entry.name,
                              outer_scope=genv,
                              parent_scope=env)
        lenv.return_type = self.return_type
        type = self.entry.type
        if type.is_cfunction:
            lenv.nogil = type.nogil and not type.with_gil
        self.local_scope = lenv
        lenv.directives = env.directives
        return lenv
, env
, code
):
1567 self
.body
.generate_execution_code(code
)
    def generate_function_definitions(self, env, code):
        import Buffer
        if self.return_type.is_memoryviewslice:
            import MemoryView

        lenv = self.local_scope
        if lenv.is_closure_scope and not lenv.is_passthrough:
            outer_scope_cname = "%s->%s" % (Naming.cur_scope_cname,
                                            Naming.outer_scope_cname)
        else:
            outer_scope_cname = Naming.outer_scope_cname
        lenv.mangle_closure_cnames(outer_scope_cname)
        # Generate closure function definitions
        self.body.generate_function_definitions(lenv, code)
        # generate lambda function definitions
        self.generate_lambda_definitions(lenv, code)

        is_getbuffer_slot = (self.entry.name == "__getbuffer__" and
                             self.entry.scope.is_c_class_scope)
        is_releasebuffer_slot = (self.entry.name == "__releasebuffer__" and
                                 self.entry.scope.is_c_class_scope)
        is_buffer_slot = is_getbuffer_slot or is_releasebuffer_slot
        if is_buffer_slot:
            if 'cython_unused' not in self.modifiers:
                self.modifiers = self.modifiers + ['cython_unused']

        preprocessor_guard = self.get_preprocessor_guard()

        profile = code.globalstate.directives['profile']
        linetrace = code.globalstate.directives['linetrace']
        if (linetrace or profile) and lenv.nogil:
            warning(self.pos, "Cannot profile nogil function.", 1)
            profile = linetrace = False
        if profile or linetrace:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("Profile", "Profile.c"))

        # Generate C code for header and body of function
        code.enter_cfunc_scope()
        code.return_from_error_cleanup_label = code.new_label()

        # ----- Top-level constants used by this function
        code.mark_pos(self.pos)
        self.generate_cached_builtins_decls(lenv, code)
        # ----- Function header
        if preprocessor_guard:
            code.putln(preprocessor_guard)

        with_pymethdef = (self.needs_assignment_synthesis(env, code) or
                          self.pymethdef_required)
        if self.py_func:
            self.py_func.generate_function_header(code,
                with_pymethdef = with_pymethdef,
                proto_only = True)
        self.generate_function_header(code,
            with_pymethdef = with_pymethdef)
        # ----- Local variable declarations
        # Find function scope
        cenv = env
        while cenv.is_py_class_scope or cenv.is_c_class_scope:
            cenv = cenv.outer_scope
        if self.needs_closure:
            code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
            code.putln(";")
        elif self.needs_outer_scope:
            if lenv.is_passthrough:
                code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
                code.putln(";")
            code.put(cenv.scope_class.type.declaration_code(Naming.outer_scope_cname))
            code.putln(";")
        self.generate_argument_declarations(lenv, code)

        for entry in lenv.var_entries:
            if not (entry.in_closure or entry.is_arg):
                code.put_var_declaration(entry)

        # Initialize the return variable __pyx_r
        init = ""
        if not self.return_type.is_void:
            if self.return_type.is_pyobject:
                init = " = NULL"
            elif self.return_type.is_memoryviewslice:
                init = ' = ' + MemoryView.memslice_entry_init
            code.putln(
                "%s%s;" %
                    (self.return_type.declaration_code(Naming.retval_cname),
                     init))

        tempvardecl_code = code.insertion_point()
        self.generate_keyword_list(code)

        if profile or linetrace:
            code.put_trace_declarations()

        # ----- Extern library function declarations
        lenv.generate_library_function_declarations(code)

        # ----- GIL acquisition
        acquire_gil = self.acquire_gil

        # See if we need to acquire the GIL for variable declarations, or for
        # refnanny only.

        # Profiling or closures are not currently possible for cdef nogil
        # functions, but check them anyway
        have_object_args = (self.needs_closure or self.needs_outer_scope or
                            profile or linetrace)
        for arg in lenv.arg_entries:
            if arg.type.is_pyobject:
                have_object_args = True
                break

        acquire_gil_for_var_decls_only = (
            lenv.nogil and lenv.has_with_gil_block and
            (have_object_args or lenv.buffer_entries))

        acquire_gil_for_refnanny_only = (
            lenv.nogil and lenv.has_with_gil_block and not
            acquire_gil_for_var_decls_only)

        use_refnanny = not lenv.nogil or lenv.has_with_gil_block

        if acquire_gil or acquire_gil_for_var_decls_only:
            code.put_ensure_gil()
        elif lenv.nogil and lenv.has_with_gil_block:
            code.declare_gilstate()

        # ----- set up refnanny
        if use_refnanny:
            tempvardecl_code.put_declare_refcount_context()
            code.put_setup_refcount_context(
                self.entry.name, acquire_gil=acquire_gil_for_refnanny_only)

        # ----- Automatic lead-ins for certain special functions
        if is_getbuffer_slot:
            self.getbuffer_init(code)
        # ----- Create closure scope object
        if self.needs_closure:
            tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
            slot_func_cname = TypeSlots.get_slot_function(lenv.scope_class.type.scope, tp_slot)
            if not slot_func_cname:
                slot_func_cname = '%s->tp_new' % lenv.scope_class.type.typeptr_cname
            code.putln("%s = (%s)%s(%s, %s, NULL);" % (
                Naming.cur_scope_cname,
                lenv.scope_class.type.declaration_code(''),
                slot_func_cname,
                lenv.scope_class.type.typeptr_cname,
                Naming.empty_tuple))
            code.putln("if (unlikely(!%s)) {" % Naming.cur_scope_cname)
            if is_getbuffer_slot:
                self.getbuffer_error_cleanup(code)

            code.put_finish_refcount_context()
            if acquire_gil or acquire_gil_for_var_decls_only:
                code.put_release_ensured_gil()

            # FIXME: what if the error return value is a Python value?
            code.putln("return %s;" % self.error_value())
            code.putln("}")
            code.put_gotref(Naming.cur_scope_cname)
            # Note that it is unsafe to decref the scope at this point.
        if self.needs_outer_scope:
            if self.is_cyfunction:
                code.putln("%s = (%s) __Pyx_CyFunction_GetClosure(%s);" % (
                    outer_scope_cname,
                    cenv.scope_class.type.declaration_code(''),
                    Naming.self_cname))
            else:
                code.putln("%s = (%s) %s;" % (
                    outer_scope_cname,
                    cenv.scope_class.type.declaration_code(''),
                    Naming.self_cname))
            if lenv.is_passthrough:
                code.putln("%s = %s;" % (Naming.cur_scope_cname, outer_scope_cname))
            elif self.needs_closure:
                # inner closures own a reference to their outer parent
                code.put_incref(outer_scope_cname, cenv.scope_class.type)
                code.put_giveref(outer_scope_cname)
        # ----- Trace function call
        if profile or linetrace:
            # this looks a bit late, but if we don't get here due to a
            # fatal error beforehand, it's not really worth tracing
            code.put_trace_call(self.entry.name, self.pos)
            code.funcstate.can_trace = True
        # ----- Fetch arguments
        self.generate_argument_parsing_code(env, code)
        # If an argument is assigned to in the body, we must
        # incref it to properly keep track of refcounts.
        is_cdef = isinstance(self, CFuncDefNode)
        for entry in lenv.arg_entries:
            if entry.type.is_pyobject:
                if ((acquire_gil or len(entry.cf_assignments) > 1) and
                        not entry.in_closure):
                    code.put_var_incref(entry)

            # Note: defaults are always incref-ed. For def functions, we
            #       acquire arguments from object conversion, so we have
            #       new references. If we are a cdef function, we need to
            #       incref our arguments
            elif (is_cdef and entry.type.is_memoryviewslice and
                  len(entry.cf_assignments) > 1):
                code.put_incref_memoryviewslice(entry.cname,
                                                have_gil=not lenv.nogil)
        for entry in lenv.var_entries:
            if entry.is_arg and len(entry.cf_assignments) > 1:
                code.put_var_incref(entry)

        # ----- Initialise local buffer auxiliary variables
        for entry in lenv.var_entries + lenv.arg_entries:
            if entry.type.is_buffer and entry.buffer_aux.buflocal_nd_var.used:
                Buffer.put_init_vars(entry, code)

        # ----- Check and convert arguments
        self.generate_argument_type_tests(code)
        # ----- Acquire buffer arguments
        for entry in lenv.arg_entries:
            if entry.type.is_buffer:
                Buffer.put_acquire_arg_buffer(entry, code, self.pos)

        if acquire_gil_for_var_decls_only:
            code.put_release_ensured_gil()

        # -------------------------
        # ----- Function body -----
        # -------------------------
        self.generate_function_body(env, code)

        code.mark_pos(self.pos)
        code.putln("/* function exit code */")

        # ----- Default return value
        if not self.body.is_terminator:
            if self.return_type.is_pyobject:
                #if self.return_type.is_extension_type:
                #    lhs = "(PyObject *)%s" % Naming.retval_cname
                #else:
                lhs = Naming.retval_cname
                code.put_init_to_py_none(lhs, self.return_type)
            else:
                val = self.return_type.default_value
                if val:
                    code.putln("%s = %s;" % (Naming.retval_cname, val))
1816 # ----- Error cleanup
1817 if code
.error_label
in code
.labels_used
:
1818 if not self
.body
.is_terminator
:
1819 code
.put_goto(code
.return_label
)
1820 code
.put_label(code
.error_label
)
1821 for cname
, type in code
.funcstate
.all_managed_temps():
1822 code
.put_xdecref(cname
, type, have_gil
=not lenv
.nogil
)
1824 # Clean up buffers -- this calls a Python function
1825 # so need to save and restore error state
1826 buffers_present
= len(lenv
.buffer_entries
) > 0
1827 memslice_entries
= [e
for e
in lenv
.entries
.itervalues()
1828 if e
.type.is_memoryviewslice
]
1830 code
.globalstate
.use_utility_code(restore_exception_utility_code
)
1831 code
.putln("{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;")
1832 code
.putln("__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);")
1833 for entry
in lenv
.buffer_entries
:
1834 Buffer
.put_release_buffer_code(code
, entry
)
1835 #code.putln("%s = 0;" % entry.cname)
1836 code
.putln("__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}")
            if self.return_type.is_memoryviewslice:
                MemoryView.put_init_entry(Naming.retval_cname, code)
                err_val = Naming.retval_cname
            else:
                err_val = self.error_value()

            exc_check = self.caller_will_check_exceptions()
            if err_val is not None or exc_check:
                # TODO: Fix exception tracing (though currently unused by cProfile).
                # code.globalstate.use_utility_code(get_exception_tuple_utility_code)
                # code.put_trace_exception()

                if lenv.nogil and not lenv.has_with_gil_block:
                    code.put_ensure_gil()

                code.put_add_traceback(self.entry.qualified_name)

                if lenv.nogil and not lenv.has_with_gil_block:
                    code.put_release_ensured_gil()
            else:
                warning(self.entry.pos,
                        "Unraisable exception in function '%s'." %
                        self.entry.qualified_name, 0)
                code.put_unraisable(self.entry.qualified_name)
            default_retval = self.return_type.default_value
            if err_val is None and default_retval:
                err_val = default_retval
            if err_val is not None:
                code.putln("%s = %s;" % (Naming.retval_cname, err_val))

            if is_getbuffer_slot:
                self.getbuffer_error_cleanup(code)

            # If we are using the non-error cleanup section we should
            # jump past it if we have an error. The if-test below determines
            # whether this section is used.
            if buffers_present or is_getbuffer_slot or self.return_type.is_memoryviewslice:
                code.put_goto(code.return_from_error_cleanup_label)
        # ----- Non-error return cleanup
        code.put_label(code.return_label)
        for entry in lenv.buffer_entries:
            Buffer.put_release_buffer_code(code, entry)
        if is_getbuffer_slot:
            self.getbuffer_normal_cleanup(code)

        if self.return_type.is_memoryviewslice:
            # See if our return value is uninitialized on non-error return
            # MemoryView.err_if_nogil_initialized_check(self.pos, env)
            cond = code.unlikely(self.return_type.error_condition(
                    Naming.retval_cname))
            code.putln('if (%s) {' % cond)
            code.put_ensure_gil()
            code.putln('PyErr_SetString(PyExc_TypeError, '
                       '"Memoryview return value is not initialized");')
            code.put_release_ensured_gil()
            code.putln('}')

        # ----- Return cleanup for both error and no-error return
        code.put_label(code.return_from_error_cleanup_label)

        for entry in lenv.var_entries:
            if not entry.used or entry.in_closure:
                continue

            if entry.type.is_memoryviewslice:
                code.put_xdecref_memoryviewslice(entry.cname,
                                                 have_gil=not lenv.nogil)
            elif entry.type.is_pyobject:
                if not entry.is_arg or len(entry.cf_assignments) > 1:
                    code.put_var_decref(entry)

        # Decref any increfed args
        for entry in lenv.arg_entries:
            if entry.type.is_pyobject:
                if ((acquire_gil or len(entry.cf_assignments) > 1) and
                        not entry.in_closure):
                    code.put_var_decref(entry)
            elif (entry.type.is_memoryviewslice and
                  (not is_cdef or len(entry.cf_assignments) > 1)):
                # decref slices of def functions and acquired slices from cdef
                # functions, but not borrowed slices from cdef functions.
                code.put_xdecref_memoryviewslice(entry.cname,
                                                 have_gil=not lenv.nogil)
        if self.needs_closure:
            code.put_decref(Naming.cur_scope_cname, lenv.scope_class.type)
        # This code is duplicated in ModuleNode.generate_module_init_func
        default_retval = self.return_type.default_value
        err_val = self.error_value()
        if err_val is None and default_retval:
            err_val = default_retval  # FIXME: why is err_val not used?
        if self.return_type.is_pyobject:
            code.put_xgiveref(self.return_type.as_pyobject(Naming.retval_cname))

        if self.entry.is_special and self.entry.name == "__hash__":
            # Returning -1 for __hash__ is supposed to signal an error
            # We do the same as Python instances and coerce -1 into -2.
            code.putln("if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;" % (
                Naming.retval_cname, Naming.retval_cname))

        if profile or linetrace:
            code.funcstate.can_trace = False
            if self.return_type.is_pyobject:
                code.put_trace_return(Naming.retval_cname)
            else:
                code.put_trace_return("Py_None")

        if not lenv.nogil:
            # GIL holding function
            code.put_finish_refcount_context()

        if acquire_gil or (lenv.nogil and lenv.has_with_gil_block):
            # release the GIL (note that with-gil blocks acquire it on exit in their EnsureGILNode)
            code.put_release_ensured_gil()

        if not self.return_type.is_void:
            code.putln("return %s;" % Naming.retval_cname)

        if preprocessor_guard:
            code.putln("#endif /*!(%s)*/" % preprocessor_guard)

        # ----- Go back and insert temp variable declarations
        tempvardecl_code.put_temp_declarations(code.funcstate)

        # ----- Python version
        code.exit_cfunc_scope()
        if self.py_func is not None:
            self.py_func.generate_function_definitions(env, code)
        self.generate_wrapper_functions(code)
    def declare_argument(self, env, arg):
        if arg.type.is_void:
            error(arg.pos, "Invalid use of 'void'")
        elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice):
            error(arg.pos,
                  "Argument type '%s' is incomplete" % arg.type)
        return env.declare_arg(arg.name, arg.type, arg.pos)

    def generate_arg_type_test(self, arg, code):
        # Generate type test for one argument.
        if arg.type.typeobj_is_available():
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ArgTypeTest", "FunctionArguments.c"))
            typeptr_cname = arg.type.typeptr_cname
            arg_code = "((PyObject *)%s)" % arg.entry.cname
            code.putln(
                'if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, "%s", %s))) %s' % (
                    arg_code,
                    typeptr_cname,
                    arg.accept_none,
                    arg.name,
                    arg.type.is_builtin_type,
                    code.error_goto(arg.pos)))
        else:
            error(arg.pos, "Cannot test type of extern C class "
                  "without type object name specification")

    def generate_arg_none_check(self, arg, code):
        # Generate None check for one argument.
        if arg.type.is_memoryviewslice:
            cname = "%s.memview" % arg.entry.cname
        else:
            cname = arg.entry.cname
        code.putln('if (unlikely(((PyObject *)%s) == Py_None)) {' % cname)
        code.putln('''PyErr_Format(PyExc_TypeError, "Argument '%%.%ds' must not be None", "%s"); %s''' % (
            max(200, len(arg.name)), arg.name,
            code.error_goto(arg.pos)))
        code.putln('}')
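    # Illustrative sketch only (not part of the original source): for an argument
    # named "obj" with a hypothetical cname "__pyx_v_obj", the two putln() calls
    # above emit C roughly like
    #
    #     if (unlikely(((PyObject *)__pyx_v_obj) == Py_None)) {
    #         PyErr_Format(PyExc_TypeError,
    #             "Argument '%.200s' must not be None", "obj"); <error goto>
    #     }
    #
    # The exact cname and error-goto label depend on the enclosing function.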
    def generate_wrapper_functions(self, code):
        pass

    def generate_execution_code(self, code):
        # Evaluate and store argument default values
        for arg in self.args:
            if not arg.is_dynamic:
                arg.generate_assignment_code(code)

    # Special code for the __getbuffer__ function

    def getbuffer_init(self, code):
        info = self.local_scope.arg_entries[1].cname
        # Python 3.0 betas have a bug in memoryview which makes it call
        # getbuffer with a NULL parameter. For now we work around this;
        # the following block should be removed when this bug is fixed.
        code.putln("if (%s != NULL) {" % info)
        code.putln("%s->obj = Py_None; __Pyx_INCREF(Py_None);" % info)
        code.put_giveref("%s->obj" % info)  # Do not refnanny object within structs
        code.putln("}")

    def getbuffer_error_cleanup(self, code):
        info = self.local_scope.arg_entries[1].cname
        code.putln("if (%s != NULL && %s->obj != NULL) {"
                   % (info, info))
        code.put_gotref("%s->obj" % info)
        code.putln("__Pyx_DECREF(%s->obj); %s->obj = NULL;"
                   % (info, info))
        code.putln("}")

    def getbuffer_normal_cleanup(self, code):
        info = self.local_scope.arg_entries[1].cname
        code.putln("if (%s != NULL && %s->obj == Py_None) {" % (info, info))
        code.put_gotref("Py_None")
        code.putln("__Pyx_DECREF(Py_None); %s->obj = NULL;" % info)
        code.putln("}")

    def get_preprocessor_guard(self):
        if not self.entry.is_special:
            return None
        name = self.entry.name
        slot = TypeSlots.method_name_to_slot.get(name)
        if not slot:
            return None
        if name == '__long__' and not self.entry.scope.lookup_here('__int__'):
            return None
        if name in ("__getbuffer__", "__releasebuffer__") and self.entry.scope.is_c_class_scope:
            return None
        return slot.preprocessor_guard_code()
class CFuncDefNode(FuncDefNode):
    #  C function definition.
    #
    #  modifiers             ['inline']
    #  visibility            'private' or 'public' or 'extern'
    #  base_type             CBaseTypeNode
    #  declarator            CDeclaratorNode
    #  cfunc_declarator      the CFuncDeclarator of this function
    #                        (this is also available through declarator or a
    #                        base thereof)
    #  decorators            [DecoratorNode]   list of decorators
    #  with_gil              boolean           Acquire GIL around body
    #  py_func               wrapper for calling from Python
    #  overridable           whether or not this is a cpdef function
    #  inline_in_pxd         whether this is an inline function in a pxd file
    #  template_declaration  String or None    Used for c++ class methods
    #  is_const_method       whether this is a const method

    child_attrs = ["base_type", "declarator", "body", "py_func"]

    inline_in_pxd = False
    directive_locals = None
    directive_returns = None
    template_declaration = None
    is_const_method = False
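    # For orientation only (hypothetical .pyx input, not taken from this file):
    # a CFuncDefNode is built for declarations such as
    #
    #     cdef int add(int a, int b):
    #         return a + b
    #
    #     cpdef double scale(double x, double factor=2.0):   # overridable cpdef
    #         return x * factor
    #
    # The cpdef form additionally gets a Python-callable wrapper, see
    # declare_cpdef_wrapper() below.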
    def unqualified_name(self):
        return self.entry.name

    def analyse_declarations(self, env):
        if self.directive_locals is None:
            self.directive_locals = {}
        self.directive_locals.update(env.directives['locals'])
        if self.directive_returns is not None:
            base_type = self.directive_returns.analyse_as_type(env)
            if base_type is None:
                error(self.directive_returns.pos, "Not a type")
                base_type = PyrexTypes.error_type
        else:
            base_type = self.base_type.analyse(env)
        # The 2 here is because we need both function and argument names.
        if isinstance(self.declarator, CFuncDeclaratorNode):
            name_declarator, type = self.declarator.analyse(base_type, env,
                                                            nonempty = 2 * (self.body is not None),
                                                            directive_locals = self.directive_locals)
        else:
            name_declarator, type = self.declarator.analyse(base_type, env, nonempty = 2 * (self.body is not None))
        if not type.is_cfunction:
            error(self.pos,
                  "Suite attached to non-function declaration")
        # Remember the actual type according to the function header
        # written here, because the type in the symbol table entry
        # may be different if we're overriding a C method inherited
        # from the base type of an extension type.
        self.type = type
        type.is_overridable = self.overridable
        declarator = self.declarator
        while not hasattr(declarator, 'args'):
            declarator = declarator.base

        self.cfunc_declarator = declarator
        self.args = declarator.args

        opt_arg_count = self.cfunc_declarator.optional_arg_count
        if (self.visibility == 'public' or self.api) and opt_arg_count:
            error(self.cfunc_declarator.pos,
                  "Function with optional arguments may not be declared "
                  "public or api")

        if (type.exception_check == '+' and self.visibility != 'extern'):
            warning(self.cfunc_declarator.pos,
                    "Only extern functions can throw C++ exceptions.")

        for formal_arg, type_arg in zip(self.args, type.args):
            self.align_argument_type(env, type_arg)
            formal_arg.type = type_arg.type
            formal_arg.name = type_arg.name
            formal_arg.cname = type_arg.cname

            self._validate_type_visibility(type_arg.type, type_arg.pos, env)

            if type_arg.type.is_fused:
                self.has_fused_arguments = True

            if type_arg.type.is_buffer and 'inline' in self.modifiers:
                warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)

            if type_arg.type.is_buffer:
                if self.type.nogil:
                    error(formal_arg.pos,
                          "Buffer may not be acquired without the GIL. "
                          "Consider using memoryview slices instead.")
                elif 'inline' in self.modifiers:
                    warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)

        self._validate_type_visibility(type.return_type, self.pos, env)

        name = name_declarator.name
        cname = name_declarator.cname

        type.is_const_method = self.is_const_method
        self.entry = env.declare_cfunction(
            name, type, self.pos,
            cname = cname, visibility = self.visibility, api = self.api,
            defining = self.body is not None, modifiers = self.modifiers)
        self.entry.inline_func_in_pxd = self.inline_in_pxd
        self.return_type = type.return_type
        if self.return_type.is_array and self.visibility != 'extern':
            error(self.pos,
                  "Function cannot return an array")
        if self.return_type.is_cpp_class:
            self.return_type.check_nullary_constructor(self.pos, "used as a return value")

        if self.overridable and not env.is_module_scope:
            if len(self.args) < 1 or not self.args[0].type.is_pyobject:
                # An error will be produced in the cdef function
                self.overridable = False

        self.declare_cpdef_wrapper(env)
        self.create_local_scope(env)
    def declare_cpdef_wrapper(self, env):
        if self.overridable:
            name = self.entry.name
            py_func_body = self.call_self_node(is_module_scope = env.is_module_scope)
            self.py_func = DefNode(pos = self.pos,
                                   name = self.entry.name,
                                   args = self.args,
                                   star_arg = None,
                                   starstar_arg = None,
                                   doc = self.doc,
                                   body = py_func_body,
                                   is_wrapper = 1)
            self.py_func.is_module_scope = env.is_module_scope
            self.py_func.analyse_declarations(env)
            self.entry.as_variable = self.py_func.entry
            self.entry.used = self.entry.as_variable.used = True
            # Reset scope entry for the above cfunction
            env.entries[name] = self.entry
            if (not self.entry.is_final_cmethod and
                    (not env.is_module_scope or Options.lookup_module_cpdef)):
                self.override = OverrideCheckNode(self.pos, py_func = self.py_func)
                self.body = StatListNode(self.pos, stats=[self.override, self.body])

    def _validate_type_visibility(self, type, pos, env):
        """
        Ensure that types used in cdef functions are public or api, or
        defined in a C header.
        """
        public_or_api = (self.visibility == 'public' or self.api)
        entry = getattr(type, 'entry', None)
        if public_or_api and entry and env.is_module_scope:
            if not (entry.visibility in ('public', 'extern') or
                    entry.api or entry.in_cinclude):
                error(pos, "Function declared public or api may not have "
                      "private types")
    def call_self_node(self, omit_optional_args=0, is_module_scope=0):
        args = self.type.args
        if omit_optional_args:
            args = args[:len(args) - self.type.optional_arg_count]
        arg_names = [arg.name for arg in args]
        if is_module_scope:
            cfunc = ExprNodes.NameNode(self.pos, name=self.entry.name)
        else:
            self_arg = ExprNodes.NameNode(self.pos, name=arg_names[0])
            cfunc = ExprNodes.AttributeNode(self.pos, obj=self_arg, attribute=self.entry.name)
        skip_dispatch = not is_module_scope or Options.lookup_module_cpdef
        c_call = ExprNodes.SimpleCallNode(self.pos, function=cfunc, args=[ExprNodes.NameNode(self.pos, name=n) for n in arg_names[1-is_module_scope:]], wrapper_call=skip_dispatch)
        return ReturnStatNode(pos=self.pos, return_type=PyrexTypes.py_object_type, value=c_call)
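    # Rough illustration (assumed behaviour, not quoted from the source): for a
    # cpdef method "meth(self, x)" the node tree built above is equivalent to the
    # Python statement "return self.meth(x)"; for a module-level cpdef function
    # "func(x)" it is equivalent to "return func(x)". The wrapper_call flag lets
    # the generated call skip the Python-override dispatch where that is safe.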
    def declare_arguments(self, env):
        for arg in self.type.args:
            if not arg.name:
                error(arg.pos, "Missing argument name")
            self.declare_argument(env, arg)

    def need_gil_acquisition(self, lenv):
        return self.type.with_gil

    def nogil_check(self, env):
        type = self.type
        with_gil = type.with_gil
        if type.nogil and not with_gil:
            if type.return_type.is_pyobject:
                error(self.pos,
                      "Function with Python return type cannot be declared nogil")
            for entry in self.local_scope.var_entries:
                if entry.type.is_pyobject and not entry.in_with_gil_block:
                    error(self.pos, "Function declared nogil has Python locals or temporaries")

    def analyse_expressions(self, env):
        self.local_scope.directives = env.directives
        if self.py_func is not None:
            # this will also analyse the default values
            self.py_func = self.py_func.analyse_expressions(env)
        else:
            self.analyse_default_values(env)
        self.acquire_gil = self.need_gil_acquisition(self.local_scope)
        return self

    def needs_assignment_synthesis(self, env, code=None):
        return False
    def generate_function_header(self, code, with_pymethdef, with_opt_args = 1, with_dispatch = 1, cname = None):
        scope = self.local_scope
        arg_decls = []
        type = self.type
        for arg in type.args[:len(type.args)-type.optional_arg_count]:
            arg_decl = arg.declaration_code()
            entry = scope.lookup(arg.name)
            if not entry.cf_used:
                arg_decl = 'CYTHON_UNUSED %s' % arg_decl
            arg_decls.append(arg_decl)
        if with_dispatch and self.overridable:
            dispatch_arg = PyrexTypes.c_int_type.declaration_code(
                Naming.skip_dispatch_cname)
            if self.override:
                arg_decls.append(dispatch_arg)
            else:
                arg_decls.append('CYTHON_UNUSED %s' % dispatch_arg)
        if type.optional_arg_count and with_opt_args:
            arg_decls.append(type.op_arg_struct.declaration_code(Naming.optional_args_cname))
        if type.has_varargs:
            arg_decls.append("...")
        if not arg_decls:
            arg_decls = ["void"]
        if cname is None:
            cname = self.entry.func_cname
        entity = type.function_header_code(cname, ', '.join(arg_decls))
        if self.entry.visibility == 'private' and '::' not in cname:
            storage_class = "static "
        else:
            storage_class = ""
        dll_linkage = None
        modifiers = code.build_function_modifiers(self.entry.func_modifiers)

        header = self.return_type.declaration_code(entity, dll_linkage=dll_linkage)
        #print (storage_class, modifiers, header)
        if self.template_declaration:
            code.putln(self.template_declaration)
        code.putln("%s%s%s {" % (storage_class, modifiers, header))

    def generate_argument_declarations(self, env, code):
        scope = self.local_scope
        for arg in self.args:
            if arg.default:
                entry = scope.lookup(arg.name)
                if self.override or entry.cf_used:
                    result = arg.calculate_default_value_code(code)
                    code.putln('%s = %s;' % (
                        arg.type.declaration_code(arg.cname), result))

    def generate_keyword_list(self, code):
        pass
    def generate_argument_parsing_code(self, env, code):
        i = 0
        used = 0
        if self.type.optional_arg_count:
            scope = self.local_scope
            code.putln('if (%s) {' % Naming.optional_args_cname)
            for arg in self.args:
                if arg.default:
                    entry = scope.lookup(arg.name)
                    if self.override or entry.cf_used:
                        code.putln('if (%s->%sn > %s) {' %
                                   (Naming.optional_args_cname,
                                    Naming.pyrex_prefix, i))
                        declarator = arg.declarator
                        while not hasattr(declarator, 'name'):
                            declarator = declarator.base
                        code.putln('%s = %s->%s;' %
                                   (arg.cname, Naming.optional_args_cname,
                                    self.type.opt_arg_cname(declarator.name)))
                        used += 1
                    i += 1
            for _ in range(used):
                code.putln('}')
            code.putln('}')
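    # Illustrative sketch only (hypothetical cnames): for a cdef function with one
    # optional argument "b", the putln() calls above produce C along the lines of
    #
    #     if (__pyx_optional_args) {
    #         if (__pyx_optional_args->__pyx_n > 0) {
    #             __pyx_v_b = __pyx_optional_args->b;
    #         }
    #     }
    #
    # where the struct member name comes from self.type.opt_arg_cname().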
    def generate_argument_conversion_code(self, code):
        pass

    def generate_argument_type_tests(self, code):
        # Generate type tests for args whose type in a parent
        # class is a supertype of the declared type.
        for arg in self.type.args:
            if arg.needs_type_test:
                self.generate_arg_type_test(arg, code)
            elif arg.type.is_pyobject and not arg.accept_none:
                self.generate_arg_none_check(arg, code)

    def error_value(self):
        if self.return_type.is_pyobject:
            return None
        else:
            return self.entry.type.exception_value

    def caller_will_check_exceptions(self):
        return self.entry.type.exception_check

    def generate_wrapper_functions(self, code):
        # If the C signature of a function has changed, we need to generate
        # wrappers to put in the slots here.
        k = 0
        entry = self.entry
        func_type = entry.type
        while entry.prev_entry is not None:
            k += 1
            entry = entry.prev_entry
            entry.func_cname = "%s%swrap_%s" % (self.entry.func_cname, Naming.pyrex_prefix, k)
            self.generate_function_header(code,
                                          0,
                                          with_dispatch = entry.type.is_overridable,
                                          with_opt_args = entry.type.optional_arg_count,
                                          cname = entry.func_cname)
            if not self.return_type.is_void:
                code.put('return ')
            args = self.type.args
            arglist = [arg.cname for arg in args[:len(args)-self.type.optional_arg_count]]
            if entry.type.is_overridable:
                arglist.append(Naming.skip_dispatch_cname)
            elif func_type.is_overridable:
                arglist.append('0')
            if entry.type.optional_arg_count:
                arglist.append(Naming.optional_args_cname)
            elif func_type.optional_arg_count:
                arglist.append('NULL')
            code.putln('%s(%s);' % (self.entry.func_cname, ', '.join(arglist)))
            code.putln('}')
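    # Rough illustration (assumed cnames): each wrapper emitted above is a thin
    # forwarding function of roughly this shape
    #
    #     static double __pyx_f_3mod_3Cls_area__pyx_wrap_1(...) {
    #         return __pyx_f_3mod_3Cls_area(...);
    #     }
    #
    # so slots that still reference the old signature keep working while calling
    # the current implementation.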
class PyArgDeclNode(Node):
    # Argument which must be a Python object (used
    # for * and ** arguments).
    #
    # entry        Symtab.Entry
    # annotation   ExprNode or None   Py3 argument annotation

    def generate_function_definitions(self, env, code):
        self.entry.generate_function_definitions(env, code)


class DecoratorNode(Node):
    # decorator    NameNode or CallNode or AttributeNode

    child_attrs = ['decorator']


class DefNode(FuncDefNode):
    # A Python function definition.
    #
    # name          string                 the Python name of the function
    # lambda_name   string                 the internal name of a lambda 'function'
    # decorators    [DecoratorNode]        list of decorators
    # args          [CArgDeclNode]         formal arguments
    # doc           EncodedString or None
    # return_type_annotation
    #               ExprNode or None       the Py3 return type annotation
    #
    # The following subnode is constructed internally
    # when the def statement is inside a Python class definition.
    #
    # fused_py_func         DefNode     The original fused cpdef DefNode
    #                                   (in case this is a specialization)
    # specialized_cpdefs    [DefNode]   list of specialized cpdef DefNodes
    # py_cfunc_node         PyCFunctionNode/InnerFunctionNode   The PyCFunction to create and assign
    #
    # decorator_indirection IndirectionNode   Used to remove __Pyx_Method_ClassMethod for fused functions

    child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators"]

    reqd_kw_flags_cname = "0"
    no_assignment_synthesis = 0
    return_type_annotation = None

    py_cfunc_node = None
    requires_classobj = False
    defaults_struct = None  # Dynamic kwds structure name
    fused_py_func = False
    specialized_cpdefs = None
    py_wrapper_required = True
    defaults_getter = None
    def __init__(self, pos, **kwds):
        FuncDefNode.__init__(self, pos, **kwds)
        k = rk = r = 0
        for arg in self.args:
            if arg.kw_only:
                k += 1
                if not arg.default:
                    rk += 1
            if not arg.default:
                r += 1
        self.num_kwonly_args = k
        self.num_required_kw_args = rk
        self.num_required_args = r

    def as_cfunction(self, cfunc=None, scope=None, overridable=True, returns=None):
        if self.star_arg:
            error(self.star_arg.pos, "cdef function cannot have star argument")
        if self.starstar_arg:
            error(self.starstar_arg.pos, "cdef function cannot have starstar argument")
        if cfunc is None:
            cfunc_args = []
            for formal_arg in self.args:
                name_declarator, type = formal_arg.analyse(scope, nonempty=1)
                cfunc_args.append(PyrexTypes.CFuncTypeArg(name = name_declarator.name,
                                                          cname = None,
                                                          type = py_object_type,
                                                          pos = formal_arg.pos))
            cfunc_type = PyrexTypes.CFuncType(return_type = py_object_type,
                                              args = cfunc_args,
                                              has_varargs = False,
                                              exception_value = None,
                                              exception_check = False,
                                              is_overridable = overridable)
            cfunc = CVarDefNode(self.pos, type=cfunc_type)
        else:
            cfunc_type = cfunc.type
            if len(self.args) != len(cfunc_type.args) or cfunc_type.has_varargs:
                error(self.pos, "wrong number of arguments")
                error(cfunc.pos, "previous declaration here")
            for i, (formal_arg, type_arg) in enumerate(zip(self.args, cfunc_type.args)):
                name_declarator, type = formal_arg.analyse(scope, nonempty=1,
                                                           is_self_arg = (i == 0 and scope.is_c_class_scope))
                if type is None or type is PyrexTypes.py_object_type:
                    formal_arg.type = type_arg.type
                    formal_arg.name_declarator = name_declarator

        if cfunc_type.exception_value is None:
            exception_value = None
        else:
            exception_value = ExprNodes.ConstNode(self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type)
        declarator = CFuncDeclaratorNode(self.pos,
                                         base = CNameDeclaratorNode(self.pos, name=self.name, cname=None),
                                         args = self.args,
                                         has_varargs = False,
                                         exception_check = cfunc_type.exception_check,
                                         exception_value = exception_value,
                                         with_gil = cfunc_type.with_gil,
                                         nogil = cfunc_type.nogil)
        return CFuncDefNode(self.pos,
                            modifiers = [],
                            base_type = CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type),
                            declarator = declarator,
                            body = self.body,
                            doc = self.doc,
                            overridable = cfunc_type.is_overridable,
                            type = cfunc_type,
                            with_gil = cfunc_type.with_gil,
                            nogil = cfunc_type.nogil,
                            visibility = 'private',
                            api = False,
                            directive_locals = getattr(cfunc, 'directive_locals', {}),
                            directive_returns = returns)

    def is_cdef_func_compatible(self):
        """Determines if the function's signature is compatible with a
        cdef function.  This can be used before calling
        .as_cfunction() to see if that will be successful.
        """
        if self.needs_closure:
            return False
        if self.star_arg or self.starstar_arg:
            return False
        return True
    def analyse_declarations(self, env):
        self.is_classmethod = self.is_staticmethod = False
        if self.decorators:
            for decorator in self.decorators:
                func = decorator.decorator
                if func.is_name:
                    self.is_classmethod |= func.name == 'classmethod'
                    self.is_staticmethod |= func.name == 'staticmethod'

        if self.is_classmethod and env.lookup_here('classmethod'):
            # classmethod() was overridden - not much we can do here ...
            self.is_classmethod = False
        if self.is_staticmethod and env.lookup_here('staticmethod'):
            # staticmethod() was overridden - not much we can do here ...
            self.is_staticmethod = False

        if self.name == '__new__' and env.is_py_class_scope:
            self.is_staticmethod = 1

        self.analyse_argument_types(env)
        if self.name == '<lambda>':
            self.declare_lambda_function(env)
        else:
            self.declare_pyfunction(env)

        self.analyse_signature(env)
        self.return_type = self.entry.signature.return_type()
        self.create_local_scope(env)

        self.py_wrapper = DefNodeWrapper(
            self.pos,
            target=self,
            name=self.entry.name,
            args=self.args,
            star_arg=self.star_arg,
            starstar_arg=self.starstar_arg,
            return_type=self.return_type)
        self.py_wrapper.analyse_declarations(env)

    def analyse_argument_types(self, env):
        self.directive_locals = env.directives['locals']
        allow_none_for_extension_args = env.directives['allow_none_for_extension_args']

        f2s = env.fused_to_specific
        env.fused_to_specific = None

        for arg in self.args:
            if hasattr(arg, 'name'):
                name_declarator = None
            else:
                base_type = arg.base_type.analyse(env)
                name_declarator, type = \
                    arg.declarator.analyse(base_type, env)
                arg.name = name_declarator.name
                arg.type = type
                if type.is_fused:
                    self.has_fused_arguments = True

            self.align_argument_type(env, arg)
            if name_declarator and name_declarator.cname:
                error(self.pos,
                      "Python function argument cannot have C name specification")
            arg.type = arg.type.as_argument_type()
            arg.needs_conversion = 0
            arg.needs_type_test = 0
            arg.is_generic = 1
            if arg.type.is_pyobject or arg.type.is_buffer or arg.type.is_memoryviewslice:
                if arg.or_none:
                    arg.accept_none = True
                elif arg.not_none:
                    arg.accept_none = False
                elif (arg.type.is_extension_type or arg.type.is_builtin_type
                      or arg.type.is_buffer or arg.type.is_memoryviewslice):
                    if arg.default and arg.default.constant_result is None:
                        # special case: def func(MyType obj = None)
                        arg.accept_none = True
                    else:
                        # default depends on compiler directive
                        arg.accept_none = allow_none_for_extension_args
                else:
                    # probably just a plain 'object'
                    arg.accept_none = True
            else:
                arg.accept_none = True  # won't be used, but must be there
                if arg.not_none:
                    error(arg.pos, "Only Python type arguments can have 'not None'")
                if arg.or_none:
                    error(arg.pos, "Only Python type arguments can have 'or None'")

        env.fused_to_specific = f2s
    def analyse_signature(self, env):
        if self.entry.is_special:
            if self.decorators:
                error(self.pos, "special functions of cdef classes cannot have decorators")
            self.entry.trivial_signature = len(self.args) == 1 and not (self.star_arg or self.starstar_arg)
        elif not env.directives['always_allow_keywords'] and not (self.star_arg or self.starstar_arg):
            # Use the simpler calling signature for zero- and one-argument functions.
            if self.entry.signature is TypeSlots.pyfunction_signature:
                if len(self.args) == 0:
                    self.entry.signature = TypeSlots.pyfunction_noargs
                elif len(self.args) == 1:
                    if self.args[0].default is None and not self.args[0].kw_only:
                        self.entry.signature = TypeSlots.pyfunction_onearg
            elif self.entry.signature is TypeSlots.pymethod_signature:
                if len(self.args) == 1:
                    self.entry.signature = TypeSlots.unaryfunc
                elif len(self.args) == 2:
                    if self.args[1].default is None and not self.args[1].kw_only:
                        self.entry.signature = TypeSlots.ibinaryfunc

        sig = self.entry.signature
        nfixed = sig.num_fixed_args()
        if sig is TypeSlots.pymethod_signature and nfixed == 1 \
                and len(self.args) == 0 and self.star_arg:
            # this is the only case where a diverging number of
            # arguments is not an error - when we have no explicit
            # 'self' parameter as in method(*args)
            sig = self.entry.signature = TypeSlots.pyfunction_signature  # self is not 'really' used
            self.self_in_stararg = 1
            nfixed = 0

        if self.is_staticmethod and env.is_c_class_scope:
            nfixed = 0
            self.self_in_stararg = True  # FIXME: why for staticmethods?

            self.entry.signature = sig = copy.copy(sig)
            sig.fixed_arg_format = "*"
            sig.is_staticmethod = True
            sig.has_generic_args = True

        if ((self.is_classmethod or self.is_staticmethod) and
                self.has_fused_arguments and env.is_c_class_scope):
            del self.decorator_indirection.stats[:]

        for i in range(min(nfixed, len(self.args))):
            arg = self.args[i]
            arg.is_generic = 0
            if sig.is_self_arg(i) and not self.is_staticmethod:
                if self.is_classmethod:
                    arg.is_type_arg = 1
                    arg.hdr_type = arg.type = Builtin.type_type
                else:
                    arg.is_self_arg = 1
                    arg.hdr_type = arg.type = env.parent_type
                arg.needs_conversion = 0
            else:
                arg.hdr_type = sig.fixed_arg_type(i)
                if not arg.type.same_as(arg.hdr_type):
                    if arg.hdr_type.is_pyobject and arg.type.is_pyobject:
                        arg.needs_type_test = 1
                    else:
                        arg.needs_conversion = 1
            if arg.needs_conversion:
                arg.hdr_cname = Naming.arg_prefix + arg.name
            else:
                arg.hdr_cname = Naming.var_prefix + arg.name

        if nfixed > len(self.args):
            self.bad_signature()
            return
        elif nfixed < len(self.args):
            if not sig.has_generic_args:
                self.bad_signature()
            for arg in self.args:
                if arg.is_generic and \
                        (arg.type.is_extension_type or arg.type.is_builtin_type):
                    arg.needs_type_test = 1

    def bad_signature(self):
        sig = self.entry.signature
        expected_str = "%d" % sig.num_fixed_args()
        if sig.has_generic_args:
            expected_str += " or more"
        name = self.name
        if name.startswith("__") and name.endswith("__"):
            desc = "Special method"
        else:
            desc = "Method"
        error(self.pos,
              "%s %s has wrong number of arguments "
              "(%d declared, %s expected)" % (
                  desc, self.name, len(self.args), expected_str))
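    # Example of the diagnostic built above (illustrative values only): a special
    # method declared with three arguments whose slot expects exactly two would be
    # reported as
    #     "Special method __foo__ has wrong number of arguments (3 declared, 2 expected)"
    # with "or more" appended to the expected count when the signature accepts
    # generic extra arguments.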
    def declare_pyfunction(self, env):
        #print "DefNode.declare_pyfunction:", self.name, "in", env ###
        name = self.name
        entry = env.lookup_here(name)
        if entry:
            if entry.is_final_cmethod and not env.parent_type.is_final_type:
                error(self.pos, "Only final types can have final Python (def/cpdef) methods")
            if (entry.type.is_cfunction and not entry.is_builtin_cmethod
                    and not self.is_wrapper):
                warning(self.pos, "Overriding cdef method with def method.", 5)
        entry = env.declare_pyfunction(name, self.pos, allow_redefine=not self.is_wrapper)
        self.entry = entry
        prefix = env.next_id(env.scope_prefix)
        self.entry.pyfunc_cname = Naming.pyfunc_prefix + prefix + name
        if Options.docstrings:
            entry.doc = embed_position(self.pos, self.doc)
            entry.doc_cname = Naming.funcdoc_prefix + prefix + name
            if entry.is_special:
                if entry.name in TypeSlots.invisible or not entry.doc or (entry.name in '__getattr__' and env.directives['fast_getattr']):
                    entry.wrapperbase_cname = None
                else:
                    entry.wrapperbase_cname = Naming.wrapperbase_prefix + prefix + name

    def declare_lambda_function(self, env):
        entry = env.declare_lambda_function(self.lambda_name, self.pos)
        self.entry = entry
        self.entry.pyfunc_cname = entry.cname

    def declare_arguments(self, env):
        for arg in self.args:
            if not arg.name:
                error(arg.pos, "Missing argument name")
            if arg.needs_conversion:
                arg.entry = env.declare_var(arg.name, arg.type, arg.pos)
                if arg.type.is_pyobject:
                    arg.entry.init = "0"
            else:
                arg.entry = self.declare_argument(env, arg)
            arg.entry.is_arg = 1
            arg.entry.is_self_arg = arg.is_self_arg
        self.declare_python_arg(env, self.star_arg)
        self.declare_python_arg(env, self.starstar_arg)

    def declare_python_arg(self, env, arg):
        if arg:
            if env.directives['infer_types'] != False:
                type = PyrexTypes.unspecified_type
            else:
                type = py_object_type
            entry = env.declare_var(arg.name, type, arg.pos)
            entry.xdecref_cleanup = 1
            arg.entry = entry
    def analyse_expressions(self, env):
        self.local_scope.directives = env.directives
        self.analyse_default_values(env)

        if not self.needs_assignment_synthesis(env) and self.decorators:
            for decorator in self.decorators[::-1]:
                decorator.decorator = decorator.decorator.analyse_expressions(env)

        self.py_wrapper.prepare_argument_coercion(env)
        return self

    def needs_assignment_synthesis(self, env, code=None):
        if self.is_wrapper or self.specialized_cpdefs or self.entry.is_fused_specialized:
            return False
        if self.is_staticmethod:
            return True
        if self.no_assignment_synthesis:
            return False
        # Should enable for module level as well, that will require more testing...
        if self.entry.is_anonymous:
            return True
        if env.is_module_scope:
            if code is None:
                return env.directives['binding']
            else:
                return code.globalstate.directives['binding']
        return env.is_py_class_scope or env.is_closure_scope

    def error_value(self):
        return self.entry.signature.error_value

    def caller_will_check_exceptions(self):
        return self.entry.signature.exception_check

    def generate_function_definitions(self, env, code):
        if self.defaults_getter:
            self.defaults_getter.generate_function_definitions(env, code)

        # Before closure cnames are mangled
        if self.py_wrapper_required:
            # func_cname might be modified by @cname
            self.py_wrapper.func_cname = self.entry.func_cname
            self.py_wrapper.generate_function_definitions(env, code)
        FuncDefNode.generate_function_definitions(self, env, code)
    def generate_function_header(self, code, with_pymethdef, proto_only=0):
        if proto_only:
            if self.py_wrapper_required:
                self.py_wrapper.generate_function_header(
                    code, with_pymethdef, True)
            return
        arg_code_list = []
        if self.entry.signature.has_dummy_arg:
            self_arg = 'PyObject *%s' % Naming.self_cname
            if not self.needs_outer_scope:
                self_arg = 'CYTHON_UNUSED ' + self_arg
            arg_code_list.append(self_arg)

        def arg_decl_code(arg):
            entry = arg.entry
            if entry.in_closure:
                cname = entry.original_cname
            else:
                cname = entry.cname
            decl = entry.type.declaration_code(cname)
            if not entry.cf_used:
                decl = 'CYTHON_UNUSED ' + decl
            return decl

        for arg in self.args:
            arg_code_list.append(arg_decl_code(arg))
        if self.star_arg:
            arg_code_list.append(arg_decl_code(self.star_arg))
        if self.starstar_arg:
            arg_code_list.append(arg_decl_code(self.starstar_arg))
        arg_code = ', '.join(arg_code_list)
        dc = self.return_type.declaration_code(self.entry.pyfunc_cname)

        decls_code = code.globalstate['decls']
        preprocessor_guard = self.get_preprocessor_guard()
        if preprocessor_guard:
            decls_code.putln(preprocessor_guard)
        decls_code.putln(
            "static %s(%s); /* proto */" % (dc, arg_code))
        if preprocessor_guard:
            decls_code.putln("#endif")
        code.putln("static %s(%s) {" % (dc, arg_code))

    def generate_argument_declarations(self, env, code):
        pass

    def generate_keyword_list(self, code):
        pass

    def generate_argument_parsing_code(self, env, code):
        # Move arguments into closure if required
        def put_into_closure(entry):
            if entry.in_closure:
                code.putln('%s = %s;' % (entry.cname, entry.original_cname))
                code.put_var_incref(entry)
                code.put_var_giveref(entry)
        for arg in self.args:
            put_into_closure(arg.entry)
        for arg in self.star_arg, self.starstar_arg:
            if arg:
                put_into_closure(arg.entry)

    def generate_argument_type_tests(self, code):
        pass
class DefNodeWrapper(FuncDefNode):
    # DefNode python wrapper code generator

    target = None  # Target DefNode

    def __init__(self, *args, **kwargs):
        FuncDefNode.__init__(self, *args, **kwargs)
        self.num_kwonly_args = self.target.num_kwonly_args
        self.num_required_kw_args = self.target.num_required_kw_args
        self.num_required_args = self.target.num_required_args
        self.self_in_stararg = self.target.self_in_stararg
        self.signature = None

    def analyse_declarations(self, env):
        target_entry = self.target.entry
        name = self.name
        prefix = env.next_id(env.scope_prefix)
        target_entry.func_cname = Naming.pywrap_prefix + prefix + name
        target_entry.pymethdef_cname = Naming.pymethdef_prefix + prefix + name

        self.signature = target_entry.signature

    def prepare_argument_coercion(self, env):
        # This is only really required for Cython utility code at this time,
        # everything else can be done during code generation.  But we expand
        # all utility code here, simply because we cannot easily distinguish
        # different code types.
        for arg in self.args:
            if not arg.type.is_pyobject:
                if not arg.type.create_from_py_utility_code(env):
                    pass  # will fail later
            elif arg.hdr_type and not arg.hdr_type.is_pyobject:
                if not arg.hdr_type.create_to_py_utility_code(env):
                    pass  # will fail later

    def signature_has_nongeneric_args(self):
        argcount = len(self.args)
        if argcount == 0 or (
                argcount == 1 and (self.args[0].is_self_arg or
                                   self.args[0].is_type_arg)):
            return 0
        return 1

    def signature_has_generic_args(self):
        return self.signature.has_generic_args
    def generate_function_body(self, code):
        args = []
        if self.signature.has_dummy_arg:
            args.append(Naming.self_cname)
        for arg in self.args:
            if arg.hdr_type and not (arg.type.is_memoryviewslice or
                                     arg.type.is_struct or
                                     arg.type.is_complex):
                args.append(arg.type.cast_code(arg.entry.cname))
            else:
                args.append(arg.entry.cname)
        if self.star_arg:
            args.append(self.star_arg.entry.cname)
        if self.starstar_arg:
            args.append(self.starstar_arg.entry.cname)
        args = ', '.join(args)
        if not self.return_type.is_void:
            code.put('%s = ' % Naming.retval_cname)
        code.putln('%s(%s);' % (
            self.target.entry.pyfunc_cname, args))
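    # Illustrative sketch (hypothetical cnames): for "def foo(self, x)" the wrapper
    # body emitted above is essentially a single call into the implementation,
    #
    #     __pyx_r = __pyx_pf_7example_3foo(__pyx_self, __pyx_v_x);
    #
    # with casts applied to any non-object argument that carries an hdr_type.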
    def generate_function_definitions(self, env, code):
        lenv = self.target.local_scope
        # Generate C code for header and body of function
        code.mark_pos(self.pos)
        code.putln("/* Python wrapper */")
        preprocessor_guard = self.target.get_preprocessor_guard()
        if preprocessor_guard:
            code.putln(preprocessor_guard)

        code.enter_cfunc_scope()
        code.return_from_error_cleanup_label = code.new_label()

        with_pymethdef = (self.target.needs_assignment_synthesis(env, code) or
                          self.target.pymethdef_required)
        self.generate_function_header(code, with_pymethdef)
        self.generate_argument_declarations(lenv, code)
        tempvardecl_code = code.insertion_point()

        if self.return_type.is_pyobject:
            retval_init = ' = 0'
        else:
            retval_init = ''
        if not self.return_type.is_void:
            code.putln('%s%s;' % (
                self.return_type.declaration_code(Naming.retval_cname),
                retval_init))
        code.put_declare_refcount_context()
        code.put_setup_refcount_context('%s (wrapper)' % self.name)

        self.generate_argument_parsing_code(lenv, code)
        self.generate_argument_type_tests(code)
        self.generate_function_body(code)

        # ----- Go back and insert temp variable declarations
        tempvardecl_code.put_temp_declarations(code.funcstate)

        code.mark_pos(self.pos)
        code.putln("/* function exit code */")

        # ----- Error cleanup
        if code.error_label in code.labels_used:
            code.put_goto(code.return_label)
            code.put_label(code.error_label)
            for cname, type in code.funcstate.all_managed_temps():
                code.put_xdecref(cname, type)
            err_val = self.error_value()
            if err_val is not None:
                code.putln("%s = %s;" % (Naming.retval_cname, err_val))

        # ----- Non-error return cleanup
        code.put_label(code.return_label)
        for entry in lenv.var_entries:
            if entry.is_arg and entry.type.is_pyobject:
                code.put_var_decref(entry)

        code.put_finish_refcount_context()
        if not self.return_type.is_void:
            code.putln("return %s;" % Naming.retval_cname)
        code.putln('}')
        code.exit_cfunc_scope()
        if preprocessor_guard:
            code.putln("#endif /*!(%s)*/" % preprocessor_guard)
    def generate_function_header(self, code, with_pymethdef, proto_only=0):
        arg_code_list = []
        sig = self.signature

        if sig.has_dummy_arg or self.self_in_stararg:
            arg_code = "PyObject *%s" % Naming.self_cname
            if not sig.has_dummy_arg:
                arg_code = 'CYTHON_UNUSED ' + arg_code
            arg_code_list.append(arg_code)

        for arg in self.args:
            if not arg.is_generic:
                if arg.is_self_arg or arg.is_type_arg:
                    arg_code_list.append("PyObject *%s" % arg.hdr_cname)
                else:
                    arg_code_list.append(
                        arg.hdr_type.declaration_code(arg.hdr_cname))
        entry = self.target.entry
        if not entry.is_special and sig.method_flags() == [TypeSlots.method_noargs]:
            arg_code_list.append("CYTHON_UNUSED PyObject *unused")
        if entry.scope.is_c_class_scope and entry.name == "__ipow__":
            arg_code_list.append("CYTHON_UNUSED PyObject *unused")
        if sig.has_generic_args:
            arg_code_list.append(
                "PyObject *%s, PyObject *%s"
                % (Naming.args_cname, Naming.kwds_cname))
        arg_code = ", ".join(arg_code_list)

        # Prevent warning: unused function '__pyx_pw_5numpy_7ndarray_1__getbuffer__'
        mf = ""
        if (entry.name in ("__getbuffer__", "__releasebuffer__")
                and entry.scope.is_c_class_scope):
            mf = "CYTHON_UNUSED "
            with_pymethdef = False

        dc = self.return_type.declaration_code(entry.func_cname)
        header = "static %s%s(%s)" % (mf, dc, arg_code)
        code.putln("%s; /*proto*/" % header)

        if proto_only:
            if self.target.fused_py_func:
                # If we are the specialized version of the cpdef, we still
                # want the prototype for the "fused cpdef", in case we're
                # checking to see if our method was overridden in Python
                self.target.fused_py_func.generate_function_header(
                    code, with_pymethdef, proto_only=True)
            return

        if (Options.docstrings and entry.doc and
                not self.target.fused_py_func and
                not entry.scope.is_property_scope and
                (not entry.is_special or entry.wrapperbase_cname)):
            # h_code = code.globalstate['h_code']
            docstr = entry.doc
            if docstr.is_unicode:
                docstr = docstr.utf8encode()
            code.putln(
                'static char %s[] = "%s";' % (
                    entry.doc_cname,
                    split_string_literal(escape_byte_string(docstr))))

            if entry.is_special:
                code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
                code.putln(
                    "struct wrapperbase %s;" % entry.wrapperbase_cname)
                code.putln('#endif')

        if with_pymethdef or self.target.fused_py_func:
            code.put(
                "static PyMethodDef %s = " %
                entry.pymethdef_cname)
            code.put_pymethoddef(self.target.entry, ";", allow_skip=False)
        code.putln("%s {" % header)

    def generate_argument_declarations(self, env, code):
        for arg in self.args:
            if arg.is_generic:
                if arg.needs_conversion:
                    code.putln("PyObject *%s = 0;" % arg.hdr_cname)
                else:
                    code.put_var_declaration(arg.entry)
        for entry in env.var_entries:
            if entry.is_arg:
                code.put_var_declaration(entry)
    def generate_argument_parsing_code(self, env, code):
        # Generate fast equivalent of PyArg_ParseTuple call for
        # generic arguments, if any, including args/kwargs
        old_error_label = code.new_error_label()
        our_error_label = code.error_label
        end_label = code.new_label("argument_unpacking_done")

        has_kwonly_args = self.num_kwonly_args > 0
        has_star_or_kw_args = self.star_arg is not None \
            or self.starstar_arg is not None or has_kwonly_args

        for arg in self.args:
            if not arg.type.is_pyobject:
                if not arg.type.create_from_py_utility_code(env):
                    pass  # will fail later

        if not self.signature_has_generic_args():
            if has_star_or_kw_args:
                error(self.pos, "This method cannot have * or keyword arguments")
            self.generate_argument_conversion_code(code)

        elif not self.signature_has_nongeneric_args():
            # func(*args) or func(**kw) or func(*args, **kw)
            self.generate_stararg_copy_code(code)

        else:
            self.generate_tuple_and_keyword_parsing_code(self.args, end_label, code)

        code.error_label = old_error_label
        if code.label_used(our_error_label):
            if not code.label_used(end_label):
                code.put_goto(end_label)
            code.put_label(our_error_label)
            if has_star_or_kw_args:
                self.generate_arg_decref(self.star_arg, code)
                if self.starstar_arg:
                    if self.starstar_arg.entry.xdecref_cleanup:
                        code.put_var_xdecref_clear(self.starstar_arg.entry)
                    else:
                        code.put_var_decref_clear(self.starstar_arg.entry)
            code.put_add_traceback(self.target.entry.qualified_name)
            code.put_finish_refcount_context()
            code.putln("return %s;" % self.error_value())
        if code.label_used(end_label):
            code.put_label(end_label)

    def generate_arg_xdecref(self, arg, code):
        if arg:
            code.put_var_xdecref_clear(arg.entry)

    def generate_arg_decref(self, arg, code):
        if arg:
            code.put_var_decref_clear(arg.entry)
    def generate_stararg_copy_code(self, code):
        if not self.star_arg:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
            code.putln("if (unlikely(PyTuple_GET_SIZE(%s) > 0)) {" %
                       Naming.args_cname)
            code.put('__Pyx_RaiseArgtupleInvalid("%s", 1, 0, 0, PyTuple_GET_SIZE(%s)); return %s;' % (
                self.name, Naming.args_cname, self.error_value()))
            code.putln("}")

        if self.starstar_arg:
            if self.signature.has_dummy_arg:
                kwarg_check = "unlikely(%s)" % Naming.kwds_cname
            else:
                kwarg_check = "%s" % Naming.kwds_cname
        else:
            kwarg_check = "unlikely(%s) && unlikely(PyDict_Size(%s) > 0)" % (
                Naming.kwds_cname, Naming.kwds_cname)
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("KeywordStringCheck", "FunctionArguments.c"))
        code.putln(
            "if (%s && unlikely(!__Pyx_CheckKeywordStrings(%s, \"%s\", %d))) return %s;" % (
                kwarg_check, Naming.kwds_cname, self.name,
                bool(self.starstar_arg), self.error_value()))

        if self.starstar_arg:
            code.putln("%s = (%s) ? PyDict_Copy(%s) : PyDict_New();" % (
                self.starstar_arg.entry.cname,
                Naming.kwds_cname,
                Naming.kwds_cname))
            code.putln("if (unlikely(!%s)) return %s;" % (
                self.starstar_arg.entry.cname, self.error_value()))
            self.starstar_arg.entry.xdecref_cleanup = 0
            code.put_gotref(self.starstar_arg.entry.cname)

        if self.self_in_stararg and not self.target.is_staticmethod:
            # need to create a new tuple with 'self' inserted as first item
            code.put("%s = PyTuple_New(PyTuple_GET_SIZE(%s)+1); if (unlikely(!%s)) " % (
                self.star_arg.entry.cname,
                Naming.args_cname,
                self.star_arg.entry.cname))
            if self.starstar_arg:
                code.putln("{")
                code.put_decref_clear(self.starstar_arg.entry.cname, py_object_type)
                code.putln("return %s;" % self.error_value())
                code.putln("}")
            else:
                code.putln("return %s;" % self.error_value())
            code.put_gotref(self.star_arg.entry.cname)
            code.put_incref(Naming.self_cname, py_object_type)
            code.put_giveref(Naming.self_cname)
            code.putln("PyTuple_SET_ITEM(%s, 0, %s);" % (
                self.star_arg.entry.cname, Naming.self_cname))
            temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            code.putln("for (%s=0; %s < PyTuple_GET_SIZE(%s); %s++) {" % (
                temp, temp, Naming.args_cname, temp))
            code.putln("PyObject* item = PyTuple_GET_ITEM(%s, %s);" % (
                Naming.args_cname, temp))
            code.put_incref("item", py_object_type)
            code.put_giveref("item")
            code.putln("PyTuple_SET_ITEM(%s, %s+1, item);" % (
                self.star_arg.entry.cname, temp))
            code.putln("}")
            code.funcstate.release_temp(temp)
            self.star_arg.entry.xdecref_cleanup = 0
        elif self.star_arg:
            code.put_incref(Naming.args_cname, py_object_type)
            code.putln("%s = %s;" % (
                self.star_arg.entry.cname,
                Naming.args_cname))
            self.star_arg.entry.xdecref_cleanup = 0
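    # Rough illustration (assumed cnames): when 'self' has to be folded into *args,
    # the branch above emits C of roughly this shape
    #
    #     __pyx_v_args = PyTuple_New(PyTuple_GET_SIZE(__pyx_args)+1); ...
    #     PyTuple_SET_ITEM(__pyx_v_args, 0, __pyx_self);
    #     for (t=0; t < PyTuple_GET_SIZE(__pyx_args); t++) {
    #         PyObject* item = PyTuple_GET_ITEM(__pyx_args, t);
    #         PyTuple_SET_ITEM(__pyx_v_args, t+1, item);
    #     }
    #
    # with the reference counting handled by the put_incref/put_giveref helpers.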
    def generate_tuple_and_keyword_parsing_code(self, args, success_label, code):
        argtuple_error_label = code.new_label("argtuple_error")

        positional_args = []
        required_kw_only_args = []
        optional_kw_only_args = []
        for arg in args:
            if arg.is_generic:
                if arg.default:
                    if not arg.is_self_arg and not arg.is_type_arg:
                        if arg.kw_only:
                            optional_kw_only_args.append(arg)
                        else:
                            positional_args.append(arg)
                elif arg.kw_only:
                    required_kw_only_args.append(arg)
                elif not arg.is_self_arg and not arg.is_type_arg:
                    positional_args.append(arg)

        # sort required kw-only args before optional ones to avoid special
        # cases in the unpacking code
        kw_only_args = required_kw_only_args + optional_kw_only_args

        min_positional_args = self.num_required_args - self.num_required_kw_args
        if len(args) > 0 and (args[0].is_self_arg or args[0].is_type_arg):
            min_positional_args -= 1
        max_positional_args = len(positional_args)
        has_fixed_positional_count = not self.star_arg and \
            min_positional_args == max_positional_args
        has_kw_only_args = bool(kw_only_args)

        if self.num_required_kw_args:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))

        if self.starstar_arg or self.star_arg:
            self.generate_stararg_init_code(max_positional_args, code)

        code.putln('{')
        all_args = tuple(positional_args) + tuple(kw_only_args)
        code.putln("static PyObject **%s[] = {%s,0};" % (
            Naming.pykwdlist_cname,
            ','.join(['&%s' % code.intern_identifier(arg.name)
                      for arg in all_args])))

        # Before being converted and assigned to the target variables,
        # borrowed references to all unpacked argument values are
        # collected into a local PyObject* array called "values",
        # regardless if they were taken from default arguments,
        # positional arguments or keyword arguments.  Note that
        # C-typed default arguments are handled at conversion time,
        # so their array value is NULL in the end if no argument
        # was passed for them.
        self.generate_argument_values_setup_code(all_args, code)

        # --- optimised code when we receive keyword arguments
        code.putln("if (%s(%s)) {" % (
            (self.num_required_kw_args > 0) and "likely" or "unlikely",
            Naming.kwds_cname))
        self.generate_keyword_unpacking_code(
            min_positional_args, max_positional_args,
            has_fixed_positional_count, has_kw_only_args,
            all_args, argtuple_error_label, code)

        # --- optimised code when we do not receive any keyword arguments
        if (self.num_required_kw_args and min_positional_args > 0) or min_positional_args == max_positional_args:
            # Python raises arg tuple related errors first, so we must
            # check the length here
            if min_positional_args == max_positional_args and not self.star_arg:
                compare = '!='
            else:
                compare = '<'
            code.putln('} else if (PyTuple_GET_SIZE(%s) %s %d) {' % (
                Naming.args_cname, compare, min_positional_args))
            code.put_goto(argtuple_error_label)

        if self.num_required_kw_args:
            # pure error case: keywords required but not passed
            if max_positional_args > min_positional_args and not self.star_arg:
                code.putln('} else if (PyTuple_GET_SIZE(%s) > %d) {' % (
                    Naming.args_cname, max_positional_args))
                code.put_goto(argtuple_error_label)
            code.putln('} else {')
            for i, arg in enumerate(kw_only_args):
                if not arg.default:
                    pystring_cname = code.intern_identifier(arg.name)
                    # required keyword-only argument missing
                    code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
                        self.name, pystring_cname))
                    code.putln(code.error_goto(self.pos))
                    break

        else:
            # optimised tuple unpacking code
            code.putln('} else {')
            if min_positional_args == max_positional_args:
                # parse the exact number of positional arguments from
                # the args tuple
                for i, arg in enumerate(positional_args):
                    code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
            else:
                # parse the positional arguments from the variable length
                # args tuple and reject illegal argument tuple sizes
                code.putln('switch (PyTuple_GET_SIZE(%s)) {' % Naming.args_cname)
                if self.star_arg:
                    code.putln('default:')
                reversed_args = list(enumerate(positional_args))[::-1]
                for i, arg in reversed_args:
                    if i >= min_positional_args-1:
                        code.put('case %2d: ' % (i+1))
                        code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
                if min_positional_args == 0:
                    code.put('case 0: ')
                code.putln('break;')
                if self.star_arg:
                    if min_positional_args:
                        for i in range(min_positional_args-1, -1, -1):
                            code.putln('case %2d:' % i)
                        code.put_goto(argtuple_error_label)
                else:
                    code.put('default: ')
                    code.put_goto(argtuple_error_label)
                code.putln('}')

        code.putln('}')  # end of the conditional unpacking blocks

        # Convert arg values to their final type and assign them.
        # Also inject non-Python default arguments, which cannot
        # live in the values[] array.
        for i, arg in enumerate(all_args):
            self.generate_arg_assignment(arg, "values[%d]" % i, code)

        code.putln('}')  # end of the whole argument unpacking block

        if code.label_used(argtuple_error_label):
            code.put_goto(success_label)
            code.put_label(argtuple_error_label)
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
            code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, PyTuple_GET_SIZE(%s)); ' % (
                self.name, has_fixed_positional_count,
                min_positional_args, max_positional_args,
                Naming.args_cname))
            code.putln(code.error_goto(self.pos))
    def generate_arg_assignment(self, arg, item, code):
        if arg.type.is_pyobject:
            # Python default arguments were already stored in 'item' at the very beginning
            if arg.is_generic:
                item = PyrexTypes.typecast(arg.type, PyrexTypes.py_object_type, item)
            entry = arg.entry
            code.putln("%s = %s;" % (entry.cname, item))
        else:
            func = arg.type.from_py_function
            if func:
                if arg.default:
                    # C-typed default arguments must be handled here
                    code.putln('if (%s) {' % item)
                rhs = "%s(%s)" % (func, item)
                if arg.type.is_enum:
                    rhs = arg.type.cast_code(rhs)
                code.putln("%s = %s; %s" % (
                    arg.entry.cname,
                    rhs,
                    code.error_goto_if(arg.type.error_condition(arg.entry.cname), arg.pos)))
                if arg.default:
                    code.putln('} else {')
                    code.putln("%s = %s;" % (
                        arg.entry.cname,
                        arg.calculate_default_value_code(code)))
                    if arg.type.is_memoryviewslice:
                        code.put_incref_memoryviewslice(arg.entry.cname,
                                                        have_gil=True)
                    code.putln('}')
            else:
                error(arg.pos, "Cannot convert Python object argument to type '%s'" % arg.type)
3454 def generate_stararg_init_code(self
, max_positional_args
, code
):
3455 if self
.starstar_arg
:
3456 self
.starstar_arg
.entry
.xdecref_cleanup
= 0
3457 code
.putln('%s = PyDict_New(); if (unlikely(!%s)) return %s;' % (
3458 self
.starstar_arg
.entry
.cname
,
3459 self
.starstar_arg
.entry
.cname
,
3460 self
.error_value()))
3461 code
.put_gotref(self
.starstar_arg
.entry
.cname
)
3463 self
.star_arg
.entry
.xdecref_cleanup
= 0
3464 code
.putln('if (PyTuple_GET_SIZE(%s) > %d) {' % (
3466 max_positional_args
))
3467 code
.putln('%s = PyTuple_GetSlice(%s, %d, PyTuple_GET_SIZE(%s));' % (
3468 self
.star_arg
.entry
.cname
, Naming
.args_cname
,
3469 max_positional_args
, Naming
.args_cname
))
3470 code
.putln("if (unlikely(!%s)) {" % self
.star_arg
.entry
.cname
)
3471 if self
.starstar_arg
:
3472 code
.put_decref_clear(self
.starstar_arg
.entry
.cname
, py_object_type
)
3473 code
.put_finish_refcount_context()
3474 code
.putln('return %s;' % self
.error_value())
3476 code
.put_gotref(self
.star_arg
.entry
.cname
)
3477 code
.putln('} else {')
3478 code
.put("%s = %s; " % (self
.star_arg
.entry
.cname
, Naming
.empty_tuple
))
3479 code
.put_incref(Naming
.empty_tuple
, py_object_type
)
    def generate_argument_values_setup_code(self, args, code):
        max_args = len(args)
        # the 'values' array collects borrowed references to arguments
        # before doing any type coercion etc.
        code.putln("PyObject* values[%d] = {%s};" % (
            max_args, ','.join('0'*max_args)))

        if self.target.defaults_struct:
            code.putln('%s *%s = __Pyx_CyFunction_Defaults(%s, %s);' % (
                self.target.defaults_struct, Naming.dynamic_args_cname,
                self.target.defaults_struct, Naming.self_cname))

        # assign borrowed Python default values to the values array,
        # so that they can be overwritten by received arguments below
        for i, arg in enumerate(args):
            if arg.default and arg.type.is_pyobject:
                default_value = arg.calculate_default_value_code(code)
                code.putln('values[%d] = %s;' % (i, arg.type.as_pyobject(default_value)))
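    # Illustrative sketch (assumed, not verbatim compiler output): for a def
    # function taking three arguments, the method above emits roughly
    #
    #     PyObject* values[3] = {0,0,0};
    #     values[2] = ((PyObject *)__pyx_some_default);  /* borrowed default */
    #
    # i.e. borrowed references to Python default values are stored first and
    # then overwritten by whatever arguments were actually received.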
    def generate_keyword_unpacking_code(self, min_positional_args, max_positional_args,
                                        has_fixed_positional_count, has_kw_only_args,
                                        all_args, argtuple_error_label, code):
        code.putln('Py_ssize_t kw_args;')
        code.putln('const Py_ssize_t pos_args = PyTuple_GET_SIZE(%s);' % Naming.args_cname)
        # copy the values from the args tuple and check that it's not too long
        code.putln('switch (pos_args) {')
        if self.star_arg:
            code.putln('default:')
        for i in range(max_positional_args-1, -1, -1):
            code.put('case %2d: ' % (i+1))
            code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (
                i, Naming.args_cname, i))
        code.putln('case 0: break;')
        if not self.star_arg:
            code.put('default: ')  # more arguments than allowed
            code.put_goto(argtuple_error_label)
        code.putln('}')

        # The code above is very often (but not always) the same as
        # the optimised non-kwargs tuple unpacking code, so we keep
        # the code block above at the very top, before the following
        # 'external' PyDict_Size() call, to make it easy for the C
        # compiler to merge the two separate tuple unpacking
        # implementations into one when they turn out to be identical.

        # If we received kwargs, fill up the positional/required
        # arguments with values from the kw dict
        code.putln('kw_args = PyDict_Size(%s);' % Naming.kwds_cname)
        if self.num_required_args or max_positional_args > 0:
            last_required_arg = -1
            for i, arg in enumerate(all_args):
                if not arg.default:
                    last_required_arg = i
            if last_required_arg < max_positional_args:
                last_required_arg = max_positional_args-1
            if max_positional_args > 0:
                code.putln('switch (pos_args) {')
            for i, arg in enumerate(all_args[:last_required_arg+1]):
                if max_positional_args > 0 and i <= max_positional_args:
                    if self.star_arg and i == max_positional_args:
                        code.putln('default:')
                    else:
                        code.putln('case %2d:' % i)
                pystring_cname = code.intern_identifier(arg.name)
                if arg.default:
                    if arg.kw_only:
                        # optional kw-only args are handled separately below
                        continue
                    code.putln('if (kw_args > 0) {')
                    # don't overwrite default argument
                    code.putln('PyObject* value = PyDict_GetItem(%s, %s);' % (
                        Naming.kwds_cname, pystring_cname))
                    code.putln('if (value) { values[%d] = value; kw_args--; }' % i)
                    code.putln('}')
                else:
                    code.putln('if (likely((values[%d] = PyDict_GetItem(%s, %s)) != 0)) kw_args--;' % (
                        i, Naming.kwds_cname, pystring_cname))
                    if i < min_positional_args:
                        if i == 0:
                            # special case: we know arg 0 is missing
                            code.put('else ')
                            code.put_goto(argtuple_error_label)
                        else:
                            # print the correct number of values (args or
                            # kwargs) that were passed into positional
                            # arguments up to this point
                            code.putln('else {')
                            code.globalstate.use_utility_code(
                                UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
                            code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, %d); ' % (
                                self.name, has_fixed_positional_count,
                                min_positional_args, max_positional_args, i))
                            code.putln(code.error_goto(self.pos))
                            code.putln('}')
                    elif arg.kw_only:
                        code.putln('else {')
                        code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
                            self.name, pystring_cname))
                        code.putln(code.error_goto(self.pos))
                        code.putln('}')
            if max_positional_args > 0:
                code.putln('}')

        if has_kw_only_args:
            # unpack optional keyword-only arguments separately because
            # checking for interned strings in a dict is faster than iterating
            self.generate_optional_kwonly_args_unpacking_code(all_args, code)

        code.putln('if (unlikely(kw_args > 0)) {')
        # non-positional/-required kw args left in dict: default args,
        # kw-only args, **kwargs or error
        #
        # This is sort of a catch-all: except for checking required
        # arguments, this will always do the right thing for unpacking
        # keyword arguments, so that we can concentrate on optimising
        # common cases above.
        if max_positional_args == 0:
            pos_arg_count = "0"
        elif self.star_arg:
            code.putln("const Py_ssize_t used_pos_args = (pos_args < %d) ? pos_args : %d;" % (
                max_positional_args, max_positional_args))
            pos_arg_count = "used_pos_args"
        else:
            pos_arg_count = "pos_args"
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("ParseKeywords", "FunctionArguments.c"))
        code.putln(
            'if (unlikely(__Pyx_ParseOptionalKeywords(%s, %s, %s, values, %s, "%s") < 0)) %s' % (
                Naming.kwds_cname,
                Naming.pykwdlist_cname,
                self.starstar_arg and self.starstar_arg.entry.cname or '0',
                pos_arg_count,
                self.name,
                code.error_goto(self.pos)))
        code.putln('}')
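    # Illustrative sketch (assumed names, not verbatim output): the positional
    # unpacking switch emitted above relies on C case fall-through, so that
    # receiving N positional arguments copies values[N-1] ... values[0] with a
    # single jump, e.g. for two arguments:
    #
    #     switch (pos_args) {
    #         case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
    #         case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
    #         case 0: break;
    #         default: goto __pyx_L5_argtuple_error;
    #     }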
    def generate_optional_kwonly_args_unpacking_code(self, all_args, code):
        optional_args = []
        first_optional_arg = -1
        for i, arg in enumerate(all_args):
            if not arg.kw_only or not arg.default:
                continue
            if not optional_args:
                first_optional_arg = i
            optional_args.append(arg.name)
        if optional_args:
            if len(optional_args) > 1:
                # if we receive more than the named kwargs, we either have **kwargs
                # (in which case we must iterate anyway) or it's an error (which we
                # also handle during iteration) => skip this part if there are more
                code.putln('if (kw_args > 0 && %s(kw_args <= %d)) {' % (
                    not self.starstar_arg and 'likely' or '',
                    len(optional_args)))
                code.putln('Py_ssize_t index;')
                # not unrolling the loop here reduces the C code overhead
                code.putln('for (index = %d; index < %d && kw_args > 0; index++) {' % (
                    first_optional_arg, first_optional_arg + len(optional_args)))
            else:
                code.putln('if (kw_args == 1) {')
                code.putln('const Py_ssize_t index = %d;' % first_optional_arg)
            code.putln('PyObject* value = PyDict_GetItem(%s, *%s[index]);' % (
                Naming.kwds_cname, Naming.pykwdlist_cname))
            code.putln('if (value) { values[index] = value; kw_args--; }')
            if len(optional_args) > 1:
                code.putln('}')
            code.putln('}')
    def generate_argument_conversion_code(self, code):
        # Generate code to convert arguments from signature type to
        # declared type, if needed.  Also copies signature arguments
        # into closure fields.
        for arg in self.args:
            if arg.needs_conversion:
                self.generate_arg_conversion(arg, code)
    def generate_arg_conversion(self, arg, code):
        # Generate conversion code for one argument.
        old_type = arg.hdr_type
        new_type = arg.type
        if old_type.is_pyobject:
            if arg.default:
                code.putln("if (%s) {" % arg.hdr_cname)
            else:
                code.putln("assert(%s); {" % arg.hdr_cname)
            self.generate_arg_conversion_from_pyobject(arg, code)
            code.putln("}")
        elif new_type.is_pyobject:
            self.generate_arg_conversion_to_pyobject(arg, code)
        else:
            if new_type.assignable_from(old_type):
                code.putln(
                    "%s = %s;" % (arg.entry.cname, arg.hdr_cname))
            else:
                error(arg.pos,
                      "Cannot convert 1 argument from '%s' to '%s'" %
                      (old_type, new_type))
    def generate_arg_conversion_from_pyobject(self, arg, code):
        new_type = arg.type
        func = new_type.from_py_function
        # copied from CoerceFromPyTypeNode
        if func:
            lhs = arg.entry.cname
            rhs = "%s(%s)" % (func, arg.hdr_cname)
            if new_type.is_enum:
                rhs = PyrexTypes.typecast(new_type, PyrexTypes.c_long_type, rhs)
            code.putln("%s = %s; %s" % (
                lhs, rhs,
                code.error_goto_if(new_type.error_condition(arg.entry.cname), arg.pos)))
        else:
            error(arg.pos,
                  "Cannot convert Python object argument to type '%s'"
                  % new_type)
    def generate_arg_conversion_to_pyobject(self, arg, code):
        old_type = arg.hdr_type
        func = old_type.to_py_function
        if func:
            code.putln("%s = %s(%s); %s" % (
                arg.entry.cname,
                func,
                arg.hdr_cname,
                code.error_goto_if_null(arg.entry.cname, arg.pos)))
            code.put_var_gotref(arg.entry)
        else:
            error(arg.pos,
                  "Cannot convert argument of type '%s' to Python object"
                  % old_type)
    def generate_argument_type_tests(self, code):
        # Generate type tests for args whose signature
        # type is PyObject * and whose declared type is
        # a subtype thereof.
        for arg in self.args:
            if arg.needs_type_test:
                self.generate_arg_type_test(arg, code)
            elif not arg.accept_none and (arg.type.is_pyobject or
                                          arg.type.is_buffer or
                                          arg.type.is_memoryviewslice):
                self.generate_arg_none_check(arg, code)

    def error_value(self):
        return self.signature.error_value

class GeneratorDefNode(DefNode):
    # Generator function node that creates a new generator instance when called.
    #
    #  gbody    GeneratorBodyDefNode    the function implementing the generator

    needs_closure = True

    child_attrs = DefNode.child_attrs + ["gbody"]

    def __init__(self, **kwargs):
        # XXX: doesn't actually need a body
        kwargs['body'] = StatListNode(kwargs['pos'], stats=[])
        super(GeneratorDefNode, self).__init__(**kwargs)

    def analyse_declarations(self, env):
        super(GeneratorDefNode, self).analyse_declarations(env)
        self.gbody.local_scope = self.local_scope
        self.gbody.analyse_declarations(env)

    def generate_function_body(self, env, code):
        body_cname = self.gbody.entry.func_cname

        code.putln('__pyx_GeneratorObject *gen = __Pyx_Generator_New('
                   '(__pyx_generator_body_t) %s, (PyObject *) %s); %s' % (
                       body_cname, Naming.cur_scope_cname,
                       code.error_goto_if_null('gen', self.pos)))
        code.put_decref(Naming.cur_scope_cname, py_object_type)
        if self.requires_classobj:
            classobj_cname = 'gen->classobj'
            code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
                classobj_cname, Naming.self_cname))
            code.put_incref(classobj_cname, py_object_type)
            code.put_giveref(classobj_cname)
        code.put_finish_refcount_context()
        code.putln('return (PyObject *) gen;')

    def generate_function_definitions(self, env, code):
        env.use_utility_code(UtilityCode.load_cached("Generator", "Generator.c"))

        self.gbody.generate_function_header(code, proto=True)
        super(GeneratorDefNode, self).generate_function_definitions(env, code)
        self.gbody.generate_function_definitions(env, code)

class GeneratorBodyDefNode(DefNode):
    # Main code body of a generator implemented as a DefNode.

    is_generator_body = True

    def __init__(self, pos=None, name=None, body=None):
        super(GeneratorBodyDefNode, self).__init__(
            pos=pos, body=body, name=name, doc=None,
            args=[], star_arg=None, starstar_arg=None)

    def declare_generator_body(self, env):
        prefix = env.next_id(env.scope_prefix)
        name = env.next_id('generator')
        cname = Naming.genbody_prefix + prefix + name
        entry = env.declare_var(None, py_object_type, self.pos,
                                cname=cname, visibility='private')
        entry.func_cname = cname
        entry.qualified_name = EncodedString(self.name)
        self.entry = entry

    def analyse_declarations(self, env):
        self.analyse_argument_types(env)
        self.declare_generator_body(env)

    def generate_function_header(self, code, proto=False):
        header = "static PyObject *%s(__pyx_GeneratorObject *%s, PyObject *%s)" % (
            self.entry.func_cname,
            Naming.generator_cname,
            Naming.sent_value_cname)
        if proto:
            code.putln('%s; /* proto */' % header)
        else:
            code.putln('%s /* generator body */\n{' % header)

    def generate_function_definitions(self, env, code):
        lenv = self.local_scope

        # Generate closure function definitions
        self.body.generate_function_definitions(lenv, code)

        # Generate C code for header and body of function
        code.enter_cfunc_scope()
        code.return_from_error_cleanup_label = code.new_label()

        # ----- Top-level constants used by this function
        code.mark_pos(self.pos)
        self.generate_cached_builtins_decls(lenv, code)
        # ----- Function header
        code.putln("")
        self.generate_function_header(code)
        closure_init_code = code.insertion_point()
        # ----- Local variables
        code.putln("PyObject *%s = NULL;" % Naming.retval_cname)
        tempvardecl_code = code.insertion_point()
        code.put_declare_refcount_context()
        code.put_setup_refcount_context(self.entry.name)

        # ----- Resume switch point.
        code.funcstate.init_closure_temps(lenv.scope_class.type.scope)
        resume_code = code.insertion_point()
        first_run_label = code.new_label('first_run')
        code.use_label(first_run_label)
        code.put_label(first_run_label)
        code.putln('%s' %
                   (code.error_goto_if_null(Naming.sent_value_cname, self.pos)))

        # ----- Function body
        self.generate_function_body(env, code)
        # ----- Closure initialization
        if lenv.scope_class.type.scope.entries:
            closure_init_code.putln('%s = %s;' % (
                lenv.scope_class.type.declaration_code(Naming.cur_scope_cname),
                lenv.scope_class.type.cast_code('%s->closure' %
                                                Naming.generator_cname)))

        code.mark_pos(self.pos)
        code.putln("")
        code.putln("/* function exit code */")

        # on normal generator termination, we do not take the exception propagation
        # path: no traceback info is required and not creating it is much faster
        if not self.body.is_terminator:
            code.putln('PyErr_SetNone(PyExc_StopIteration);')
        # ----- Error cleanup
        if code.error_label in code.labels_used:
            if not self.body.is_terminator:
                code.put_goto(code.return_label)
            code.put_label(code.error_label)
            for cname, type in code.funcstate.all_managed_temps():
                code.put_xdecref(cname, type)
            code.put_add_traceback(self.entry.qualified_name)

        # ----- Non-error return cleanup
        code.put_label(code.return_label)
        code.put_xdecref(Naming.retval_cname, py_object_type)
        code.putln('%s->resume_label = -1;' % Naming.generator_cname)
        # clean up as early as possible to help breaking any reference cycles
        code.putln('__Pyx_Generator_clear((PyObject*)%s);' % Naming.generator_cname)
        code.put_finish_refcount_context()
        code.putln('return NULL;')
        code.putln("}")

        # ----- Go back and insert temp variable declarations
        tempvardecl_code.put_temp_declarations(code.funcstate)
        # ----- Generator resume code
        resume_code.putln("switch (%s->resume_label) {" % (
            Naming.generator_cname))
        resume_code.putln("case 0: goto %s;" % first_run_label)

        for i, label in code.yield_labels:
            resume_code.putln("case %d: goto %s;" % (i, label))
        resume_code.putln("default: /* CPython raises the right error here */")
        resume_code.put_finish_refcount_context()
        resume_code.putln("return NULL;")
        resume_code.putln("}")

        code.exit_cfunc_scope()
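    # Illustrative sketch (assumed names, not verbatim output): the resume
    # switch written into 'resume_code' above dispatches on the generator's
    # saved resume_label so that each yield point can be re-entered directly,
    # roughly:
    #
    #     switch (__pyx_generator->resume_label) {
    #         case 0: goto __pyx_L_first_run;
    #         case 1: goto __pyx_L6_resume_from_yield;
    #         default: /* CPython raises the right error here */
    #         return NULL;
    #     }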

class OverrideCheckNode(StatNode):
    # A Node for dispatching to the def method if it
    # is overridden.

    child_attrs = ['body']

    def analyse_expressions(self, env):
        self.args = env.arg_entries
        if self.py_func.is_module_scope:
            first_arg = 0
        else:
            first_arg = 1
        self.func_node = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
        call_node = ExprNodes.SimpleCallNode(
            self.pos, function=self.func_node,
            args=[ExprNodes.NameNode(self.pos, name=arg.name)
                  for arg in self.args[first_arg:]])
        self.body = ReturnStatNode(self.pos, value=call_node)
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        interned_attr_cname = code.intern_identifier(self.py_func.entry.name)
        # Check to see if we are an extension type
        if self.py_func.is_module_scope:
            self_arg = "((PyObject *)%s)" % Naming.module_cname
        else:
            self_arg = "((PyObject *)%s)" % self.args[0].cname
        code.putln("/* Check if called by wrapper */")
        code.putln("if (unlikely(%s)) ;" % Naming.skip_dispatch_cname)
        code.putln("/* Check if overridden in Python */")
        if self.py_func.is_module_scope:
            code.putln("else {")
        else:
            code.putln("else if (unlikely(Py_TYPE(%s)->tp_dictoffset != 0)) {" % self_arg)
        func_node_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        self.func_node.set_cname(func_node_temp)
        # need to get attribute manually--scope would return cdef method
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
        err = code.error_goto_if_null(func_node_temp, self.pos)
        code.putln("%s = __Pyx_PyObject_GetAttrStr(%s, %s); %s" % (
            func_node_temp, self_arg, interned_attr_cname, err))
        code.put_gotref(func_node_temp)
        is_builtin_function_or_method = "PyCFunction_Check(%s)" % func_node_temp
        is_overridden = "(PyCFunction_GET_FUNCTION(%s) != (PyCFunction)%s)" % (
            func_node_temp, self.py_func.entry.func_cname)
        code.putln("if (!%s || %s) {" % (is_builtin_function_or_method, is_overridden))
        self.body.generate_execution_code(code)
        code.putln("}")
        code.put_decref_clear(func_node_temp, PyrexTypes.py_object_type)
        code.funcstate.release_temp(func_node_temp)
        code.putln("}")
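    # Hedged usage note: this node implements the dispatch that makes cpdef
    # methods overridable from Python.  For a hypothetical declaration
    #
    #     cdef class A:
    #         cpdef foo(self): ...
    #
    # the generated C first checks whether type(self) carries an instance
    # attribute overriding 'foo'; if so, that Python object is called,
    # otherwise execution falls through to the fast C implementation.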

class ClassDefNode(StatNode, BlockNode):
    pass

class PyClassDefNode(ClassDefNode):
    #  A Python class definition.
    #
    #  name     EncodedString   Name of the class
    #  doc      string or None
    #  body     StatNode        Attribute definition code
    #  entry    Symtab.Entry
    #  scope    PyClassScope
    #  decorators    [DecoratorNode]    list of decorators or None
    #
    #  The following subnodes are constructed internally:
    #
    #  dict     DictNode   Class dictionary or Py3 namespace
    #  classobj ClassNode  Class object
    #  target   NameNode   Variable to assign class object to

    child_attrs = ["body", "dict", "metaclass", "mkw", "bases", "class_result",
                   "target", "class_cell", "decorators"]

    is_py3_style_class = False  # Python3 style class (kwargs)
    metaclass = None
    mkw = None

    def __init__(self, pos, name, bases, doc, body, decorators=None,
                 keyword_args=None, starstar_arg=None, force_py3_semantics=False):
        StatNode.__init__(self, pos)
        self.name = name
        self.doc = doc
        self.body = body
        self.decorators = decorators
        self.bases = bases
        import ExprNodes
        if self.doc and Options.docstrings:
            doc = embed_position(self.pos, self.doc)
            doc_node = ExprNodes.StringNode(pos, value=doc)
        else:
            doc_node = None

        allow_py2_metaclass = not force_py3_semantics
        if keyword_args or starstar_arg:
            allow_py2_metaclass = False
            self.is_py3_style_class = True
            if keyword_args and not starstar_arg:
                for i, item in list(enumerate(keyword_args.key_value_pairs))[::-1]:
                    if item.key.value == 'metaclass':
                        if self.metaclass is not None:
                            error(item.pos, "keyword argument 'metaclass' passed multiple times")
                        # special case: we already know the metaclass,
                        # so we don't need to do the "build kwargs,
                        # find metaclass" dance at runtime
                        self.metaclass = item.value
                        del keyword_args.key_value_pairs[i]
            if starstar_arg:
                self.mkw = ExprNodes.KeywordArgsNode(
                    pos, keyword_args=keyword_args and keyword_args.key_value_pairs or [],
                    starstar_arg=starstar_arg)
            elif keyword_args.key_value_pairs:
                self.mkw = keyword_args
            else:
                assert self.metaclass is not None

        if force_py3_semantics or self.bases or self.mkw or self.metaclass:
            if self.metaclass is None:
                if starstar_arg:
                    # **kwargs may contain 'metaclass' arg
                    mkdict = self.mkw
                else:
                    mkdict = None
                if (not mkdict and
                        self.bases.is_sequence_constructor and
                        not self.bases.args):
                    pass  # no base classes => no inherited metaclass
                else:
                    self.metaclass = ExprNodes.PyClassMetaclassNode(
                        pos, mkw=mkdict, bases=self.bases)
                needs_metaclass_calculation = False
            else:
                needs_metaclass_calculation = True

            self.dict = ExprNodes.PyClassNamespaceNode(
                pos, name=name, doc=doc_node,
                metaclass=self.metaclass, bases=self.bases, mkw=self.mkw)
            self.classobj = ExprNodes.Py3ClassNode(
                pos, name=name,
                bases=self.bases, dict=self.dict, doc=doc_node,
                metaclass=self.metaclass, mkw=self.mkw,
                calculate_metaclass=needs_metaclass_calculation,
                allow_py2_metaclass=allow_py2_metaclass)
        else:
            # no bases, no metaclass => old style class creation
            self.dict = ExprNodes.DictNode(pos, key_value_pairs=[])
            self.classobj = ExprNodes.ClassNode(
                pos, name=name,
                bases=bases, dict=self.dict, doc=doc_node)

        self.target = ExprNodes.NameNode(pos, name=name)
        self.class_cell = ExprNodes.ClassCellInjectorNode(self.pos)
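    # Hedged usage note: for a Py3-style declaration such as the hypothetical
    #
    #     class C(Base, metaclass=Meta): ...
    #
    # the literal 'metaclass' keyword is extracted above at compile time and
    # stored on self.metaclass, so no runtime kwargs dict has to be built and
    # searched just to find the metaclass.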
    def as_cclass(self):
        """
        Return this node as if it were declared as an extension class
        """
        if self.is_py3_style_class:
            error(self.classobj.pos, "Python3 style class could not be represented as C class")
            return
        bases = self.classobj.bases.args
        if len(bases) == 0:
            base_class_name = None
            base_class_module = None
        elif len(bases) == 1:
            base = bases[0]
            path = []
            from ExprNodes import AttributeNode, NameNode
            while isinstance(base, AttributeNode):
                path.insert(0, base.attribute)
                base = base.obj
            if isinstance(base, NameNode):
                path.insert(0, base.name)
                base_class_name = path[-1]
                if len(path) > 1:
                    base_class_module = u'.'.join(path[:-1])
                else:
                    base_class_module = None
            else:
                error(self.classobj.bases.args.pos, "Invalid base class")
        else:
            error(self.classobj.bases.args.pos, "C class may only have one base class")
            return None

        return CClassDefNode(self.pos,
                             visibility='private',
                             module_name=None,
                             class_name=self.name,
                             base_class_module=base_class_module,
                             base_class_name=base_class_name,
                             decorators=self.decorators,
                             body=self.body,
                             in_pxd=False,
                             doc=self.doc)

    def create_scope(self, env):
        genv = env
        while genv.is_py_class_scope or genv.is_c_class_scope:
            genv = genv.outer_scope
        cenv = self.scope = PyClassScope(name=self.name, outer_scope=genv)
        return cenv

    def analyse_declarations(self, env):
        class_result = self.classobj
        if self.decorators:
            from ExprNodes import SimpleCallNode
            for decorator in self.decorators[::-1]:
                class_result = SimpleCallNode(
                    decorator.pos,
                    function=decorator.decorator,
                    args=[class_result])
            self.decorators = None
        self.class_result = class_result
        self.class_result.analyse_declarations(env)
        self.target.analyse_target_declaration(env)
        cenv = self.create_scope(env)
        cenv.directives = env.directives
        cenv.class_obj_cname = self.target.entry.cname
        self.body.analyse_declarations(cenv)

    def analyse_expressions(self, env):
        if self.bases:
            self.bases = self.bases.analyse_expressions(env)
        if self.metaclass:
            self.metaclass = self.metaclass.analyse_expressions(env)
        if self.mkw:
            self.mkw = self.mkw.analyse_expressions(env)
        self.dict = self.dict.analyse_expressions(env)
        self.class_result = self.class_result.analyse_expressions(env)
        genv = env.global_scope()
        cenv = self.scope
        self.body = self.body.analyse_expressions(cenv)
        self.target.analyse_target_expression(env, self.classobj)
        self.class_cell = self.class_cell.analyse_expressions(cenv)
        return self

    def generate_function_definitions(self, env, code):
        self.generate_lambda_definitions(self.scope, code)
        self.body.generate_function_definitions(self.scope, code)

    def generate_execution_code(self, code):
        code.pyclass_stack.append(self)
        cenv = self.scope
        if self.bases:
            self.bases.generate_evaluation_code(code)
        if self.mkw:
            self.mkw.generate_evaluation_code(code)
        if self.metaclass:
            self.metaclass.generate_evaluation_code(code)
        self.dict.generate_evaluation_code(code)
        cenv.namespace_cname = cenv.class_obj_cname = self.dict.result()
        self.class_cell.generate_evaluation_code(code)
        self.body.generate_execution_code(code)
        self.class_result.generate_evaluation_code(code)
        self.class_cell.generate_injection_code(
            code, self.class_result.result())
        self.class_cell.generate_disposal_code(code)
        cenv.namespace_cname = cenv.class_obj_cname = self.classobj.result()
        self.target.generate_assignment_code(self.class_result, code)
        self.dict.generate_disposal_code(code)
        self.dict.free_temps(code)
        if self.metaclass:
            self.metaclass.generate_disposal_code(code)
            self.metaclass.free_temps(code)
        if self.mkw:
            self.mkw.generate_disposal_code(code)
            self.mkw.free_temps(code)
        if self.bases:
            self.bases.generate_disposal_code(code)
            self.bases.free_temps(code)
        code.pyclass_stack.pop()

class CClassDefNode(ClassDefNode):
    #  An extension type definition.
    #
    #  visibility         'private' or 'public' or 'extern'
    #  typedef_flag       boolean
    #  module_name        string or None    For import of extern type objects
    #  class_name         string            Unqualified name of class
    #  as_name            string or None    Name to declare as in this scope
    #  base_class_module  string or None    Module containing the base class
    #  base_class_name    string or None    Name of the base class
    #  objstruct_name     string or None    Specified C name of object struct
    #  typeobj_name       string or None    Specified C name of type object
    #  in_pxd             boolean           Is in a .pxd file
    #  decorators         [DecoratorNode]   list of decorators or None
    #  doc                string or None
    #  body               StatNode or None
    #  entry              Symtab.Entry
    #  base_type          PyExtensionType or None
    #  buffer_defaults_node DictNode or None Declares defaults for a buffer
    #  buffer_defaults_pos

    child_attrs = ["body"]
    buffer_defaults_node = None
    buffer_defaults_pos = None
    typedef_flag = False
    objstruct_name = None

    def buffer_defaults(self, env):
        if not hasattr(self, '_buffer_defaults'):
            import Buffer
            if self.buffer_defaults_node:
                self._buffer_defaults = Buffer.analyse_buffer_options(
                    self.buffer_defaults_pos,
                    env, [], self.buffer_defaults_node,
                    need_complete=False)
            else:
                self._buffer_defaults = None
        return self._buffer_defaults

    def declare(self, env):
        if self.module_name and self.visibility != 'extern':
            module_path = self.module_name.split(".")
            home_scope = env.find_imported_module(module_path, self.pos)
            if not home_scope:
                return None
        else:
            home_scope = env

        self.entry = home_scope.declare_c_class(
            name=self.class_name,
            pos=self.pos,
            defining=0,
            implementing=0,
            module_name=self.module_name,
            base_type=None,
            objstruct_cname=self.objstruct_name,
            typeobj_cname=self.typeobj_name,
            visibility=self.visibility,
            typedef_flag=self.typedef_flag,
            buffer_defaults=self.buffer_defaults(env),
            shadow=self.shadow)

    def analyse_declarations(self, env):
        #print "CClassDefNode.analyse_declarations:", self.class_name
        #print "...visibility =", self.visibility
        #print "...module_name =", self.module_name

        if env.in_cinclude and not self.objstruct_name:
            error(self.pos, "Object struct name specification required for "
                            "C class defined in 'extern from' block")
        if self.decorators:
            error(self.pos,
                  "Decorators not allowed on cdef classes (used on type '%s')" % self.class_name)
        self.base_type = None
        # Now that module imports are cached, we need to
        # import the modules for extern classes.
        if self.module_name:
            self.module = None
            for module in env.cimported_modules:
                if module.name == self.module_name:
                    self.module = module
            if self.module is None:
                self.module = ModuleScope(self.module_name, None, env.context)
                self.module.has_extern_class = 1
                env.add_imported_module(self.module)

        if self.base_class_name:
            if self.base_class_module:
                base_class_scope = env.find_module(self.base_class_module, self.pos)
            else:
                base_class_scope = env
            if self.base_class_name == 'object':
                # extension classes are special and don't need to inherit from object
                if base_class_scope is None or base_class_scope.lookup('object') is None:
                    self.base_class_name = None
                    self.base_class_module = None
                    base_class_scope = None
            if base_class_scope:
                base_class_entry = base_class_scope.find(self.base_class_name, self.pos)
                if base_class_entry:
                    if not base_class_entry.is_type:
                        error(self.pos, "'%s' is not a type name" % self.base_class_name)
                    elif not base_class_entry.type.is_extension_type and \
                             not (base_class_entry.type.is_builtin_type and
                                  base_class_entry.type.objstruct_cname):
                        error(self.pos, "'%s' is not an extension type" % self.base_class_name)
                    elif not base_class_entry.type.is_complete():
                        error(self.pos, "Base class '%s' of type '%s' is incomplete" % (
                            self.base_class_name, self.class_name))
                    elif base_class_entry.type.scope and base_class_entry.type.scope.directives and \
                             base_class_entry.type.is_final_type:
                        error(self.pos, "Base class '%s' of type '%s' is final" % (
                            self.base_class_name, self.class_name))
                    elif base_class_entry.type.is_builtin_type and \
                             base_class_entry.type.name in ('tuple', 'str', 'bytes'):
                        error(self.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
                              % base_class_entry.type.name)
                    else:
                        self.base_type = base_class_entry.type
                if env.directives.get('freelist', 0) > 0:
                    warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)

        has_body = self.body is not None
        if has_body and self.base_type and not self.base_type.scope:
            # To properly initialize inherited attributes, the base type must
            # be analysed before this type.
            self.base_type.defered_declarations.append(lambda : self.analyse_declarations(env))
            return

        if self.module_name and self.visibility != 'extern':
            module_path = self.module_name.split(".")
            home_scope = env.find_imported_module(module_path, self.pos)
            if not home_scope:
                return
        else:
            home_scope = env

        if self.visibility == 'extern':
            if (self.module_name == '__builtin__' and
                    self.class_name in Builtin.builtin_types and
                    env.qualified_name[:8] != 'cpython.'):  # allow overloaded names for cimporting from cpython
                warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1)

        self.entry = home_scope.declare_c_class(
            name=self.class_name,
            pos=self.pos,
            defining=has_body and self.in_pxd,
            implementing=has_body and not self.in_pxd,
            module_name=self.module_name,
            base_type=self.base_type,
            objstruct_cname=self.objstruct_name,
            typeobj_cname=self.typeobj_name,
            visibility=self.visibility,
            typedef_flag=self.typedef_flag,
            buffer_defaults=self.buffer_defaults(env),
            shadow=self.shadow)

        if self.shadow:
            home_scope.lookup(self.class_name).as_variable = self.entry
        if home_scope is not env and self.visibility == 'extern':
            env.add_imported_entry(self.class_name, self.entry, self.pos)
        self.scope = scope = self.entry.type.scope
        if scope is not None:
            scope.directives = env.directives

        if self.doc and Options.docstrings:
            scope.doc = embed_position(self.pos, self.doc)

        if has_body:
            self.body.analyse_declarations(scope)
            if self.in_pxd:
                scope.defined = 1
            else:
                scope.implemented = 1
        env.allocate_vtable_names(self.entry)

        for thunk in self.entry.type.defered_declarations:
            thunk()

    def analyse_expressions(self, env):
        if self.body:
            scope = self.entry.type.scope
            self.body = self.body.analyse_expressions(scope)
        return self

    def generate_function_definitions(self, env, code):
        if self.body:
            self.generate_lambda_definitions(self.scope, code)
            self.body.generate_function_definitions(self.scope, code)

    def generate_execution_code(self, code):
        # This is needed to generate evaluation code for
        # default values of method arguments.
        if self.body:
            self.body.generate_execution_code(code)

    def annotate(self, code):
        if self.body:
            self.body.annotate(code)

class PropertyNode(StatNode):
    #  Definition of a property in an extension type.
    #
    #  name   string
    #  doc    EncodedString or None    Doc string
    #  entry  Symtab.Entry
    #  body   StatListNode

    child_attrs = ["body"]

    def analyse_declarations(self, env):
        self.entry = env.declare_property(self.name, self.doc, self.pos)
        self.entry.scope.directives = env.directives
        self.body.analyse_declarations(self.entry.scope)

    def analyse_expressions(self, env):
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        pass

    def annotate(self, code):
        self.body.annotate(code)

class GlobalNode(StatNode):
    # Global variable declaration.
    #
    # names    [string]

    child_attrs = []

    def analyse_declarations(self, env):
        for name in self.names:
            env.declare_global(name, self.pos)

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        pass


class NonlocalNode(StatNode):
    # Nonlocal variable declaration via the 'nonlocal' keyword.
    #
    # names    [string]

    child_attrs = []

    def analyse_declarations(self, env):
        for name in self.names:
            env.declare_nonlocal(name, self.pos)

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        pass

class ExprStatNode(StatNode):
    #  Expression used as a statement.
    #
    #  expr   ExprNode

    child_attrs = ["expr"]

    def analyse_declarations(self, env):
        import ExprNodes
        if isinstance(self.expr, ExprNodes.GeneralCallNode):
            func = self.expr.function.as_cython_attribute()
            if func == u'declare':
                args, kwds = self.expr.explicit_args_kwds()
                if len(args):
                    error(self.expr.pos, "Variable names must be specified.")
                for var, type_node in kwds.key_value_pairs:
                    type = type_node.analyse_as_type(env)
                    if type is None:
                        error(type_node.pos, "Unknown type")
                    else:
                        env.declare_var(var.value, type, var.pos, is_cdef=True)
                self.__class__ = PassStatNode

    def analyse_expressions(self, env):
        self.expr.result_is_used = False  # hint that .result() may safely be left empty
        self.expr = self.expr.analyse_expressions(env)
        return self

    def nogil_check(self, env):
        if self.expr.type.is_pyobject and self.expr.is_temp:
            self.gil_error()

    gil_message = "Discarding owned Python object"

    def generate_execution_code(self, code):
        self.expr.generate_evaluation_code(code)
        if not self.expr.is_temp and self.expr.result():
            code.putln("%s;" % self.expr.result())
        self.expr.generate_disposal_code(code)
        self.expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.expr.annotate(code)

class AssignmentNode(StatNode):
    #  Abstract base class for assignment nodes.
    #
    #  The analyse_expressions and generate_execution_code
    #  phases of assignments are split into two sub-phases
    #  each, to enable all the right hand sides of a
    #  parallel assignment to be evaluated before assigning
    #  to any of the left hand sides.

    def analyse_expressions(self, env):
        return self.analyse_types(env)

#    def analyse_expressions(self, env):
#        self.analyse_expressions_1(env)
#        self.analyse_expressions_2(env)

    def generate_execution_code(self, code):
        self.generate_rhs_evaluation_code(code)
        self.generate_assignment_code(code)

class SingleAssignmentNode(AssignmentNode):
    #  The simplest case:
    #
    #    a = b
    #
    #  lhs      ExprNode      Left hand side
    #  rhs      ExprNode      Right hand side
    #  first    bool          Is this guaranteed the first assignment to lhs?

    child_attrs = ["lhs", "rhs"]
    first = False
    declaration_only = False

    def analyse_declarations(self, env):
        import ExprNodes

        # handle declarations of the form x = cython.foo()
        if isinstance(self.rhs, ExprNodes.CallNode):
            func_name = self.rhs.function.as_cython_attribute()
            if func_name:
                args, kwds = self.rhs.explicit_args_kwds()

                if func_name in ['declare', 'typedef']:
                    if len(args) > 2 or kwds is not None:
                        error(self.rhs.pos, "Can only declare one type at a time.")
                        return

                    type = args[0].analyse_as_type(env)
                    if type is None:
                        error(args[0].pos, "Unknown type")
                        return
                    lhs = self.lhs
                    if func_name == 'declare':
                        if isinstance(lhs, ExprNodes.NameNode):
                            vars = [(lhs.name, lhs.pos)]
                        elif isinstance(lhs, ExprNodes.TupleNode):
                            vars = [(var.name, var.pos) for var in lhs.args]
                        else:
                            error(lhs.pos, "Invalid declaration")
                            return
                        for var, pos in vars:
                            env.declare_var(var, type, pos, is_cdef=True)
                        if len(args) == 2:
                            # we have a value to assign as well
                            self.rhs = args[1]
                        else:
                            self.declaration_only = True
                    else:
                        self.declaration_only = True
                        if not isinstance(lhs, ExprNodes.NameNode):
                            error(lhs.pos, "Invalid declaration.")
                        env.declare_typedef(lhs.name, type, self.pos, visibility='private')

                elif func_name in ['struct', 'union']:
                    self.declaration_only = True
                    if len(args) > 0 or kwds is None:
                        error(self.rhs.pos, "Struct or union members must be given by name.")
                        return
                    members = []
                    for member, type_node in kwds.key_value_pairs:
                        type = type_node.analyse_as_type(env)
                        if type is None:
                            error(type_node.pos, "Unknown type")
                        else:
                            members.append((member.value, type, member.pos))
                    if len(members) < len(kwds.key_value_pairs):
                        return
                    if not isinstance(self.lhs, ExprNodes.NameNode):
                        error(self.lhs.pos, "Invalid declaration.")
                    name = self.lhs.name
                    scope = StructOrUnionScope(name)
                    env.declare_struct_or_union(name, func_name, scope, False, self.rhs.pos)
                    for member, type, pos in members:
                        scope.declare_var(member, type, pos)

                elif func_name == 'fused_type':
                    # dtype = cython.fused_type(...)
                    self.declaration_only = True
                    if kwds:
                        error(self.rhs.function.pos,
                              "fused_type does not take keyword arguments")
                    fusednode = FusedTypeNode(self.rhs.pos,
                                              name=self.lhs.name, types=args)
                    fusednode.analyse_declarations(env)

        if self.declaration_only:
            return
        else:
            self.lhs.analyse_target_declaration(env)

    def analyse_types(self, env, use_temp=0):
        import ExprNodes

        self.rhs = self.rhs.analyse_types(env)
        self.lhs = self.lhs.analyse_target_types(env)
        self.lhs.gil_assignment_check(env)

        if self.lhs.memslice_broadcast or self.rhs.memslice_broadcast:
            self.lhs.memslice_broadcast = True
            self.rhs.memslice_broadcast = True

        is_index_node = isinstance(self.lhs, ExprNodes.IndexNode)
        if (is_index_node and not self.rhs.type.is_memoryviewslice and
                (self.lhs.memslice_slice or self.lhs.is_memslice_copy) and
                (self.lhs.type.dtype.assignable_from(self.rhs.type) or
                 self.rhs.type.is_pyobject)):
            # scalar slice assignment
            self.lhs.is_memslice_scalar_assignment = True
            dtype = self.lhs.type.dtype
        else:
            dtype = self.lhs.type

        rhs = self.rhs.coerce_to(dtype, env)
        if use_temp or rhs.is_attribute or (
                not rhs.is_name and not rhs.is_literal and
                rhs.type.is_pyobject):
            # things like (cdef) attribute access are not safe (traverses pointers)
            rhs = rhs.coerce_to_temp(env)
        elif rhs.type.is_pyobject:
            rhs = rhs.coerce_to_simple(env)
        self.rhs = rhs
        return self

    def generate_rhs_evaluation_code(self, code):
        self.rhs.generate_evaluation_code(code)

    def generate_assignment_code(self, code):
        self.lhs.generate_assignment_code(self.rhs, code)

    def generate_function_definitions(self, env, code):
        self.rhs.generate_function_definitions(env, code)

    def annotate(self, code):
        self.lhs.annotate(code)
        self.rhs.annotate(code)

class CascadedAssignmentNode(AssignmentNode):
    #  An assignment with multiple left hand sides:
    #
    #    a = b = c
    #
    #  lhs_list   [ExprNode]   Left hand sides
    #  rhs        ExprNode     Right hand sides
    #
    #  Used internally:
    #
    #  coerced_rhs_list   [ExprNode]   RHS coerced to type of each LHS

    child_attrs = ["lhs_list", "rhs", "coerced_rhs_list"]
    coerced_rhs_list = None

    def analyse_declarations(self, env):
        for lhs in self.lhs_list:
            lhs.analyse_target_declaration(env)

    def analyse_types(self, env, use_temp=0):
        from ExprNodes import CloneNode, ProxyNode

        rhs = self.rhs.analyse_types(env)
        if use_temp or rhs.is_attribute or (
                not rhs.is_name and not rhs.is_literal and
                rhs.type.is_pyobject):
            rhs = rhs.coerce_to_temp(env)
        else:
            rhs = rhs.coerce_to_simple(env)
        self.rhs = ProxyNode(rhs)

        self.coerced_rhs_list = []
        for lhs in self.lhs_list:
            lhs.analyse_target_types(env)
            lhs.gil_assignment_check(env)
            rhs = CloneNode(self.rhs)
            rhs = rhs.coerce_to(lhs.type, env)
            self.coerced_rhs_list.append(rhs)
        return self

    def generate_rhs_evaluation_code(self, code):
        self.rhs.generate_evaluation_code(code)

    def generate_assignment_code(self, code):
        for i in range(len(self.lhs_list)):
            lhs = self.lhs_list[i]
            rhs = self.coerced_rhs_list[i]
            rhs.generate_evaluation_code(code)
            lhs.generate_assignment_code(rhs, code)
            # Assignment has disposed of the cloned RHS
        self.rhs.generate_disposal_code(code)
        self.rhs.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.rhs.generate_function_definitions(env, code)

    def annotate(self, code):
        for i in range(len(self.lhs_list)):
            self.lhs_list[i].annotate(code)
            self.coerced_rhs_list[i].annotate(code)
        self.rhs.annotate(code)

class ParallelAssignmentNode(AssignmentNode):
    #  A combined packing/unpacking assignment:
    #
    #    a, b, c = d, e, f
    #
    #  This has been rearranged by the parser into
    #
    #    a = d ; b = e ; c = f
    #
    #  but we must evaluate all the right hand sides
    #  before assigning to any of the left hand sides.
    #
    #  stats     [AssignmentNode]   The constituent assignments
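    #  Hedged example of why the split matters: in
    #
    #      a, b = b, a
    #
    #  both right hand sides must be read before either assignment happens,
    #  otherwise the second assignment would see the already updated 'a'.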
    child_attrs = ["stats"]

    def analyse_declarations(self, env):
        for stat in self.stats:
            stat.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.stats = [stat.analyse_types(env, use_temp=1)
                      for stat in self.stats]
        return self

#    def analyse_expressions(self, env):
#        for stat in self.stats:
#            stat.analyse_expressions_1(env, use_temp=1)
#        for stat in self.stats:
#            stat.analyse_expressions_2(env)

    def generate_execution_code(self, code):
        for stat in self.stats:
            stat.generate_rhs_evaluation_code(code)
        for stat in self.stats:
            stat.generate_assignment_code(code)

    def generate_function_definitions(self, env, code):
        for stat in self.stats:
            stat.generate_function_definitions(env, code)

    def annotate(self, code):
        for stat in self.stats:
            stat.annotate(code)

class InPlaceAssignmentNode(AssignmentNode):
    #  An in place arithmetic operand:
    #
    #    x op= y
    #
    #  lhs      ExprNode      Left hand side
    #  rhs      ExprNode      Right hand side
    #  operator char          one of "+-*/%^&|"
    #
    #  This code is a bit tricky because in order to obey Python
    #  semantics the sub-expressions (e.g. indices) of the lhs must
    #  not be evaluated twice. So we must re-use the values calculated
    #  in evaluation phase for the assignment phase as well.
    #  Fortunately, the type of the lhs node is fairly constrained
    #  (it must be a NameNode, AttributeNode, or IndexNode).
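    #  Hedged example: for an in-place statement like
    #
    #      items[get_index()] += 1
    #
    #  get_index() must be called exactly once; the subexpression result is
    #  therefore computed once in the evaluation phase and reused when storing.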
    child_attrs = ["lhs", "rhs"]

    def analyse_declarations(self, env):
        self.lhs.analyse_target_declaration(env)

    def analyse_types(self, env):
        self.rhs = self.rhs.analyse_types(env)
        self.lhs = self.lhs.analyse_target_types(env)

        # When assigning to a fully indexed buffer or memoryview, coerce the rhs
        if (self.lhs.is_subscript and
                (self.lhs.memslice_index or self.lhs.is_buffer_access)):
            self.rhs = self.rhs.coerce_to(self.lhs.type, env)
        elif self.lhs.type.is_string and self.operator in '+-':
            # use pointer arithmetic for char* LHS instead of string concat
            self.rhs = self.rhs.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
        return self

    def generate_execution_code(self, code):
        self.rhs.generate_evaluation_code(code)
        self.lhs.generate_subexpr_evaluation_code(code)
        c_op = self.operator
        if c_op == "//":
            c_op = "/"
        elif c_op == "**":
            error(self.pos, "No C inplace power operator")
        if self.lhs.is_subscript and self.lhs.is_buffer_access:
            if self.lhs.type.is_pyobject:
                error(self.pos, "In-place operators not allowed on object buffers in this release.")
            if (c_op in ('/', '%') and self.lhs.type.is_int
                    and not code.globalstate.directives['cdivision']):
                error(self.pos, "In-place non-c divide operators not allowed on int buffers.")
            self.lhs.generate_buffer_setitem_code(self.rhs, code, c_op)
        else:
            # TODO: make sure overload is declared
            code.putln("%s %s= %s;" % (self.lhs.result(), c_op, self.rhs.result()))
        self.lhs.generate_subexpr_disposal_code(code)
        self.lhs.free_subexpr_temps(code)
        self.rhs.generate_disposal_code(code)
        self.rhs.free_temps(code)

    def annotate(self, code):
        self.lhs.annotate(code)
        self.rhs.annotate(code)

    def create_binop_node(self):
        import ExprNodes
        return ExprNodes.binop_node(self.pos, self.operator, self.lhs, self.rhs)

class PrintStatNode(StatNode):
    #  print statement
    #
    #  arg_tuple         TupleNode
    #  stream            ExprNode or None (stdout)
    #  append_newline    boolean

    child_attrs = ["arg_tuple", "stream"]

    def analyse_expressions(self, env):
        if self.stream:
            stream = self.stream.analyse_expressions(env)
            self.stream = stream.coerce_to_pyobject(env)
        arg_tuple = self.arg_tuple.analyse_expressions(env)
        self.arg_tuple = arg_tuple.coerce_to_pyobject(env)
        env.use_utility_code(printing_utility_code)
        if len(self.arg_tuple.args) == 1 and self.append_newline:
            env.use_utility_code(printing_one_utility_code)
        return self

    nogil_check = Node.gil_error
    gil_message = "Python print statement"

    def generate_execution_code(self, code):
        if self.stream:
            self.stream.generate_evaluation_code(code)
            stream_result = self.stream.py_result()
        else:
            stream_result = '0'
        if len(self.arg_tuple.args) == 1 and self.append_newline:
            arg = self.arg_tuple.args[0]
            arg.generate_evaluation_code(code)

            code.putln(
                "if (__Pyx_PrintOne(%s, %s) < 0) %s" % (
                    stream_result,
                    arg.py_result(),
                    code.error_goto(self.pos)))
            arg.generate_disposal_code(code)
            arg.free_temps(code)
        else:
            self.arg_tuple.generate_evaluation_code(code)
            code.putln(
                "if (__Pyx_Print(%s, %s, %d) < 0) %s" % (
                    stream_result,
                    self.arg_tuple.py_result(),
                    self.append_newline,
                    code.error_goto(self.pos)))
            self.arg_tuple.generate_disposal_code(code)
            self.arg_tuple.free_temps(code)

        if self.stream:
            self.stream.generate_disposal_code(code)
            self.stream.free_temps(code)

    def generate_function_definitions(self, env, code):
        if self.stream:
            self.stream.generate_function_definitions(env, code)
        self.arg_tuple.generate_function_definitions(env, code)

    def annotate(self, code):
        if self.stream:
            self.stream.annotate(code)
        self.arg_tuple.annotate(code)

class ExecStatNode(StatNode):
    #  exec statement
    #
    #  args     [ExprNode]

    child_attrs = ["args"]

    def analyse_expressions(self, env):
        for i, arg in enumerate(self.args):
            arg = arg.analyse_expressions(env)
            arg = arg.coerce_to_pyobject(env)
            self.args[i] = arg
        env.use_utility_code(Builtin.pyexec_utility_code)
        return self

    nogil_check = Node.gil_error
    gil_message = "Python exec statement"

    def generate_execution_code(self, code):
        args = []
        for arg in self.args:
            arg.generate_evaluation_code(code)
            args.append(arg.py_result())
        args = tuple(args + ['0', '0'][:3-len(args)])
        temp_result = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
        code.putln("%s = __Pyx_PyExec3(%s, %s, %s);" % (
            (temp_result,) + args))
        for arg in self.args:
            arg.generate_disposal_code(code)
            arg.free_temps(code)
        code.putln(
            code.error_goto_if_null(temp_result, self.pos))
        code.put_gotref(temp_result)
        code.put_decref_clear(temp_result, py_object_type)
        code.funcstate.release_temp(temp_result)

    def annotate(self, code):
        for arg in self.args:
            arg.annotate(code)

class DelStatNode(StatNode):
    #  del statement
    #
    #  args     [ExprNode]

    child_attrs = ["args"]
    ignore_nonexisting = False

    def analyse_declarations(self, env):
        for arg in self.args:
            arg.analyse_target_declaration(env)

    def analyse_expressions(self, env):
        for i, arg in enumerate(self.args):
            arg = self.args[i] = arg.analyse_target_expression(env, None)
            if arg.type.is_pyobject or (arg.is_name and
                                        arg.type.is_memoryviewslice):
                if arg.is_name and arg.entry.is_cglobal:
                    error(arg.pos, "Deletion of global C variable")
            elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
                self.cpp_check(env)
            elif arg.type.is_cpp_class:
                error(arg.pos, "Deletion of non-heap C++ object")
            elif arg.is_subscript and arg.base.type is Builtin.bytearray_type:
                pass  # del ba[i]
            else:
                error(arg.pos, "Deletion of non-Python, non-C++ object")
            #arg.release_target_temp(env)
        return self

    def nogil_check(self, env):
        for arg in self.args:
            if arg.type.is_pyobject:
                self.gil_error()

    gil_message = "Deleting Python object"

    def generate_execution_code(self, code):
        for arg in self.args:
            if (arg.type.is_pyobject or
                    arg.type.is_memoryviewslice or
                    arg.is_subscript and arg.base.type is Builtin.bytearray_type):
                arg.generate_deletion_code(
                    code, ignore_nonexisting=self.ignore_nonexisting)
            elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
                arg.generate_result_code(code)
                code.putln("delete %s;" % arg.result())
            # else error reported earlier
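        # Hedged usage note: for a hypothetical Cython declaration
        #
        #     cdef Rectangle* r = new Rectangle()
        #     del r
        #
        # the C++ pointer branch above emits "delete r;", while Python objects,
        # memoryview slices and bytearray items go through
        # generate_deletion_code() instead.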
    def annotate(self, code):
        for arg in self.args:
            arg.annotate(code)

class PassStatNode(StatNode):
    #  pass statement

    child_attrs = []

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        pass


class IndirectionNode(StatListNode):
    """
    This adds an indirection so that the node can be shared and a subtree can
    be removed at any time by clearing self.stats.
    """

    def __init__(self, stats):
        super(IndirectionNode, self).__init__(stats[0].pos, stats=stats)

class BreakStatNode(StatNode):

    child_attrs = []
    is_terminator = True

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        if not code.break_label:
            error(self.pos, "break statement not inside loop")
        else:
            code.put_goto(code.break_label)


class ContinueStatNode(StatNode):

    child_attrs = []
    is_terminator = True

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        if code.funcstate.in_try_finally:
            error(self.pos, "continue statement inside try of try...finally")
        elif not code.continue_label:
            error(self.pos, "continue statement not inside loop")
        else:
            code.put_goto(code.continue_label)

class ReturnStatNode(StatNode):
    #  return statement
    #
    #  value          ExprNode or None
    #  return_type    PyrexType
    #  in_generator   return inside of generator => raise StopIteration

    child_attrs = ["value"]
    is_terminator = True
    in_generator = False

    # Whether we are in a parallel section
    in_parallel = False

    def analyse_expressions(self, env):
        return_type = env.return_type
        self.return_type = return_type
        if not return_type:
            error(self.pos, "Return not inside a function body")
            return self
        if self.value:
            self.value = self.value.analyse_types(env)
            if return_type.is_void or return_type.is_returncode:
                error(self.value.pos,
                      "Return with value in void function")
            else:
                self.value = self.value.coerce_to(env.return_type, env)
        else:
            if (not return_type.is_void
                    and not return_type.is_pyobject
                    and not return_type.is_returncode):
                error(self.pos, "Return value required")
        return self

    def nogil_check(self, env):
        if self.return_type.is_pyobject:
            self.gil_error()

    gil_message = "Returning Python object"

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        if not self.return_type:
            # error reported earlier
            return
        if self.return_type.is_pyobject:
            code.put_xdecref(Naming.retval_cname,
                             self.return_type)

        if self.value:
            self.value.generate_evaluation_code(code)
            if self.return_type.is_memoryviewslice:
                import MemoryView
                MemoryView.put_acquire_memoryviewslice(
                    lhs_cname=Naming.retval_cname,
                    lhs_type=self.return_type,
                    lhs_pos=self.value.pos,
                    rhs=self.value,
                    code=code,
                    have_gil=self.in_nogil_context)
            elif self.in_generator:
                # return value == raise StopIteration(value), but uncatchable
                code.putln(
                    "%s = NULL; PyErr_SetObject(PyExc_StopIteration, %s);" % (
                        Naming.retval_cname,
                        self.value.result_as(self.return_type)))
                self.value.generate_disposal_code(code)
            else:
                self.value.make_owned_reference(code)
                code.putln(
                    "%s = %s;" % (
                        Naming.retval_cname,
                        self.value.result_as(self.return_type)))
            self.value.generate_post_assignment_code(code)
            self.value.free_temps(code)
        else:
            if self.return_type.is_pyobject:
                code.put_init_to_py_none(Naming.retval_cname, self.return_type)
            elif self.return_type.is_returncode:
                self.put_return(code, self.return_type.default_value)

        for cname, type in code.funcstate.temps_holding_reference():
            code.put_decref_clear(cname, type)

        code.put_goto(code.return_label)

    def put_return(self, code, value):
        if self.in_parallel:
            code.putln_openmp("#pragma omp critical(__pyx_returning)")
        code.putln("%s = %s;" % (Naming.retval_cname, value))

    def generate_function_definitions(self, env, code):
        if self.value is not None:
            self.value.generate_function_definitions(env, code)

    def annotate(self, code):
        if self.value:
            self.value.annotate(code)

class RaiseStatNode(StatNode):
    #  raise statement
    #
    #  exc_type    ExprNode or None
    #  exc_value   ExprNode or None
    #  exc_tb      ExprNode or None
    #  cause       ExprNode or None

    child_attrs = ["exc_type", "exc_value", "exc_tb", "cause"]
    is_terminator = True

    def analyse_expressions(self, env):
        if self.exc_type:
            exc_type = self.exc_type.analyse_types(env)
            self.exc_type = exc_type.coerce_to_pyobject(env)
        if self.exc_value:
            exc_value = self.exc_value.analyse_types(env)
            self.exc_value = exc_value.coerce_to_pyobject(env)
        if self.exc_tb:
            exc_tb = self.exc_tb.analyse_types(env)
            self.exc_tb = exc_tb.coerce_to_pyobject(env)
        if self.cause:
            cause = self.cause.analyse_types(env)
            self.cause = cause.coerce_to_pyobject(env)
        # special cases for builtin exceptions
        self.builtin_exc_name = None
        if self.exc_type and not self.exc_value and not self.exc_tb:
            exc = self.exc_type
            import ExprNodes
            if (isinstance(exc, ExprNodes.SimpleCallNode) and
                    not (exc.args or (exc.arg_tuple is not None and
                                      exc.arg_tuple.args))):
                exc = exc.function  # extract the exception type
            if exc.is_name and exc.entry.is_builtin:
                self.builtin_exc_name = exc.name
                if self.builtin_exc_name == 'MemoryError':
                    self.exc_type = None  # has a separate implementation
        return self

    nogil_check = Node.gil_error
    gil_message = "Raising exception"

    def generate_execution_code(self, code):
        if self.builtin_exc_name == 'MemoryError':
            code.putln('PyErr_NoMemory(); %s' % code.error_goto(self.pos))
            return

        if self.exc_type:
            self.exc_type.generate_evaluation_code(code)
            type_code = self.exc_type.py_result()
        else:
            type_code = "0"
        if self.exc_value:
            self.exc_value.generate_evaluation_code(code)
            value_code = self.exc_value.py_result()
        else:
            value_code = "0"
        if self.exc_tb:
            self.exc_tb.generate_evaluation_code(code)
            tb_code = self.exc_tb.py_result()
        else:
            tb_code = "0"
        if self.cause:
            self.cause.generate_evaluation_code(code)
            cause_code = self.cause.py_result()
        else:
            cause_code = "0"
        code.globalstate.use_utility_code(raise_utility_code)
        code.putln(
            "__Pyx_Raise(%s, %s, %s, %s);" % (
                type_code,
                value_code,
                tb_code,
                cause_code))
        for obj in (self.exc_type, self.exc_value, self.exc_tb, self.cause):
            if obj:
                obj.generate_disposal_code(code)
                obj.free_temps(code)
        code.putln(
            code.error_goto(self.pos))

    def generate_function_definitions(self, env, code):
        if self.exc_type is not None:
            self.exc_type.generate_function_definitions(env, code)
        if self.exc_value is not None:
            self.exc_value.generate_function_definitions(env, code)
        if self.exc_tb is not None:
            self.exc_tb.generate_function_definitions(env, code)
        if self.cause is not None:
            self.cause.generate_function_definitions(env, code)

    def annotate(self, code):
        if self.exc_type:
            self.exc_type.annotate(code)
        if self.exc_value:
            self.exc_value.annotate(code)
        if self.exc_tb:
            self.exc_tb.annotate(code)
        if self.cause:
            self.cause.annotate(code)

class ReraiseStatNode(StatNode):

    child_attrs = []
    is_terminator = True

    def analyse_expressions(self, env):
        return self

    nogil_check = Node.gil_error
    gil_message = "Raising exception"

    def generate_execution_code(self, code):
        vars = code.funcstate.exc_vars
        if vars:
            code.globalstate.use_utility_code(restore_exception_utility_code)
            code.put_giveref(vars[0])
            code.put_giveref(vars[1])
            # fresh exceptions may not have a traceback yet (-> finally!)
            code.put_xgiveref(vars[2])
            code.putln("__Pyx_ErrRestore(%s, %s, %s);" % tuple(vars))
            for varname in vars:
                code.put("%s = 0; " % varname)
            code.putln()
            code.putln(code.error_goto(self.pos))
        else:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ReRaiseException", "Exceptions.c"))
            code.putln("__Pyx_ReraiseException(); %s" % code.error_goto(self.pos))

class AssertStatNode(StatNode):
    #  assert statement
    #
    #  cond    ExprNode
    #  value   ExprNode or None

    child_attrs = ["cond", "value"]

    def analyse_expressions(self, env):
        self.cond = self.cond.analyse_boolean_expression(env)
        if self.value:
            value = self.value.analyse_types(env)
            if value.type is Builtin.tuple_type or not value.type.is_builtin_type:
                # prevent tuple values from being interpreted as argument value tuples
                from ExprNodes import TupleNode
                value = TupleNode(value.pos, args=[value], slow=True)
                self.value = value.analyse_types(env, skip_children=True)
            else:
                self.value = value.coerce_to_pyobject(env)
        return self

    nogil_check = Node.gil_error
    gil_message = "Raising exception"

    def generate_execution_code(self, code):
        code.putln("#ifndef CYTHON_WITHOUT_ASSERTIONS")
        code.putln("if (unlikely(!Py_OptimizeFlag)) {")
        self.cond.generate_evaluation_code(code)
        code.putln(
            "if (unlikely(!%s)) {" %
                self.cond.result())
        if self.value:
            self.value.generate_evaluation_code(code)
            code.putln(
                "PyErr_SetObject(PyExc_AssertionError, %s);" %
                    self.value.py_result())
            self.value.generate_disposal_code(code)
            self.value.free_temps(code)
        else:
            code.putln(
                "PyErr_SetNone(PyExc_AssertionError);")
        code.putln(
            code.error_goto(self.pos))
        code.putln("}")
        self.cond.generate_disposal_code(code)
        self.cond.free_temps(code)
        code.putln("}")
        code.putln("#endif")

    def generate_function_definitions(self, env, code):
        self.cond.generate_function_definitions(env, code)
        if self.value is not None:
            self.value.generate_function_definitions(env, code)

    def annotate(self, code):
        self.cond.annotate(code)
        if self.value:
            self.value.annotate(code)

class IfStatNode(StatNode):
    #  if statement
    #
    #  if_clauses   [IfClauseNode]
    #  else_clause  StatNode or None

    child_attrs = ["if_clauses", "else_clause"]

    def analyse_declarations(self, env):
        for if_clause in self.if_clauses:
            if_clause.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.if_clauses = [if_clause.analyse_expressions(env)
                           for if_clause in self.if_clauses]
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        end_label = code.new_label()
        for if_clause in self.if_clauses:
            if_clause.generate_execution_code(code, end_label)
        if self.else_clause:
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(end_label)

    def generate_function_definitions(self, env, code):
        for clause in self.if_clauses:
            clause.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        for if_clause in self.if_clauses:
            if_clause.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)

class IfClauseNode(Node):
    #  if or elif clause in an if statement
    #
    #  condition   ExprNode
    #  body        StatNode

    child_attrs = ["condition", "body"]

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.condition = \
            self.condition.analyse_temp_boolean_expression(env)
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_execution_code(self, code, end_label):
        self.condition.generate_evaluation_code(code)
        code.putln(
            "if (%s) {" %
                self.condition.result())
        self.condition.generate_disposal_code(code)
        self.condition.free_temps(code)
        self.body.generate_execution_code(code)
        if not self.body.is_terminator:
            code.put_goto(end_label)
        code.putln("}")

    def generate_function_definitions(self, env, code):
        self.condition.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)

    def annotate(self, code):
        self.condition.annotate(code)
        self.body.annotate(code)

class SwitchCaseNode(StatNode):
    # Generated in the optimization of an if-elif-else node
    #
    # conditions    [ExprNode]
    # body          StatNode

    child_attrs = ['conditions', 'body']

    def generate_execution_code(self, code):
        for cond in self.conditions:
            code.mark_pos(cond.pos)
            cond.generate_evaluation_code(code)
            code.putln("case %s:" % cond.result())
        self.body.generate_execution_code(code)
        code.putln("break;")

    def generate_function_definitions(self, env, code):
        for cond in self.conditions:
            cond.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)

    def annotate(self, code):
        for cond in self.conditions:
            cond.annotate(code)
        self.body.annotate(code)

class SwitchStatNode(StatNode):
    # Generated in the optimization of an if-elif-else node
    #
    # test          ExprNode
    # cases         [SwitchCaseNode]
    # else_clause   StatNode or None

    child_attrs = ['test', 'cases', 'else_clause']

    def generate_execution_code(self, code):
        self.test.generate_evaluation_code(code)
        code.putln("switch (%s) {" % self.test.result())
        for case in self.cases:
            case.generate_execution_code(code)
        if self.else_clause is not None:
            code.putln("default:")
            self.else_clause.generate_execution_code(code)
            code.putln("break;")
        else:
            # Always generate a default clause to prevent C compiler warnings
            # about unmatched enum values (it was not the user who decided to
            # generate the switch statement, so shouldn't be bothered).
            code.putln("default: break;")
        code.putln("}")

    def generate_function_definitions(self, env, code):
        self.test.generate_function_definitions(env, code)
        for case in self.cases:
            case.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        self.test.annotate(code)
        for case in self.cases:
            case.annotate(code)
        if self.else_clause is not None:
            self.else_clause.annotate(code)
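
# SwitchCaseNode/SwitchStatNode are only created by the optimizer when an
# if/elif chain tests one expression against constant values; e.g.
# ``if c == 1: ... elif c == 2: ... else: ...`` can then be emitted as
# ``switch (c) { case 1: ... break; case 2: ... break; default: ... }``
# as generated above (illustrative example).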

class LoopNode(object):
    pass


class WhileStatNode(LoopNode, StatNode):
    #  while statement
    #
    #  condition    ExprNode
    #  body         StatNode
    #  else_clause  StatNode

    child_attrs = ["condition", "body", "else_clause"]

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.condition = self.condition.analyse_temp_boolean_expression(env)
        self.body = self.body.analyse_expressions(env)
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        old_loop_labels = code.new_loop_labels()
        code.putln(
            "while (1) {")
        self.condition.generate_evaluation_code(code)
        self.condition.generate_disposal_code(code)
        code.putln(
            "if (!%s) break;" %
                self.condition.result())
        self.condition.free_temps(code)
        self.body.generate_execution_code(code)
        code.put_label(code.continue_label)
        code.putln("}")
        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)
        if self.else_clause:
            code.mark_pos(self.else_clause.pos)
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(break_label)

    def generate_function_definitions(self, env, code):
        self.condition.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        self.condition.annotate(code)
        self.body.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)
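
# Rough shape of the loop generated above for ``while cond: body``:
#     while (1) { if (!cond) break; body; continue_label:; }
#     /*else*/ { ... }      /* only if an else clause is present */
#     break_label:;
# (illustrative sketch, label names differ in real output).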

class DictIterationNextNode(Node):
    # Helper node for calling PyDict_Next() inside of a WhileStatNode
    # and checking the dictionary size for changes.  Created in Optimize.py.
    child_attrs = ['dict_obj', 'expected_size', 'pos_index_var',
                   'coerced_key_var', 'coerced_value_var', 'coerced_tuple_var',
                   'key_target', 'value_target', 'tuple_target', 'is_dict_flag']

    coerced_key_var = key_ref = None
    coerced_value_var = value_ref = None
    coerced_tuple_var = tuple_ref = None

    def __init__(self, dict_obj, expected_size, pos_index_var,
                 key_target, value_target, tuple_target, is_dict_flag):
        Node.__init__(
            self, dict_obj.pos,
            dict_obj = dict_obj,
            expected_size = expected_size,
            pos_index_var = pos_index_var,
            key_target = key_target,
            value_target = value_target,
            tuple_target = tuple_target,
            is_dict_flag = is_dict_flag,
            is_temp = True,
            type = PyrexTypes.c_bint_type)

    def analyse_expressions(self, env):
        self.dict_obj = self.dict_obj.analyse_types(env)
        self.expected_size = self.expected_size.analyse_types(env)
        if self.pos_index_var:
            self.pos_index_var = self.pos_index_var.analyse_types(env)
        if self.key_target:
            self.key_target = self.key_target.analyse_target_types(env)
            self.key_ref = ExprNodes.TempNode(self.key_target.pos, PyrexTypes.py_object_type)
            self.coerced_key_var = self.key_ref.coerce_to(self.key_target.type, env)
        if self.value_target:
            self.value_target = self.value_target.analyse_target_types(env)
            self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
            self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
        if self.tuple_target:
            self.tuple_target = self.tuple_target.analyse_target_types(env)
            self.tuple_ref = ExprNodes.TempNode(self.tuple_target.pos, PyrexTypes.py_object_type)
            self.coerced_tuple_var = self.tuple_ref.coerce_to(self.tuple_target.type, env)
        self.is_dict_flag = self.is_dict_flag.analyse_types(env)
        return self

    def generate_function_definitions(self, env, code):
        self.dict_obj.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        code.globalstate.use_utility_code(UtilityCode.load_cached("dict_iter", "Optimize.c"))
        self.dict_obj.generate_evaluation_code(code)

        assignments = []
        temp_addresses = []
        for var, result, target in [(self.key_ref, self.coerced_key_var, self.key_target),
                                    (self.value_ref, self.coerced_value_var, self.value_target),
                                    (self.tuple_ref, self.coerced_tuple_var, self.tuple_target)]:
            if target is None:
                addr = 'NULL'
            else:
                assignments.append((var, result, target))
                var.allocate(code)
                addr = '&%s' % var.result()
            temp_addresses.append(addr)

        result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
        code.putln("%s = __Pyx_dict_iter_next(%s, %s, &%s, %s, %s, %s, %s);" % (
            result_temp,
            self.dict_obj.py_result(),
            self.expected_size.result(),
            self.pos_index_var.result(),
            temp_addresses[0],
            temp_addresses[1],
            temp_addresses[2],
            self.is_dict_flag.result()
        ))
        code.putln("if (unlikely(%s == 0)) break;" % result_temp)
        code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos))
        code.funcstate.release_temp(result_temp)

        # evaluate all coercions before the assignments
        for var, result, target in assignments:
            code.put_gotref(var.result())
        for var, result, target in assignments:
            result.generate_evaluation_code(code)
        for var, result, target in assignments:
            target.generate_assignment_code(result, code)
            var.release(code)
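
# Illustrative use: the optimizer rewrites ``for k, v in d.iteritems(): ...``
# into a while-True loop whose body starts with this node, i.e. a single
# ``__Pyx_dict_iter_next(dict, expected_size, &pos, &key, &value, NULL, is_dict)``
# call that breaks on 0, propagates an error on -1, and otherwise assigns the
# coerced key/value to the loop targets (sketch based on the code above).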

def ForStatNode(pos, **kw):
    if 'iterator' in kw:
        return ForInStatNode(pos, **kw)
    else:
        return ForFromStatNode(pos, **kw)

class ForInStatNode(LoopNode, StatNode):
    #  for statement
    #
    #  target        ExprNode
    #  iterator      IteratorNode
    #  body          StatNode
    #  else_clause   StatNode
    #  item          NextNode       used internally

    child_attrs = ["target", "iterator", "body", "else_clause"]
    item = None

    def analyse_declarations(self, env):
        self.target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)
        self.item = ExprNodes.NextNode(self.iterator)

    def analyse_expressions(self, env):
        self.target = self.target.analyse_target_types(env)
        self.iterator = self.iterator.analyse_expressions(env)

        self.item = ExprNodes.NextNode(self.iterator)  # must rewrap after analysis
        self.item = self.item.analyse_expressions(env)
        if (self.iterator.type.is_ptr or self.iterator.type.is_array) and \
                self.target.type.assignable_from(self.iterator.type):
            # C array slice optimization.
            pass
        else:
            self.item = self.item.coerce_to(self.target.type, env)
        self.body = self.body.analyse_expressions(env)
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        old_loop_labels = code.new_loop_labels()
        self.iterator.generate_evaluation_code(code)
        code.putln("for (;;) {")
        self.item.generate_evaluation_code(code)
        self.target.generate_assignment_code(self.item, code)
        self.body.generate_execution_code(code)
        code.put_label(code.continue_label)
        code.putln("}")
        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)

        if self.else_clause:
            # in nested loops, the 'else' block can contain a
            # 'continue' statement for the outer loop, but we may need
            # to generate cleanup code before taking that path, so we
            # intercept it here
            orig_continue_label = code.continue_label
            code.continue_label = code.new_label('outer_continue')

            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")

            if code.label_used(code.continue_label):
                code.put_goto(break_label)
                code.put_label(code.continue_label)
                self.iterator.generate_disposal_code(code)
                code.put_goto(orig_continue_label)
            code.set_loop_labels(old_loop_labels)

        if code.label_used(break_label):
            code.put_label(break_label)
        self.iterator.generate_disposal_code(code)
        self.iterator.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.target.generate_function_definitions(env, code)
        self.iterator.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        self.target.annotate(code)
        self.iterator.annotate(code)
        self.body.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)
        self.item.annotate(code)
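
# Rough shape of the loop generated above for ``for target in it: body``:
#     <evaluate iterator>
#     for (;;) { <item = next(iterator) or break>; target = item; body; continue_label:; }
#     /*else*/ { ... }   /* with extra cleanup gotos if 'continue' is used inside */
#     break_label:; <dispose of iterator>
# (illustrative sketch only).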

class ForFromStatNode(LoopNode, StatNode):
    #  for name from expr rel name rel expr
    #
    #  target        NameNode
    #  bound1        ExprNode
    #  relation1     string
    #  relation2     string
    #  bound2        ExprNode
    #  step          ExprNode or None
    #  body          StatNode
    #  else_clause   StatNode or None
    #
    #  Used internally:
    #
    #  from_range         bool
    #  is_py_target       bool
    #  loopvar_node       ExprNode (usually a NameNode or temp node)
    #  py_loopvar_node    PyTempNode or None
    child_attrs = ["target", "bound1", "bound2", "step", "body", "else_clause"]

    is_py_target = False
    loopvar_node = None
    py_loopvar_node = None
    from_range = False

    gil_message = "For-loop using object bounds or target"

    def nogil_check(self, env):
        for x in (self.target, self.bound1, self.bound2):
            if x.type.is_pyobject:
                self.gil_error()

    def analyse_declarations(self, env):
        self.target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.target = self.target.analyse_target_types(env)
        self.bound1 = self.bound1.analyse_types(env)
        self.bound2 = self.bound2.analyse_types(env)
        if self.step is not None:
            if isinstance(self.step, ExprNodes.UnaryMinusNode):
                warning(self.step.pos, "Probable infinite loop in for-from-by statement. Consider switching the directions of the relations.", 2)
            self.step = self.step.analyse_types(env)

        if self.target.type.is_numeric:
            loop_type = self.target.type
        else:
            loop_type = PyrexTypes.c_int_type
            if not self.bound1.type.is_pyobject:
                loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type)
            if not self.bound2.type.is_pyobject:
                loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound2.type)
            if self.step is not None and not self.step.type.is_pyobject:
                loop_type = PyrexTypes.widest_numeric_type(loop_type, self.step.type)
        self.bound1 = self.bound1.coerce_to(loop_type, env)
        self.bound2 = self.bound2.coerce_to(loop_type, env)
        if not self.bound2.is_literal:
            self.bound2 = self.bound2.coerce_to_temp(env)
        if self.step is not None:
            self.step = self.step.coerce_to(loop_type, env)
            if not self.step.is_literal:
                self.step = self.step.coerce_to_temp(env)

        target_type = self.target.type
        if not (target_type.is_pyobject or target_type.is_numeric):
            error(self.target.pos,
                  "for-from loop variable must be c numeric type or Python object")
        if target_type.is_numeric:
            self.is_py_target = False
            if isinstance(self.target, ExprNodes.IndexNode) and self.target.is_buffer_access:
                raise error(self.pos, "Buffer indexing not allowed as for loop target.")
            self.loopvar_node = self.target
            self.py_loopvar_node = None
        else:
            self.is_py_target = True
            c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env)
            self.loopvar_node = c_loopvar_node
            self.py_loopvar_node = \
                ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
        self.body = self.body.analyse_expressions(env)
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    def generate_execution_code(self, code):
        old_loop_labels = code.new_loop_labels()
        from_range = self.from_range
        self.bound1.generate_evaluation_code(code)
        self.bound2.generate_evaluation_code(code)
        offset, incop = self.relation_table[self.relation1]
        if self.step is not None:
            self.step.generate_evaluation_code(code)
            step = self.step.result()
            incop = "%s=%s" % (incop[0], step)

        if isinstance(self.loopvar_node, ExprNodes.TempNode):
            self.loopvar_node.allocate(code)
        if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
            self.py_loopvar_node.allocate(code)
        if from_range:
            loopvar_name = code.funcstate.allocate_temp(self.target.type, False)
        else:
            loopvar_name = self.loopvar_node.result()
        code.putln(
            "for (%s = %s%s; %s %s %s; %s%s) {" % (
                loopvar_name,
                self.bound1.result(), offset,
                loopvar_name, self.relation2, self.bound2.result(),
                loopvar_name, incop))
        if self.py_loopvar_node:
            self.py_loopvar_node.generate_evaluation_code(code)
            self.target.generate_assignment_code(self.py_loopvar_node, code)
        elif from_range:
            code.putln("%s = %s;" % (
                self.target.result(), loopvar_name))
        self.body.generate_execution_code(code)
        code.put_label(code.continue_label)
        if self.py_loopvar_node:
            # This mess is to make for..from loops with python targets behave
            # exactly like those with C targets with regards to re-assignment
            # of the loop variable.
            if self.target.entry.is_pyglobal:
                # We know target is a NameNode, this is the only ugly case.
                target_node = ExprNodes.PyTempNode(self.target.pos, None)
                target_node.allocate(code)
                interned_cname = code.intern_identifier(self.target.entry.name)
                if self.target.entry.scope.is_module_scope:
                    code.globalstate.use_utility_code(
                        UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
                    lookup_func = '__Pyx_GetModuleGlobalName(%s)'
                else:
                    code.globalstate.use_utility_code(
                        UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
                    lookup_func = '__Pyx_GetNameInClass(%s, %%s)' % (
                        self.target.entry.scope.namespace_cname)
                code.putln("%s = %s; %s" % (
                    target_node.result(),
                    lookup_func % interned_cname,
                    code.error_goto_if_null(target_node.result(), self.target.pos)))
                code.put_gotref(target_node.result())
            else:
                target_node = self.target
            from_py_node = ExprNodes.CoerceFromPyTypeNode(
                self.loopvar_node.type, target_node, self.target.entry.scope)
            from_py_node.temp_code = loopvar_name
            from_py_node.generate_result_code(code)
            if self.target.entry.is_pyglobal:
                code.put_decref(target_node.result(), target_node.type)
                target_node.release(code)
        code.putln("}")
        if self.py_loopvar_node:
            # This is potentially wasteful, but we don't want the semantics to
            # depend on whether or not the loop is a python type.
            self.py_loopvar_node.generate_evaluation_code(code)
            self.target.generate_assignment_code(self.py_loopvar_node, code)
        if from_range:
            code.funcstate.release_temp(loopvar_name)
        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)
        if self.else_clause:
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(break_label)
        self.bound1.generate_disposal_code(code)
        self.bound1.free_temps(code)
        self.bound2.generate_disposal_code(code)
        self.bound2.free_temps(code)
        if isinstance(self.loopvar_node, ExprNodes.TempNode):
            self.loopvar_node.release(code)
        if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
            self.py_loopvar_node.release(code)
        if self.step is not None:
            self.step.generate_disposal_code(code)
            self.step.free_temps(code)
    relation_table = {
        # {relop : (initial offset, increment op)}
        '<=': ("",   "++"),
        '<' : ("+1", "++"),
        '>=': ("",   "--"),
        '>' : ("-1", "--"),
    }

    def generate_function_definitions(self, env, code):
        self.target.generate_function_definitions(env, code)
        self.bound1.generate_function_definitions(env, code)
        self.bound2.generate_function_definitions(env, code)
        if self.step is not None:
            self.step.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        self.target.annotate(code)
        self.bound1.annotate(code)
        self.bound2.annotate(code)
        if self.step:
            self.step.annotate(code)
        self.body.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)
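
# Example mapping (assuming the relation_table entries above): the legacy loop
# ``for i from 0 <= i < n by 2:`` generates roughly
#     for (i = 0; i < n; i += 2) { ... }
# where relation1 picks the initial offset and increment operator and
# relation2 becomes the C loop condition (illustrative, not exact output).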

class WithStatNode(StatNode):
    """
    Represents a Python with statement.

    Implemented by the WithTransform as follows:

        MGR = EXPR
        EXIT = MGR.__exit__
        VALUE = MGR.__enter__()
        EXC = True
        try:
            try:
                TARGET = VALUE  # optional
                BODY
            except:
                EXC = False
                if not EXIT(*EXCINFO):
                    raise
        finally:
            if EXC:
                EXIT(None, None, None)
            MGR = EXIT = VALUE = None
    """
    #  manager          The with statement manager object
    #  target           ExprNode  the target lhs of the __enter__() call
    #  body             StatNode
    #  enter_call       ExprNode  the call to the __enter__() method
    #  exit_var         String    the cname of the __exit__() method reference

    child_attrs = ["manager", "enter_call", "target", "body"]

    enter_call = None

    def analyse_declarations(self, env):
        self.manager.analyse_declarations(env)
        self.enter_call.analyse_declarations(env)
        self.body.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.manager = self.manager.analyse_types(env)
        self.enter_call = self.enter_call.analyse_types(env)
        self.body = self.body.analyse_expressions(env)
        return self

    def generate_function_definitions(self, env, code):
        self.manager.generate_function_definitions(env, code)
        self.enter_call.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        code.putln("/*with:*/ {")
        self.manager.generate_evaluation_code(code)
        self.exit_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
        code.putln("%s = __Pyx_PyObject_LookupSpecial(%s, %s); %s" % (
            self.exit_var,
            self.manager.py_result(),
            code.intern_identifier(EncodedString('__exit__')),
            code.error_goto_if_null(self.exit_var, self.pos),
            ))
        code.put_gotref(self.exit_var)

        # need to free exit_var in the face of exceptions during setup
        old_error_label = code.new_error_label()
        intermediate_error_label = code.error_label

        self.enter_call.generate_evaluation_code(code)
        if not self.target:
            self.enter_call.generate_disposal_code(code)
            self.enter_call.free_temps(code)
        else:
            # Otherwise, the node will be cleaned up by the
            # WithTargetAssignmentStatNode after assigning its result
            # to the target of the 'with' statement.
            pass
        self.manager.generate_disposal_code(code)
        self.manager.free_temps(code)

        code.error_label = old_error_label
        self.body.generate_execution_code(code)

        if code.label_used(intermediate_error_label):
            step_over_label = code.new_label()
            code.put_goto(step_over_label)
            code.put_label(intermediate_error_label)
            code.put_decref_clear(self.exit_var, py_object_type)
            code.put_goto(old_error_label)
            code.put_label(step_over_label)

        code.funcstate.release_temp(self.exit_var)
        code.putln("}")
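
# Note that, matching CPython's behaviour, the generated code looks up
# ``__exit__`` on the manager (via __Pyx_PyObject_LookupSpecial) before
# calling ``__enter__``, so a missing ``__exit__`` fails early, and the exit
# reference is kept in a temp for the duration of the block.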

class WithTargetAssignmentStatNode(AssignmentNode):
    # The target assignment of the 'with' statement value (return
    # value of the __enter__() call).
    #
    # This is a special cased assignment that steals the RHS reference
    # and frees its temp.
    #
    # lhs       ExprNode   the assignment target
    # rhs       CloneNode  a (coerced) CloneNode for the orig_rhs (not owned by this node)
    # orig_rhs  ExprNode   the original ExprNode of the rhs. this node will clean up the
    #                      temps of the orig_rhs. basically, it takes ownership of the node
    #                      when the WithStatNode is done with it.

    child_attrs = ["lhs"]

    def analyse_declarations(self, env):
        self.lhs.analyse_target_declaration(env)

    def analyse_expressions(self, env):
        self.rhs = self.rhs.analyse_types(env)
        self.lhs = self.lhs.analyse_target_types(env)
        self.lhs.gil_assignment_check(env)
        self.rhs = self.rhs.coerce_to(self.lhs.type, env)
        return self

    def generate_execution_code(self, code):
        if self.orig_rhs.type.is_pyobject:
            # make sure rhs gets freed on errors, see below
            old_error_label = code.new_error_label()
            intermediate_error_label = code.error_label

        self.rhs.generate_evaluation_code(code)
        self.lhs.generate_assignment_code(self.rhs, code)

        if self.orig_rhs.type.is_pyobject:
            self.orig_rhs.generate_disposal_code(code)
            code.error_label = old_error_label
            if code.label_used(intermediate_error_label):
                step_over_label = code.new_label()
                code.put_goto(step_over_label)
                code.put_label(intermediate_error_label)
                self.orig_rhs.generate_disposal_code(code)
                code.put_goto(old_error_label)
                code.put_label(step_over_label)

        self.orig_rhs.free_temps(code)

    def annotate(self, code):
        self.lhs.annotate(code)
        self.rhs.annotate(code)

class TryExceptStatNode(StatNode):
    #  try .. except statement
    #
    #  body             StatNode
    #  except_clauses   [ExceptClauseNode]
    #  else_clause      StatNode or None

    child_attrs = ["body", "except_clauses", "else_clause"]

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        for except_clause in self.except_clauses:
            except_clause.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.body = self.body.analyse_expressions(env)
        default_clause_seen = 0
        for i, except_clause in enumerate(self.except_clauses):
            except_clause = self.except_clauses[i] = except_clause.analyse_expressions(env)
            if default_clause_seen:
                error(except_clause.pos, "default 'except:' must be last")
            if not except_clause.pattern:
                default_clause_seen = 1
        self.has_default_clause = default_clause_seen
        if self.else_clause:
            self.else_clause = self.else_clause.analyse_expressions(env)
        return self

    nogil_check = Node.gil_error
    gil_message = "Try-except statement"

    def generate_execution_code(self, code):
        old_return_label = code.return_label
        old_break_label = code.break_label
        old_continue_label = code.continue_label
        old_error_label = code.new_error_label()
        our_error_label = code.error_label
        except_end_label = code.new_label('exception_handled')
        except_error_label = code.new_label('except_error')
        except_return_label = code.new_label('except_return')
        try_return_label = code.new_label('try_return')
        try_break_label = code.new_label('try_break')
        try_continue_label = code.new_label('try_continue')
        try_end_label = code.new_label('try_end')

        exc_save_vars = [code.funcstate.allocate_temp(py_object_type, False)
                         for _ in xrange(3)]
        code.putln("{")
        save_exc = code.insertion_point()
        code.putln(
            "/*try:*/ {")
        code.return_label = try_return_label
        code.break_label = try_break_label
        code.continue_label = try_continue_label
        self.body.generate_execution_code(code)
        code.putln(
            "}")
        temps_to_clean_up = code.funcstate.all_free_managed_temps()
        can_raise = code.label_used(our_error_label)

        if can_raise:
            # inject code before the try block to save away the exception state
            code.globalstate.use_utility_code(reset_exception_utility_code)
            save_exc.putln("__Pyx_ExceptionSave(%s);" %
                           ', '.join(['&%s' % var for var in exc_save_vars]))
            for var in exc_save_vars:
                save_exc.put_xgotref(var)

            def restore_saved_exception():
                for name in exc_save_vars:
                    code.put_xgiveref(name)
                code.putln("__Pyx_ExceptionReset(%s);" %
                           ', '.join(exc_save_vars))
        else:
            # try block cannot raise exceptions, but we had to allocate the temps above,
            # so just keep the C compiler from complaining about them being unused
            save_exc.putln("if (%s); else {/*mark used*/};" % '||'.join(exc_save_vars))

            def restore_saved_exception():
                pass

        code.error_label = except_error_label
        code.return_label = except_return_label
        if self.else_clause:
            code.putln(
                "/*else:*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln(
                "}")

        if can_raise:
            for var in exc_save_vars:
                code.put_xdecref_clear(var, py_object_type)
            code.put_goto(try_end_label)
            code.put_label(our_error_label)
            for temp_name, temp_type in temps_to_clean_up:
                code.put_xdecref_clear(temp_name, temp_type)
            for except_clause in self.except_clauses:
                except_clause.generate_handling_code(code, except_end_label)
            if not self.has_default_clause:
                code.put_goto(except_error_label)

        for exit_label, old_label in [(except_error_label, old_error_label),
                                      (try_break_label, old_break_label),
                                      (try_continue_label, old_continue_label),
                                      (try_return_label, old_return_label),
                                      (except_return_label, old_return_label)]:
            if code.label_used(exit_label):
                if not code.label_used(try_end_label):
                    code.put_goto(try_end_label)
                code.put_label(exit_label)
                restore_saved_exception()
                code.put_goto(old_label)

        if code.label_used(except_end_label):
            if not code.label_used(try_end_label):
                code.put_goto(try_end_label)
            code.put_label(except_end_label)
            restore_saved_exception()
        if code.label_used(try_end_label):
            code.put_label(try_end_label)
        code.putln("}")

        for cname in exc_save_vars:
            code.funcstate.release_temp(cname)

        code.return_label = old_return_label
        code.break_label = old_break_label
        code.continue_label = old_continue_label
        code.error_label = old_error_label

    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(env, code)
        for except_clause in self.except_clauses:
            except_clause.generate_function_definitions(env, code)
        if self.else_clause is not None:
            self.else_clause.generate_function_definitions(env, code)

    def annotate(self, code):
        self.body.annotate(code)
        for except_node in self.except_clauses:
            except_node.annotate(code)
        if self.else_clause:
            self.else_clause.annotate(code)
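
# Label plumbing sketch for the code above: the try body runs with its own
# return/break/continue/error labels; if the body can raise, the insertion
# point injected before it saves the outer exception state with
# __Pyx_ExceptionSave, each except clause is tried in order at the error
# label, and every exit path funnels through a label that restores the saved
# state (__Pyx_ExceptionReset) before jumping to the original target.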

class ExceptClauseNode(Node):
    #  Part of try ... except statement.
    #
    #  pattern        [ExprNode]
    #  target         ExprNode or None
    #  body           StatNode
    #  excinfo_target TupleNode(3*ResultRefNode) or None   optional target for exception info (not owned here!)
    #  match_flag     string             result of exception match
    #  exc_value      ExcValueNode       used internally
    #  function_name  string             qualified name of enclosing function
    #  exc_vars       (string * 3)       local exception variables
    #  is_except_as   bool               Py3-style "except ... as xyz"

    # excinfo_target is never set by the parser, but can be set by a transform
    # in order to extract more extensive information about the exception as a
    # sys.exc_info()-style tuple into a target variable

    child_attrs = ["pattern", "target", "body", "exc_value"]

    exc_value = None
    excinfo_target = None
    is_except_as = False

    def analyse_declarations(self, env):
        if self.target:
            self.target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.function_name = env.qualified_name
        if self.pattern:
            # normalise/unpack self.pattern into a list
            for i, pattern in enumerate(self.pattern):
                pattern = pattern.analyse_expressions(env)
                self.pattern[i] = pattern.coerce_to_pyobject(env)

        if self.target:
            self.exc_value = ExprNodes.ExcValueNode(self.pos)
            self.target = self.target.analyse_target_expression(env, self.exc_value)

        self.body = self.body.analyse_expressions(env)
        return self

    def generate_handling_code(self, code, end_label):
        code.mark_pos(self.pos)
        if self.pattern:
            exc_tests = []
            for pattern in self.pattern:
                pattern.generate_evaluation_code(code)
                exc_tests.append("PyErr_ExceptionMatches(%s)" % pattern.py_result())

            match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
            code.putln(
                "%s = %s;" % (match_flag, ' || '.join(exc_tests)))
            for pattern in self.pattern:
                pattern.generate_disposal_code(code)
                pattern.free_temps(code)
            code.putln(
                "if (%s) {" %
                    match_flag)
            code.funcstate.release_temp(match_flag)
        else:
            code.putln("/*except:*/ {")

        if (not getattr(self.body, 'stats', True)
                and self.excinfo_target is None
                and self.target is None):
            # most simple case: no exception variable, empty body (pass)
            # => reset the exception state, done
            code.putln("PyErr_Restore(0,0,0);")
            code.put_goto(end_label)
            code.putln("}")
            return

        exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
                    for _ in xrange(3)]
        code.put_add_traceback(self.function_name)
        # We always have to fetch the exception value even if
        # there is no target, because this also normalises the
        # exception and stores it in the thread state.
        code.globalstate.use_utility_code(get_exception_utility_code)
        exc_args = "&%s, &%s, &%s" % tuple(exc_vars)
        code.putln("if (__Pyx_GetException(%s) < 0) %s" % (exc_args,
            code.error_goto(self.pos)))
        for var in exc_vars:
            code.put_gotref(var)
        if self.target:
            self.exc_value.set_var(exc_vars[1])
            self.exc_value.generate_evaluation_code(code)
            self.target.generate_assignment_code(self.exc_value, code)
        if self.excinfo_target is not None:
            for tempvar, node in zip(exc_vars, self.excinfo_target.args):
                node.set_var(tempvar)

        old_break_label, old_continue_label = code.break_label, code.continue_label
        code.break_label = code.new_label('except_break')
        code.continue_label = code.new_label('except_continue')

        old_exc_vars = code.funcstate.exc_vars
        code.funcstate.exc_vars = exc_vars
        self.body.generate_execution_code(code)
        code.funcstate.exc_vars = old_exc_vars
        for var in exc_vars:
            code.put_decref_clear(var, py_object_type)
        code.put_goto(end_label)

        for new_label, old_label in [(code.break_label, old_break_label),
                                     (code.continue_label, old_continue_label)]:
            if code.label_used(new_label):
                code.put_label(new_label)
                for var in exc_vars:
                    code.put_decref_clear(var, py_object_type)
                code.put_goto(old_label)
        code.break_label = old_break_label
        code.continue_label = old_continue_label

        for temp in exc_vars:
            code.funcstate.release_temp(temp)

        code.putln(
            "}")

    def generate_function_definitions(self, env, code):
        if self.target is not None:
            self.target.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)

    def annotate(self, code):
        if self.pattern:
            for pattern in self.pattern:
                pattern.annotate(code)
        if self.target:
            self.target.annotate(code)
        self.body.annotate(code)
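
# Fast path illustrated above: a handler that is literally ``except: pass``
# (no target, no exc-info target, empty body) only needs
# ``PyErr_Restore(0,0,0);`` to clear the pending exception, avoiding the
# __Pyx_GetException call and the three temporary exception variables.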

class TryFinallyStatNode(StatNode):
    #  try ... finally statement
    #
    #  body             StatNode
    #  finally_clause   StatNode
    #
    #  The plan is that we funnel all continue, break
    #  return and error gotos into the beginning of the
    #  finally block, setting a variable to remember which
    #  one we're doing. At the end of the finally block, we
    #  switch on the variable to figure out where to go.
    #  In addition, if we're doing an error, we save the
    #  exception on entry to the finally block and restore
    #  it at the end.

    child_attrs = ["body", "finally_clause"]

    preserve_exception = 1

    # handle exception case, in addition to return/break/continue
    handle_error_case = True
    func_return_type = None

    disallow_continue_in_try_finally = 0
    # There doesn't seem to be any point in disallowing
    # continue in the try block, since we have no problem
    # handling it.

    is_try_finally_in_nogil = False

    def create_analysed(pos, env, body, finally_clause):
        node = TryFinallyStatNode(pos, body=body, finally_clause=finally_clause)
        return node
    create_analysed = staticmethod(create_analysed)

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        self.finally_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.body = self.body.analyse_expressions(env)
        self.finally_clause = self.finally_clause.analyse_expressions(env)
        if env.return_type and not env.return_type.is_void:
            self.func_return_type = env.return_type
        return self

    nogil_check = Node.gil_error
    gil_message = "Try-finally statement"

    def generate_execution_code(self, code):
        old_error_label = code.error_label
        old_labels = code.all_new_labels()
        new_labels = code.get_all_labels()
        new_error_label = code.error_label
        if not self.handle_error_case:
            code.error_label = old_error_label
        catch_label = code.new_label()

        code.putln("/*try:*/ {")

        if self.disallow_continue_in_try_finally:
            was_in_try_finally = code.funcstate.in_try_finally
            code.funcstate.in_try_finally = 1

        self.body.generate_execution_code(code)

        if self.disallow_continue_in_try_finally:
            code.funcstate.in_try_finally = was_in_try_finally

        code.putln("}")
        code.set_all_labels(old_labels)

        temps_to_clean_up = code.funcstate.all_free_managed_temps()
        code.mark_pos(self.finally_clause.pos)
        code.putln("/*finally:*/ {")

        def fresh_finally_clause(_next=[self.finally_clause]):
            # generate the original subtree once and always keep a fresh copy
            node = _next[0]
            node_copy = copy.deepcopy(node)
            if node is self.finally_clause:
                _next[0] = node_copy
            else:
                node = node_copy
            return node

        preserve_error = self.preserve_exception and code.label_used(new_error_label)
        needs_success_cleanup = not self.finally_clause.is_terminator

        if not self.body.is_terminator:
            code.putln('/*normal exit:*/{')
            fresh_finally_clause().generate_execution_code(code)
            if not self.finally_clause.is_terminator:
                code.put_goto(catch_label)
            code.putln('}')

        if preserve_error:
            code.putln('/*exception exit:*/{')
            if self.is_try_finally_in_nogil:
                code.declare_gilstate()
            if needs_success_cleanup:
                exc_lineno_cnames = tuple([
                    code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
                    for _ in range(2)])
                exc_filename_cname = code.funcstate.allocate_temp(
                    PyrexTypes.CPtrType(PyrexTypes.c_const_type(PyrexTypes.c_char_type)),
                    manage_ref=False)
            else:
                exc_lineno_cnames = exc_filename_cname = None
            exc_vars = tuple([
                code.funcstate.allocate_temp(py_object_type, manage_ref=False)
                for _ in range(6)])
            code.put_label(new_error_label)
            self.put_error_catcher(
                code, temps_to_clean_up, exc_vars, exc_lineno_cnames, exc_filename_cname)
            finally_old_labels = code.all_new_labels()

            code.putln('{')
            old_exc_vars = code.funcstate.exc_vars
            code.funcstate.exc_vars = exc_vars[:3]
            fresh_finally_clause().generate_execution_code(code)
            code.funcstate.exc_vars = old_exc_vars
            code.putln('}')

            if needs_success_cleanup:
                self.put_error_uncatcher(code, exc_vars, exc_lineno_cnames, exc_filename_cname)
                if exc_lineno_cnames:
                    for cname in exc_lineno_cnames:
                        code.funcstate.release_temp(cname)
                if exc_filename_cname:
                    code.funcstate.release_temp(exc_filename_cname)
                code.put_goto(old_error_label)

            for new_label, old_label in zip(code.get_all_labels(), finally_old_labels):
                if not code.label_used(new_label):
                    continue
                code.put_label(new_label)
                self.put_error_cleaner(code, exc_vars)
                code.put_goto(old_label)

            for cname in exc_vars:
                code.funcstate.release_temp(cname)
            code.putln('}')

        code.set_all_labels(old_labels)
        return_label = code.return_label
        for i, (new_label, old_label) in enumerate(zip(new_labels, old_labels)):
            if not code.label_used(new_label):
                continue
            if new_label == new_error_label and preserve_error:
                continue  # handled above

            code.put('%s: ' % new_label)
            code.putln('{')
            ret_temp = None
            if old_label == return_label and not self.finally_clause.is_terminator:
                # store away return value for later reuse
                if (self.func_return_type and
                        not self.is_try_finally_in_nogil and
                        not isinstance(self.finally_clause, GILExitNode)):
                    ret_temp = code.funcstate.allocate_temp(
                        self.func_return_type, manage_ref=False)
                    code.putln("%s = %s;" % (ret_temp, Naming.retval_cname))
                    if self.func_return_type.is_pyobject:
                        code.putln("%s = 0;" % Naming.retval_cname)
            fresh_finally_clause().generate_execution_code(code)
            if ret_temp:
                code.putln("%s = %s;" % (Naming.retval_cname, ret_temp))
                if self.func_return_type.is_pyobject:
                    code.putln("%s = 0;" % ret_temp)
                code.funcstate.release_temp(ret_temp)
                ret_temp = None
            if not self.finally_clause.is_terminator:
                code.put_goto(old_label)
            code.putln('}')

        code.put_label(catch_label)
        code.putln("}")

    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(env, code)
        self.finally_clause.generate_function_definitions(env, code)

    def put_error_catcher(self, code, temps_to_clean_up, exc_vars,
                          exc_lineno_cnames, exc_filename_cname):
        code.globalstate.use_utility_code(restore_exception_utility_code)
        code.globalstate.use_utility_code(get_exception_utility_code)
        code.globalstate.use_utility_code(swap_exception_utility_code)

        code.putln(' '.join(["%s = 0;"]*len(exc_vars)) % exc_vars)
        if self.is_try_finally_in_nogil:
            code.put_ensure_gil(declare_gilstate=False)

        for temp_name, type in temps_to_clean_up:
            code.put_xdecref_clear(temp_name, type)

        # not using preprocessor here to avoid warnings about
        # unused utility functions and/or temps
        code.putln("if (PY_MAJOR_VERSION >= 3)"
                   " __Pyx_ExceptionSwap(&%s, &%s, &%s);" % exc_vars[3:])
        code.putln("if ((PY_MAJOR_VERSION < 3) ||"
                   # if __Pyx_GetException() fails in Py3,
                   # store the newly raised exception instead
                   " unlikely(__Pyx_GetException(&%s, &%s, &%s) < 0)) "
                   "__Pyx_ErrFetch(&%s, &%s, &%s);" % (exc_vars[:3] * 2))
        for var in exc_vars:
            code.put_xgotref(var)
        if exc_lineno_cnames:
            code.putln("%s = %s; %s = %s; %s = %s;" % (
                exc_lineno_cnames[0], Naming.lineno_cname,
                exc_lineno_cnames[1], Naming.clineno_cname,
                exc_filename_cname, Naming.filename_cname))

        if self.is_try_finally_in_nogil:
            code.put_release_ensured_gil()

    def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames, exc_filename_cname):
        code.globalstate.use_utility_code(restore_exception_utility_code)
        code.globalstate.use_utility_code(reset_exception_utility_code)

        if self.is_try_finally_in_nogil:
            code.put_ensure_gil(declare_gilstate=False)

        # not using preprocessor here to avoid warnings about
        # unused utility functions and/or temps
        code.putln("if (PY_MAJOR_VERSION >= 3) {")
        for var in exc_vars[3:]:
            code.put_xgiveref(var)
        code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
        code.putln("}")
        for var in exc_vars[:3]:
            code.put_xgiveref(var)
        code.putln("__Pyx_ErrRestore(%s, %s, %s);" % exc_vars[:3])

        if self.is_try_finally_in_nogil:
            code.put_release_ensured_gil()

        code.putln(' '.join(["%s = 0;"]*len(exc_vars)) % exc_vars)
        if exc_lineno_cnames:
            code.putln("%s = %s; %s = %s; %s = %s;" % (
                Naming.lineno_cname, exc_lineno_cnames[0],
                Naming.clineno_cname, exc_lineno_cnames[1],
                Naming.filename_cname, exc_filename_cname))

    def put_error_cleaner(self, code, exc_vars):
        code.globalstate.use_utility_code(reset_exception_utility_code)
        if self.is_try_finally_in_nogil:
            code.put_ensure_gil(declare_gilstate=False)
        # not using preprocessor here to avoid warnings about
        # unused utility functions and/or temps
        code.putln("if (PY_MAJOR_VERSION >= 3) {")
        for var in exc_vars[3:]:
            code.put_xgiveref(var)
        code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
        code.putln("}")
        for var in exc_vars[:3]:
            code.put_xdecref_clear(var, py_object_type)
        if self.is_try_finally_in_nogil:
            code.put_release_ensured_gil()
        code.putln(' '.join(["%s = 0;"]*3) % exc_vars[3:])

    def annotate(self, code):
        self.body.annotate(code)
        self.finally_clause.annotate(code)
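
# Sketch of the funnelling described in the class comment above: the finally
# body is duplicated (via fresh_finally_clause) once per label that the try
# body actually used -- normal exit, error exit, return/break/continue -- and
# each copy ends with a goto to the corresponding outer label, restoring the
# saved exception state or return value first where necessary.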

class NogilTryFinallyStatNode(TryFinallyStatNode):
    """
    A try/finally statement that may be used in nogil code sections.
    """

    preserve_exception = False
    nogil_check = None


class GILStatNode(NogilTryFinallyStatNode):
    #  'with gil' or 'with nogil' statement
    #
    #   state   string   'gil' or 'nogil'

    state_temp = None

    def __init__(self, pos, state, body):
        self.state = state
        self.create_state_temp_if_needed(pos, state, body)
        TryFinallyStatNode.__init__(self, pos,
            body=body,
            finally_clause=GILExitNode(
                pos, state=state, state_temp=self.state_temp))

    def create_state_temp_if_needed(self, pos, state, body):
        from ParseTreeTransforms import YieldNodeCollector
        collector = YieldNodeCollector()
        collector.visitchildren(body)
        if not collector.yields:
            return

        if state == 'gil':
            temp_type = PyrexTypes.c_gilstate_type
        else:
            temp_type = PyrexTypes.c_threadstate_ptr_type

        self.state_temp = ExprNodes.TempNode(pos, temp_type)

    def analyse_declarations(self, env):
        env._in_with_gil_block = (self.state == 'gil')
        if self.state == 'gil':
            env.has_with_gil_block = True

        return super(GILStatNode, self).analyse_declarations(env)

    def analyse_expressions(self, env):
        env.use_utility_code(
            UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
        was_nogil = env.nogil
        env.nogil = self.state == 'nogil'
        node = TryFinallyStatNode.analyse_expressions(self, env)
        env.nogil = was_nogil
        return node

    def generate_execution_code(self, code):
        code.mark_pos(self.pos)
        if self.state_temp:
            self.state_temp.allocate(code)
            variable = self.state_temp.result()
        else:
            variable = None

        old_trace_config = code.funcstate.can_trace
        if self.state == 'gil':
            code.put_ensure_gil(variable=variable)
            # FIXME: not that easy, tracing may not be possible at all here
            #code.funcstate.can_trace = True
        else:
            code.put_release_gil(variable=variable)
            code.funcstate.can_trace = False

        TryFinallyStatNode.generate_execution_code(self, code)

        if self.state_temp:
            self.state_temp.release(code)

        code.funcstate.can_trace = old_trace_config
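
# Usage note: ``with nogil:`` maps to put_release_gil() around the body with
# the matching re-acquisition generated by the GILExitNode finally clause
# below; when the body contains yields (generators), the GIL/thread state is
# kept in an explicit temp (state_temp) rather than a plain local C variable.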

class GILExitNode(StatNode):
    """
    Used as the 'finally' block in a GILStatNode

    state   string   'gil' or 'nogil'
    """

    child_attrs = []
    state_temp = None

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        if self.state_temp:
            variable = self.state_temp.result()
        else:
            variable = None

        if self.state == 'gil':
            code.put_release_ensured_gil(variable)
        else:
            code.put_acquire_gil(variable)


class EnsureGILNode(GILExitNode):
    """
    Ensure the GIL in nogil functions for cleanup before returning.
    """

    def generate_execution_code(self, code):
        code.put_ensure_gil(declare_gilstate=False)

utility_code_for_cimports = {
    # utility code (or inlining c) in a pxd (or pyx) file.
    # TODO: Consider a generic user-level mechanism for importing
    'cpython.array'         : ("ArrayAPI", "arrayarray.h"),
    'cpython.array.array'   : ("ArrayAPI", "arrayarray.h"),
}
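
# Example: ``cimport cpython.array`` (or ``from cpython cimport array``) makes
# CImportStatNode/FromCImportStatNode below pull in the "ArrayAPI" utility
# code from arrayarray.h via this table.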

class CImportStatNode(StatNode):
    #  cimport statement
    #
    #  module_name   string           Qualified name of module being imported
    #  as_name       string or None   Name specified in "as" clause, if any

    child_attrs = []

    def analyse_declarations(self, env):
        if not env.is_module_scope:
            error(self.pos, "cimport only allowed at module level")
            return
        module_scope = env.find_module(self.module_name, self.pos)
        if "." in self.module_name:
            names = [EncodedString(name) for name in self.module_name.split(".")]
            top_name = names[0]
            top_module_scope = env.context.find_submodule(top_name)
            module_scope = top_module_scope
            for name in names[1:]:
                submodule_scope = module_scope.find_submodule(name)
                module_scope.declare_module(name, submodule_scope, self.pos)
                module_scope = submodule_scope
            if self.as_name:
                env.declare_module(self.as_name, module_scope, self.pos)
            else:
                env.add_imported_module(module_scope)
                env.declare_module(top_name, top_module_scope, self.pos)
        else:
            name = self.as_name or self.module_name
            env.declare_module(name, module_scope, self.pos)
        if self.module_name in utility_code_for_cimports:
            env.use_utility_code(UtilityCode.load_cached(
                *utility_code_for_cimports[self.module_name]))

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        pass

class FromCImportStatNode(StatNode):
    #  from ... cimport statement
    #
    #  module_name     string                        Qualified name of module
    #  imported_names  [(pos, name, as_name, kind)]  Names to be imported

    child_attrs = []

    def analyse_declarations(self, env):
        if not env.is_module_scope:
            error(self.pos, "cimport only allowed at module level")
            return
        module_scope = env.find_module(self.module_name, self.pos)
        env.add_imported_module(module_scope)
        for pos, name, as_name, kind in self.imported_names:
            if name == "*":
                for local_name, entry in module_scope.entries.items():
                    env.add_imported_entry(local_name, entry, pos)
            else:
                entry = module_scope.lookup(name)
                if entry:
                    if kind and not self.declaration_matches(entry, kind):
                        entry.redeclared(pos)
                    entry.used = 1
                else:
                    if kind == 'struct' or kind == 'union':
                        entry = module_scope.declare_struct_or_union(name,
                            kind = kind, scope = None, typedef_flag = 0, pos = pos)
                    elif kind == 'class':
                        entry = module_scope.declare_c_class(name, pos = pos,
                            module_name = self.module_name)
                    else:
                        submodule_scope = env.context.find_module(name, relative_to = module_scope, pos = self.pos)
                        if submodule_scope.parent_module is module_scope:
                            env.declare_module(as_name or name, submodule_scope, self.pos)
                        else:
                            error(pos, "Name '%s' not declared in module '%s'"
                                % (name, self.module_name))

                if entry:
                    local_name = as_name or name
                    env.add_imported_entry(local_name, entry, pos)

        if self.module_name.startswith('cpython'): # enough for now
            if self.module_name in utility_code_for_cimports:
                env.use_utility_code(UtilityCode.load_cached(
                    *utility_code_for_cimports[self.module_name]))
            for _, name, _, _ in self.imported_names:
                fqname = '%s.%s' % (self.module_name, name)
                if fqname in utility_code_for_cimports:
                    env.use_utility_code(UtilityCode.load_cached(
                        *utility_code_for_cimports[fqname]))

    def declaration_matches(self, entry, kind):
        if not entry.is_type:
            return 0
        type = entry.type
        if kind == 'class':
            if not type.is_extension_type:
                return 0
        else:
            if not type.is_struct_or_union:
                return 0
            if kind != type.kind:
                return 0
        return 1

    def analyse_expressions(self, env):
        return self

    def generate_execution_code(self, code):
        pass

class FromImportStatNode(StatNode):
    #  from ... import statement
    #
    #  module           ImportNode
    #  items            [(string, NameNode)]
    #  interned_items   [(string, NameNode, ExprNode)]
    #  item             PyTempNode            used internally
    #  import_star      boolean               used internally

    child_attrs = ["module"]
    import_star = 0

    def analyse_declarations(self, env):
        for name, target in self.items:
            if name == "*":
                if not env.is_module_scope:
                    error(self.pos, "import * only allowed at module level")
                    return
                env.has_import_star = 1
                self.import_star = 1
            else:
                target.analyse_target_declaration(env)

    def analyse_expressions(self, env):
        self.module = self.module.analyse_expressions(env)
        self.item = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
        self.interned_items = []
        for name, target in self.items:
            if name == '*':
                for _, entry in env.entries.items():
                    if not entry.is_type and entry.type.is_extension_type:
                        env.use_utility_code(UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
                        break
            else:
                entry = env.lookup(target.name)
                # check whether or not entry is already cimported
                if (entry.is_type and entry.type.name == name
                        and hasattr(entry.type, 'module_name')):
                    if entry.type.module_name == self.module.module_name.value:
                        # cimported with absolute name
                        continue
                    try:
                        # cimported with relative name
                        module = env.find_module(self.module.module_name.value,
                                                 pos=None)
                        if entry.type.module_name == module.qualified_name:
                            continue
                    except AttributeError:
                        pass
                target = target.analyse_target_expression(env, None)  # FIXME?
                if target.type is py_object_type:
                    coerced_item = None
                else:
                    coerced_item = self.item.coerce_to(target.type, env)
                self.interned_items.append((name, target, coerced_item))
        return self

    def generate_execution_code(self, code):
        self.module.generate_evaluation_code(code)
        if self.import_star:
            code.putln(
                'if (%s(%s) < 0) %s;' % (
                    Naming.import_star,
                    self.module.py_result(),
                    code.error_goto(self.pos)))
        item_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        self.item.set_cname(item_temp)
        if self.interned_items:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ImportFrom", "ImportExport.c"))
        for name, target, coerced_item in self.interned_items:
            code.putln(
                '%s = __Pyx_ImportFrom(%s, %s); %s' % (
                    item_temp,
                    self.module.py_result(),
                    code.intern_identifier(name),
                    code.error_goto_if_null(item_temp, self.pos)))
            code.put_gotref(item_temp)
            if coerced_item is None:
                target.generate_assignment_code(self.item, code)
            else:
                coerced_item.allocate_temp_result(code)
                coerced_item.generate_result_code(code)
                target.generate_assignment_code(coerced_item, code)
            code.put_decref_clear(item_temp, py_object_type)
        code.funcstate.release_temp(item_temp)
        self.module.generate_disposal_code(code)
        self.module.free_temps(code)
6979 class ParallelNode(Node
):
6981 Base class for cython.parallel constructs.
class ParallelStatNode(StatNode, ParallelNode):
    """
    Base class for 'with cython.parallel.parallel():' and 'for i in prange():'.

    assignments     { Entry(var) : (var.pos, inplace_operator_or_None) }
                    assignments to variables in this parallel section

    parent          parent ParallelStatNode or None
    is_parallel     indicates whether this node is OpenMP parallel
                    (true for #pragma omp parallel for and
                              #pragma omp parallel)

    is_parallel is true for:

        #pragma omp parallel
        #pragma omp parallel for

    sections, but NOT for

        #pragma omp for

    We need this to determine the sharing attributes.

    privatization_insertion_point   a code insertion point used to make temps
                                    private (esp. the "nsteps" temp)

    args         tuple          the arguments passed to the parallel construct
    kwargs       DictNode       the keyword arguments passed to the parallel
                                construct (replaced by its compile time value)
    """

    child_attrs = ['body', 'num_threads']

    body = None

    is_prange = False
    is_nested_prange = False

    error_label_used = False

    num_threads = None
    chunksize = None

    parallel_exc = (
        Naming.parallel_exc_type,
        Naming.parallel_exc_value,
        Naming.parallel_exc_tb,
    )

    parallel_pos_info = (
        Naming.parallel_filename,
        Naming.parallel_lineno,
        Naming.parallel_clineno,
    )

    pos_info = (
        Naming.filename_cname,
        Naming.lineno_cname,
        Naming.clineno_cname,
    )

    critical_section_counter = 0
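
    # Illustrative usage sketch (user-level Cython) of the two constructs that
    # share this base class:
    #
    #   from cython.parallel import parallel, prange
    #
    #   with nogil, parallel(num_threads=4):   # ParallelWithBlockNode
    #       ...
    #
    #   for i in prange(n, nogil=True):        # ParallelRangeNode
    #       ...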

    def __init__(self, pos, **kwargs):
        super(ParallelStatNode, self).__init__(pos, **kwargs)

        # All assignments in this scope
        self.assignments = kwargs.get('assignments') or {}

        # All seen closure cnames and their temporary cnames
        self.seen_closure_vars = set()

        # Dict of variables that should be declared (first|last|)private or
        # reduction { Entry: (op, lastprivate) }.
        # If op is not None, it's a reduction.
        self.privates = {}

        # [NameNode]
        self.assigned_nodes = []

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)

        self.num_threads = None

        if self.kwargs:
            # Try to find num_threads and chunksize keyword arguments
            pairs = []
            for dictitem in self.kwargs.key_value_pairs:
                if dictitem.key.value == 'num_threads':
                    self.num_threads = dictitem.value
                elif self.is_prange and dictitem.key.value == 'chunksize':
                    self.chunksize = dictitem.value
                else:
                    pairs.append(dictitem)

            self.kwargs.key_value_pairs = pairs

            try:
                self.kwargs = self.kwargs.compile_time_value(env)
            except Exception, e:
                error(self.kwargs.pos, "Only compile-time values may be "
                                       "supplied as keyword arguments")
        else:
            self.kwargs = {}

        for kw, val in self.kwargs.iteritems():
            if kw not in self.valid_keyword_arguments:
                error(self.pos, "Invalid keyword argument: %s" % kw)
            else:
                setattr(self, kw, val)

    def analyse_expressions(self, env):
        if self.num_threads:
            self.num_threads = self.num_threads.analyse_expressions(env)

        if self.chunksize:
            self.chunksize = self.chunksize.analyse_expressions(env)

        self.body = self.body.analyse_expressions(env)
        self.analyse_sharing_attributes(env)

        if self.num_threads is not None:
            if (self.parent and self.parent.num_threads is not None and not
                    self.parent.is_prange):
                error(self.pos,
                      "num_threads already declared in outer section")
            elif self.parent and not self.parent.is_prange:
                error(self.pos,
                      "num_threads must be declared in the parent parallel section")
            elif (self.num_threads.type.is_int and
                    self.num_threads.is_literal and
                    self.num_threads.compile_time_value(env) <= 0):
                error(self.pos,
                      "argument to num_threads must be greater than 0")

            if not self.num_threads.is_simple():
                self.num_threads = self.num_threads.coerce_to(
                    PyrexTypes.c_int_type, env).coerce_to_temp(env)
        return self

    def analyse_sharing_attributes(self, env):
        """
        Analyse the privates for this block and set them in self.privates.
        This should be called in a post-order fashion during the
        analyse_expressions phase
        """
        for entry, (pos, op) in self.assignments.iteritems():

            if self.is_prange and not self.is_parallel:
                # closely nested prange in a with parallel block, disallow
                # assigning to privates in the with parallel block (we
                # consider it too implicit and magicky for users)
                if entry in self.parent.assignments:
                    error(pos,
                          "Cannot assign to private of outer parallel block")
                    continue

            if not self.is_prange and op:
                # Again possible, but considered too magicky
                error(pos, "Reductions not allowed for parallel blocks")
                continue

            # By default all variables should have the same values as if
            # executed sequentially
            lastprivate = True
            self.propagate_var_privatization(entry, pos, op, lastprivate)

    def propagate_var_privatization(self, entry, pos, op, lastprivate):
        """
        Propagate the sharing attributes of a variable. If the privatization is
        determined by a parent scope, don't propagate further.

        If we are a prange, we propagate our sharing attributes outwards to
        other pranges. If we are a prange in a parallel block and the parallel
        block does not determine the variable private, we propagate to the
        parent of the parent. Recursion stops at parallel blocks, as they have
        no concept of lastprivate or reduction.

        So the following cases propagate:

            sum is a reduction for all loops:

                for i in prange(n):
                    for j in prange(n):
                        for k in prange(n):
                            sum += i * j * k

            sum is a reduction for both loops, local_var is private to the
            parallel with block:

                for i in prange(n):
                    with parallel:
                        local_var = ... # private to the parallel
                        for j in prange(n):
                            sum += i * j

        Nested with parallel blocks are disallowed, because they wouldn't
        allow you to propagate lastprivates or reductions:

            #pragma omp parallel for lastprivate(i)
            for i in prange(n):

                sum = 0

                #pragma omp parallel private(j, sum)
                with parallel:

                    #pragma omp parallel
                    with parallel:

                        #pragma omp for lastprivate(j) reduction(+:sum)
                        for j in prange(n):
                            sum += i

                    # sum and j are well-defined here

                # sum and j are undefined here

            # sum and j are undefined here
        """
        self.privates[entry] = (op, lastprivate)

        if entry.type.is_memoryviewslice:
            error(pos, "Memoryview slices can only be shared in parallel sections")
            return

        if self.is_prange:
            if not self.is_parallel and entry not in self.parent.assignments:
                # Parent is a parallel with block
                parent = self.parent.parent
            else:
                parent = self.parent

            # We don't need to propagate privates, only reductions and
            # lastprivates
            if parent and (op or lastprivate):
                parent.propagate_var_privatization(entry, pos, op, lastprivate)

    def _allocate_closure_temp(self, code, entry):
        """
        Helper function that allocates a temporary for a closure variable that
        is assigned to.
        """
        if self.parent:
            return self.parent._allocate_closure_temp(code, entry)

        if entry.cname in self.seen_closure_vars:
            return entry.cname

        cname = code.funcstate.allocate_temp(entry.type, True)

        # Add both the actual cname and the temp cname, as the actual cname
        # will be replaced with the temp cname on the entry
        self.seen_closure_vars.add(entry.cname)
        self.seen_closure_vars.add(cname)

        self.modified_entries.append((entry, entry.cname))
        code.putln("%s = %s;" % (cname, entry.cname))
        entry.cname = cname

    def initialize_privates_to_nan(self, code, exclude=None):
        first = True

        for entry, (op, lastprivate) in self.privates.iteritems():
            if not op and (not exclude or entry != exclude):
                invalid_value = entry.type.invalid_value()

                if invalid_value:
                    if first:
                        code.putln("/* Initialize private variables to "
                                   "invalid values */")
                        first = False
                    code.putln("%s = %s;" % (entry.cname,
                                             entry.type.cast_code(invalid_value)))

    def evaluate_before_block(self, code, expr):
        c = self.begin_of_parallel_control_block_point_after_decls
        # we need to set the owner to ourselves temporarily, as
        # allocate_temp may generate a comment in the middle of our pragma
        # otherwise when DebugFlags.debug_temp_code_comments is in effect
        owner = c.funcstate.owner
        c.funcstate.owner = c
        expr.generate_evaluation_code(c)
        c.funcstate.owner = owner

        return expr.result()

    def put_num_threads(self, code):
        """
        Write self.num_threads if set as the num_threads OpenMP directive
        """
        if self.num_threads is not None:
            code.put(" num_threads(%s)" % self.evaluate_before_block(code,
                                                            self.num_threads))
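
    # Illustrative result (not compiler code): with num_threads=4 the caller's
    # pragma ends up as, e.g.,
    #
    #   #pragma omp parallel private(...) num_threads(4)
    #
    # A non-simple num_threads expression is first evaluated into a temp at the
    # start of the parallel control flow block via evaluate_before_block().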

    def declare_closure_privates(self, code):
        """
        If a variable is in a scope object, we need to allocate a temp and
        assign the value from the temp to the variable in the scope object
        after the parallel section. This kind of copying should be done only
        in the outermost parallel section.
        """
        self.modified_entries = []

        for entry in self.assignments:
            if entry.from_closure or entry.in_closure:
                self._allocate_closure_temp(code, entry)

    def release_closure_privates(self, code):
        """
        Release any temps used for variables in scope objects. As this is the
        outermost parallel block, we don't need to delete the cnames from
        self.seen_closure_vars.
        """
        for entry, original_cname in self.modified_entries:
            code.putln("%s = %s;" % (original_cname, entry.cname))
            code.funcstate.release_temp(entry.cname)
            entry.cname = original_cname

    def privatize_temps(self, code, exclude_temps=()):
        """
        Make any used temporaries private. Before the relevant code block
        code.start_collecting_temps() should have been called.
        """
        if self.is_parallel:
            c = self.privatization_insertion_point

            self.temps = temps = code.funcstate.stop_collecting_temps()
            privates, firstprivates = [], []
            for temp, type in temps:
                if type.is_pyobject or type.is_memoryviewslice:
                    firstprivates.append(temp)
                else:
                    privates.append(temp)

            if privates:
                c.put(" private(%s)" % ", ".join(privates))
            if firstprivates:
                c.put(" firstprivate(%s)" % ", ".join(firstprivates))

            if self.breaking_label_used:
                shared_vars = [Naming.parallel_why]
                if self.error_label_used:
                    shared_vars.extend(self.parallel_exc)
                    c.put(" private(%s, %s, %s)" % self.pos_info)

                c.put(" shared(%s)" % ', '.join(shared_vars))

    def cleanup_temps(self, code):
        # Now clean up any memoryview slice and object temporaries
        if self.is_parallel and not self.is_nested_prange:
            code.putln("/* Clean up any temporaries */")
            for temp, type in self.temps:
                if type.is_memoryviewslice:
                    code.put_xdecref_memoryviewslice(temp, have_gil=False)
                elif type.is_pyobject:
                    code.put_xdecref(temp, type)
                    code.putln("%s = NULL;" % temp)

    def setup_parallel_control_flow_block(self, code):
        """
        Sets up a block that surrounds the parallel block to determine
        how the parallel section was exited. Any kind of return is
        trapped (break, continue, return, exceptions). This is the idea:

        {
            int why = 0;

            #pragma omp parallel
            {
                return  # -> goto new_return_label;
                goto end_parallel;

            new_return_label:
                why = 3;
                goto end_parallel;

            end_parallel:;
                #pragma omp flush(why)  # we need to flush for every iteration
            }

            if (why == 3)
                goto old_return_label;
        }
        """
        self.old_loop_labels = code.new_loop_labels()
        self.old_error_label = code.new_error_label()
        self.old_return_label = code.return_label
        code.return_label = code.new_label(name="return")

        code.begin_block()  # parallel control flow block
        self.begin_of_parallel_control_block_point = code.insertion_point()
        self.begin_of_parallel_control_block_point_after_decls = code.insertion_point()

        self.undef_builtin_expect_apple_gcc_bug(code)

    def begin_parallel_block(self, code):
        """
        Each OpenMP thread in a parallel section that contains a with gil block
        must have the thread-state initialized. The call to
        PyGILState_Release() then deallocates our threadstate. If we didn't
        do this, each with gil block would allocate and deallocate one, thereby
        losing exception information before it can be saved before leaving the
        parallel section.
        """
        self.begin_of_parallel_block = code.insertion_point()

    def end_parallel_block(self, code):
        """
        To ensure all OpenMP threads have thread states, we ensure the GIL
        in each thread (which creates a thread state if it doesn't exist),
        after which we release the GIL.
        On exit, reacquire the GIL and release the thread state.

        If compiled without OpenMP support (at the C level), then we still have
        to acquire the GIL to decref any object temporaries.
        """
        if self.error_label_used:
            begin_code = self.begin_of_parallel_block
            end_code = code

            begin_code.putln("#ifdef _OPENMP")
            begin_code.put_ensure_gil(declare_gilstate=True)
            begin_code.putln("Py_BEGIN_ALLOW_THREADS")
            begin_code.putln("#endif /* _OPENMP */")

            end_code.putln("#ifdef _OPENMP")
            end_code.putln("Py_END_ALLOW_THREADS")
            end_code.putln("#else")
            end_code.put_safe("{\n")
            end_code.put_ensure_gil()
            end_code.putln("#endif /* _OPENMP */")
            self.cleanup_temps(end_code)
            end_code.put_release_ensured_gil()
            end_code.putln("#ifndef _OPENMP")
            end_code.put_safe("}\n")
            end_code.putln("#endif /* _OPENMP */")

    def trap_parallel_exit(self, code, should_flush=False):
        """
        Trap any kind of return inside a parallel construct. 'should_flush'
        indicates whether the variable should be flushed, which is needed by
        prange to skip the loop. It also indicates whether we need to register
        a continue (we need this for parallel blocks, but not for prange
        loops, as it is a direct jump there).

        It uses the same mechanism as try/finally:
            1 continue
            2 break
            3 return
            4 error
        """
        save_lastprivates_label = code.new_label()
        dont_return_label = code.new_label()

        self.any_label_used = False
        self.breaking_label_used = False
        self.error_label_used = False

        self.parallel_private_temps = []

        all_labels = code.get_all_labels()

        # Figure this out before starting to generate any code
        for label in all_labels:
            if code.label_used(label):
                self.breaking_label_used = (self.breaking_label_used or
                                            label != code.continue_label)
                self.any_label_used = True

        if self.any_label_used:
            code.put_goto(dont_return_label)

            for i, label in enumerate(all_labels):
                if not code.label_used(label):
                    continue

                is_continue_label = label == code.continue_label

                code.put_label(label)

                if not (should_flush and is_continue_label):
                    if label == code.error_label:
                        self.error_label_used = True
                        self.fetch_parallel_exception(code)

                    code.putln("%s = %d;" % (Naming.parallel_why, i + 1))

                if (self.breaking_label_used and self.is_prange and not
                        is_continue_label):
                    code.put_goto(save_lastprivates_label)
                else:
                    code.put_goto(dont_return_label)

        if self.any_label_used:
            if self.is_prange and self.breaking_label_used:
                # Don't rely on lastprivate, save our lastprivates
                code.put_label(save_lastprivates_label)
                self.save_parallel_vars(code)

            code.put_label(dont_return_label)

            if should_flush and self.breaking_label_used:
                code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_why)
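
    # Illustrative sketch (not emitted verbatim; label names are hypothetical)
    # of the trapping code produced above.  Every used label is turned into an
    # assignment to __pyx_parallel_why -- 1 continue, 2 break, 3 return,
    # 4 error -- so the code after the parallel region can dispatch on it:
    #
    #   goto __pyx_L_dont_return;
    #   __pyx_L_continue: __pyx_parallel_why = 1; goto __pyx_L_dont_return;
    #   __pyx_L_error: /* fetch exception */ __pyx_parallel_why = 4; goto __pyx_L_dont_return;
    #   __pyx_L_dont_return: ;
    #   #pragma omp flush(__pyx_parallel_why)   /* only when should_flush is set */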

    def save_parallel_vars(self, code):
        """
        The following shenanigans are instated when we break, return or
        propagate errors from a prange. In this case we cannot rely on
        lastprivate() to do its job, as no iterations may have executed yet
        in the last thread, leaving the values undefined. It is most likely
        that the breaking thread has well-defined values of the lastprivate
        variables, so we keep those values.
        """
        section_name = ("__pyx_parallel_lastprivates%d" %
                        self.critical_section_counter)
        code.putln_openmp("#pragma omp critical(%s)" % section_name)
        ParallelStatNode.critical_section_counter += 1

        code.begin_block()  # begin critical section

        c = self.begin_of_parallel_control_block_point

        temp_count = 0
        for entry, (op, lastprivate) in self.privates.iteritems():
            if not lastprivate or entry.type.is_pyobject:
                continue

            type_decl = entry.type.declaration_code("")
            temp_cname = "__pyx_parallel_temp%d" % temp_count
            private_cname = entry.cname

            temp_count += 1

            invalid_value = entry.type.invalid_value()
            if invalid_value:
                init = ' = ' + invalid_value
            else:
                init = ''
            # Declare the parallel private in the outer block
            c.putln("%s %s%s;" % (type_decl, temp_cname, init))

            # Initialize before escaping
            code.putln("%s = %s;" % (temp_cname, private_cname))

            self.parallel_private_temps.append((temp_cname, private_cname))

        code.end_block()  # end critical section

    def fetch_parallel_exception(self, code):
        """
        As each OpenMP thread may raise an exception, we need to fetch that
        exception from the threadstate and save it for after the parallel
        section where it can be re-raised in the master thread.

        Although it would seem that __pyx_filename, __pyx_lineno and
        __pyx_clineno are only assigned to under exception conditions (i.e.,
        when we have the GIL), and thus should be allowed to be shared without
        any race condition, they are in fact subject to the same race
        conditions that they were previously when they were global variables
        and functions were allowed to release the GIL:

            thread A                thread B
                acquire
                set lineno
                release
                                        acquire
                                        set lineno
                                        release
                acquire
                fetch exception
                release
                                        skip the fetch

            deallocate threadstate  deallocate threadstate
        """
        code.begin_block()
        code.put_ensure_gil(declare_gilstate=True)

        code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_exc_type)
        code.putln(
            "if (!%s) {" % Naming.parallel_exc_type)

        code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % self.parallel_exc)
        pos_info = chain(*zip(self.parallel_pos_info, self.pos_info))
        code.funcstate.uses_error_indicator = True
        code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
        code.put_gotref(Naming.parallel_exc_type)

        code.putln(
            "}")

        code.put_release_ensured_gil()
        code.end_block()

    def restore_parallel_exception(self, code):
        "Re-raise a parallel exception"
        code.begin_block()
        code.put_ensure_gil(declare_gilstate=True)

        code.put_giveref(Naming.parallel_exc_type)
        code.putln("__Pyx_ErrRestore(%s, %s, %s);" % self.parallel_exc)
        pos_info = chain(*zip(self.pos_info, self.parallel_pos_info))
        code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))

        code.put_release_ensured_gil()
        code.end_block()

    def restore_labels(self, code):
        """
        Restore all old labels. Call this before the 'else' clause of for
        loops and always before ending the parallel control flow block.
        """
        code.set_all_labels(self.old_loop_labels + (self.old_return_label,
                                                    self.old_error_label))

    def end_parallel_control_flow_block(self, code,
                                        break_=False, continue_=False):
        """
        This ends the parallel control flow block and based on how the parallel
        section was exited, takes the corresponding action. The break_ and
        continue_ parameters indicate whether these should be propagated
        outwards:

            for i in prange(...):
                with cython.parallel.parallel():
                    break

        Here break should be trapped in the parallel block, and propagated to
        the for loop.
        """
        c = self.begin_of_parallel_control_block_point

        # Firstly, always prefer errors over returning, continue or break
        if self.error_label_used:
            c.putln("const char *%s = NULL; int %s = 0, %s = 0;" %
                    self.parallel_pos_info)

            c.putln("PyObject *%s = NULL, *%s = NULL, *%s = NULL;" %
                    self.parallel_exc)

            code.putln(
                "if (%s) {" % Naming.parallel_exc_type)
            code.putln("/* This may have been overridden by a continue, "
                       "break or return in another thread. Prefer the error. */")
            code.putln("%s = 4;" % Naming.parallel_why)
            code.putln(
                "}")

        if continue_:
            any_label_used = self.any_label_used
        else:
            any_label_used = self.breaking_label_used

        if any_label_used:
            # __pyx_parallel_why is used, declare and initialize
            c.putln("int %s;" % Naming.parallel_why)
            c.putln("%s = 0;" % Naming.parallel_why)

            code.putln(
                "if (%s) {" % Naming.parallel_why)

            for temp_cname, private_cname in self.parallel_private_temps:
                code.putln("%s = %s;" % (private_cname, temp_cname))

            code.putln("switch (%s) {" % Naming.parallel_why)
            if continue_:
                code.put("    case 1: ")
                code.put_goto(code.continue_label)

            if break_:
                code.put("    case 2: ")
                code.put_goto(code.break_label)

            code.put("    case 3: ")
            code.put_goto(code.return_label)

            if self.error_label_used:
                code.globalstate.use_utility_code(restore_exception_utility_code)
                code.putln("    case 4:")
                self.restore_parallel_exception(code)
                code.put_goto(code.error_label)

            code.putln("}")  # end switch
            code.putln(
                "}")  # end if

        code.end_block()  # end parallel control flow block
        self.redef_builtin_expect_apple_gcc_bug(code)

    # FIXME: improve with version number for OS X Lion
    buggy_platform_macro_condition = "(defined(__APPLE__) || defined(__OSX__))"
    have_expect_condition = "(defined(__GNUC__) && " \
                            "(__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))"
    redef_condition = "(%s && %s)" % (buggy_platform_macro_condition, have_expect_condition)

    def undef_builtin_expect_apple_gcc_bug(self, code):
        """
        A bug on OS X Lion disallows __builtin_expect macros. This code avoids them
        """
        if not self.parent:
            code.undef_builtin_expect(self.redef_condition)

    def redef_builtin_expect_apple_gcc_bug(self, code):
        if not self.parent:
            code.redef_builtin_expect(self.redef_condition)


class ParallelWithBlockNode(ParallelStatNode):
    """
    This node represents a 'with cython.parallel.parallel():' block
    """

    valid_keyword_arguments = ['num_threads']

    num_threads = None

    def analyse_declarations(self, env):
        super(ParallelWithBlockNode, self).analyse_declarations(env)
        if self.args:
            error(self.pos, "cython.parallel.parallel() does not take "
                            "positional arguments")

    def generate_execution_code(self, code):
        self.declare_closure_privates(code)
        self.setup_parallel_control_flow_block(code)

        code.putln("#ifdef _OPENMP")
        code.put("#pragma omp parallel ")

        if self.privates:
            privates = [e.cname for e in self.privates
                        if not e.type.is_pyobject]
            code.put('private(%s)' % ', '.join(privates))

        self.privatization_insertion_point = code.insertion_point()
        self.put_num_threads(code)
        code.putln("")

        code.putln("#endif /* _OPENMP */")

        code.begin_block()  # parallel block
        self.begin_parallel_block(code)
        self.initialize_privates_to_nan(code)
        code.funcstate.start_collecting_temps()
        self.body.generate_execution_code(code)
        self.trap_parallel_exit(code)
        self.privatize_temps(code)
        self.end_parallel_block(code)
        code.end_block()  # end parallel block

        continue_ = code.label_used(code.continue_label)
        break_ = code.label_used(code.break_label)

        self.restore_labels(code)
        self.end_parallel_control_flow_block(code, break_=break_,
                                             continue_=continue_)
        self.release_closure_privates(code)
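
    # Illustrative sketch (simplified, not emitted verbatim) of what this node
    # generates around its body:
    #
    #   #ifdef _OPENMP
    #   #pragma omp parallel private(...) num_threads(...)
    #   #endif /* _OPENMP */
    #   {
    #       /* per-thread GIL state if needed, body, exit trapping */
    #   }
    #
    # wrapped in the parallel control flow block that afterwards re-raises a
    # pending exception or propagates break/continue/return.
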

class ParallelRangeNode(ParallelStatNode):
    """
    This node represents a 'for i in cython.parallel.prange():' construct.

    target       NameNode       the target iteration variable
    else_clause  Node or None   the else clause of this loop
    """

    child_attrs = ['body', 'target', 'else_clause', 'args', 'num_threads',
                   'chunksize']

    body = target = else_clause = args = None

    start = stop = step = None

    is_prange = True

    nogil = None
    schedule = None

    valid_keyword_arguments = ['schedule', 'nogil', 'num_threads', 'chunksize']
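
    # Illustrative usage sketch (user-level Cython; names are hypothetical):
    #
    #   cdef Py_ssize_t i
    #   cdef double total = 0
    #   for i in prange(n, nogil=True, schedule='static', chunksize=4,
    #                   num_threads=2):
    #       total += i * 2      # in-place '+=' makes 'total' a (+) reduction
    #
    # The positional arguments follow range(): prange(stop), prange(start, stop)
    # or prange(start, stop, step).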

    def __init__(self, pos, **kwds):
        super(ParallelRangeNode, self).__init__(pos, **kwds)
        # Pretend to be a ForInStatNode for control flow analysis
        self.iterator = PassStatNode(pos)

    def analyse_declarations(self, env):
        super(ParallelRangeNode, self).analyse_declarations(env)
        self.target.analyse_target_declaration(env)
        if self.else_clause is not None:
            self.else_clause.analyse_declarations(env)

        if not self.args or len(self.args) > 3:
            error(self.pos, "Invalid number of positional arguments to prange")
            return

        if len(self.args) == 1:
            self.stop, = self.args
        elif len(self.args) == 2:
            self.start, self.stop = self.args
        else:
            self.start, self.stop, self.step = self.args

        if hasattr(self.schedule, 'decode'):
            self.schedule = self.schedule.decode('ascii')

        if self.schedule not in (None, 'static', 'dynamic', 'guided',
                                 'runtime'):
            error(self.pos, "Invalid schedule argument to prange: %s" %
                            (self.schedule,))

    def analyse_expressions(self, env):
        was_nogil = env.nogil
        if self.nogil:
            env.nogil = True

        if self.target is None:
            error(self.pos, "prange() can only be used as part of a for loop")
            return self

        self.target = self.target.analyse_target_types(env)

        if not self.target.type.is_numeric:
            # Not a valid type, assume one for now anyway

            if not self.target.type.is_pyobject:
                # nogil_check will catch the is_pyobject case
                error(self.target.pos,
                      "Must be of numeric type, not %s" % self.target.type)

            self.index_type = PyrexTypes.c_py_ssize_t_type
        else:
            self.index_type = self.target.type
            if not self.index_type.signed:
                warning(self.target.pos,
                        "Unsigned index type not allowed before OpenMP 3.0",
                        level=2)

        # Setup start, stop and step, allocating temps if needed
        self.names = 'start', 'stop', 'step'
        start_stop_step = self.start, self.stop, self.step

        for node, name in zip(start_stop_step, self.names):
            if node is not None:
                node.analyse_types(env)
                if not node.type.is_numeric:
                    error(node.pos, "%s argument must be numeric" % name)
                    continue

                if not node.is_literal:
                    node = node.coerce_to_temp(env)
                    setattr(self, name, node)

                # As we range from 0 to nsteps, computing the index along the
                # way, we need a fitting type for 'i' and 'nsteps'
                self.index_type = PyrexTypes.widest_numeric_type(
                    self.index_type, node.type)

        if self.else_clause is not None:
            self.else_clause = self.else_clause.analyse_expressions(env)

        # Although not actually an assignment in this scope, it should be
        # treated as such to ensure it is unpacked if a closure temp, and to
        # ensure lastprivate behaviour and propagation. If the target index is
        # not a NameNode, it won't have an entry, and an error was issued by
        # ParallelRangeTransform
        if hasattr(self.target, 'entry'):
            self.assignments[self.target.entry] = self.target.pos, None

        node = super(ParallelRangeNode, self).analyse_expressions(env)

        if node.chunksize:
            if not node.schedule:
                error(node.chunksize.pos,
                      "Must provide schedule with chunksize")
            elif node.schedule == 'runtime':
                error(node.chunksize.pos,
                      "Chunksize not valid for the schedule runtime")
            elif (node.chunksize.type.is_int and
                  node.chunksize.is_literal and
                  node.chunksize.compile_time_value(env) <= 0):
                error(node.chunksize.pos, "Chunksize must not be negative")

            node.chunksize = node.chunksize.coerce_to(
                PyrexTypes.c_int_type, env).coerce_to_temp(env)

        if node.nogil:
            env.nogil = was_nogil

        node.is_nested_prange = node.parent and node.parent.is_prange
        if node.is_nested_prange:
            parent = node
            while parent.parent and parent.parent.is_prange:
                parent = parent.parent

            parent.assignments.update(node.assignments)
            parent.privates.update(node.privates)
            parent.assigned_nodes.extend(node.assigned_nodes)
        return node

    def nogil_check(self, env):
        names = 'start', 'stop', 'step', 'target'
        nodes = self.start, self.stop, self.step, self.target
        for name, node in zip(names, nodes):
            if node is not None and node.type.is_pyobject:
                error(node.pos, "%s may not be a Python object "
                                "as we don't have the GIL" % name)

    def generate_execution_code(self, code):
        """
        Generate code in the following steps

            1)  copy any closure variables determined thread-private
                into temporaries

            2)  allocate temps for start, stop and step

            3)  generate a loop that calculates the total number of steps,
                which then computes the target iteration variable for every step:

                    for i in prange(start, stop, step):
                        ...

                becomes

                    nsteps = (stop - start) / step;
                    i = start;

                    #pragma omp parallel for lastprivate(i)
                    for (temp = 0; temp < nsteps; temp++) {
                        i = start + step * temp;
                        ...
                    }

                Note that accumulation of 'i' would have a data dependency
                between iterations.

                Also, you can't do this

                    for (i = start; i < stop; i += step)
                        ...

                as the '<' operator should become '>' for descending loops.
                'for i from x < i < y:' does not suffer from this problem
                as the relational operator is known at compile time!

            4)  release our temps and write back any private closure variables
        """
        self.declare_closure_privates(code)

        # This can only be a NameNode
        target_index_cname = self.target.entry.cname

        # This will be used as the dict to format our code strings, holding
        # the start, stop, step, temps and target cnames
        fmt_dict = {
            'target': target_index_cname,
        }

        # Setup start, stop and step, allocating temps if needed
        start_stop_step = self.start, self.stop, self.step
        defaults = '0', '0', '1'
        for node, name, default in zip(start_stop_step, self.names, defaults):
            if node is None:
                result = default
            elif node.is_literal:
                result = node.get_constant_c_result_code()
            else:
                node.generate_evaluation_code(code)
                result = node.result()

            fmt_dict[name] = result

        fmt_dict['i'] = code.funcstate.allocate_temp(self.index_type, False)
        fmt_dict['nsteps'] = code.funcstate.allocate_temp(self.index_type, False)

        # TODO: check if the step is 0 and if so, raise an exception in a
        # 'with gil' block. For now, just abort
        code.putln("if (%(step)s == 0) abort();" % fmt_dict)

        self.setup_parallel_control_flow_block(code)  # parallel control flow block

        self.control_flow_var_code_point = code.insertion_point()

        # Note: nsteps is private in an outer scope if present
        code.putln("%(nsteps)s = (%(stop)s - %(start)s) / %(step)s;" % fmt_dict)

        # The target iteration variable might not be initialized, do it only if
        # we are executing at least 1 iteration, otherwise we should leave the
        # target unaffected. The target iteration variable is firstprivate to
        # shut up compiler warnings caused by lastprivate, as the compiler
        # erroneously believes that nsteps may be <= 0, leaving the private
        # target index uninitialized
        code.putln("if (%(nsteps)s > 0)" % fmt_dict)
        code.begin_block()  # if block
        self.generate_loop(code, fmt_dict)
        code.end_block()  # end if block

        self.restore_labels(code)

        if self.else_clause:
            if self.breaking_label_used:
                code.put("if (%s < 2)" % Naming.parallel_why)

            code.begin_block()  # else block
            code.putln("/* else */")
            self.else_clause.generate_execution_code(code)
            code.end_block()  # end else block

        # ------ cleanup ------
        self.end_parallel_control_flow_block(code)  # end parallel control flow block

        # And finally, release our privates and write back any closure
        # variables
        for temp in start_stop_step:
            if temp is not None:
                temp.generate_disposal_code(code)
                temp.free_temps(code)

        code.funcstate.release_temp(fmt_dict['i'])
        code.funcstate.release_temp(fmt_dict['nsteps'])

        self.release_closure_privates(code)

    def generate_loop(self, code, fmt_dict):
        if self.is_nested_prange:
            code.putln("#if 0")
        else:
            code.putln("#ifdef _OPENMP")

        if not self.is_parallel:
            code.put("#pragma omp for")
            self.privatization_insertion_point = code.insertion_point()
            reduction_codepoint = self.parent.privatization_insertion_point
        else:
            code.put("#pragma omp parallel")
            self.privatization_insertion_point = code.insertion_point()
            reduction_codepoint = self.privatization_insertion_point
            code.putln("")
            code.putln("#endif /* _OPENMP */")

            code.begin_block()  # pragma omp parallel begin block

            # Initialize the GIL if needed for this thread
            self.begin_parallel_block(code)

            if self.is_nested_prange:
                code.putln("#if 0")
            else:
                code.putln("#ifdef _OPENMP")
            code.put("#pragma omp for")

        for entry, (op, lastprivate) in self.privates.iteritems():
            # Don't declare the index variable as a reduction
            if op and op in "+*-&^|" and entry != self.target.entry:
                if entry.type.is_pyobject:
                    error(self.pos, "Python objects cannot be reductions")
                else:
                    #code.put(" reduction(%s:%s)" % (op, entry.cname))
                    # This is the only way reductions + nesting works in gcc4.5
                    reduction_codepoint.put(
                        " reduction(%s:%s)" % (op, entry.cname))
            else:
                if entry == self.target.entry:
                    code.put(" firstprivate(%s)" % entry.cname)
                    code.put(" lastprivate(%s)" % entry.cname)
                    continue

                if not entry.type.is_pyobject:
                    if lastprivate:
                        private = 'lastprivate'
                    else:
                        private = 'private'

                    code.put(" %s(%s)" % (private, entry.cname))

        if self.schedule:
            if self.chunksize:
                chunksize = ", %s" % self.evaluate_before_block(code,
                                                                self.chunksize)
            else:
                chunksize = ""

            code.put(" schedule(%s%s)" % (self.schedule, chunksize))

        self.put_num_threads(reduction_codepoint)

        code.putln("")
        code.putln("#endif /* _OPENMP */")

        code.put("for (%(i)s = 0; %(i)s < %(nsteps)s; %(i)s++)" % fmt_dict)
        code.begin_block()  # for loop block

        guard_around_body_codepoint = code.insertion_point()

        # Start if guard block around the body. This may be unnecessary, but
        # at least it doesn't spoil indentation
        code.begin_block()

        code.putln("%(target)s = %(start)s + %(step)s * %(i)s;" % fmt_dict)
        self.initialize_privates_to_nan(code, exclude=self.target.entry)

        if self.is_parallel:
            code.funcstate.start_collecting_temps()

        self.body.generate_execution_code(code)
        self.trap_parallel_exit(code, should_flush=True)
        self.privatize_temps(code)

        if self.breaking_label_used:
            # Put a guard around the loop body in case return, break or
            # exceptions might be used
            guard_around_body_codepoint.putln("if (%s < 2)" % Naming.parallel_why)

        code.end_block()  # end guard around loop body
        code.end_block()  # end for loop block

        if self.is_parallel:
            # Release the GIL and deallocate the thread state
            self.end_parallel_block(code)
            code.end_block()  # pragma omp parallel end block
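
    # Illustrative sketch (simplified C; temp names are hypothetical) of the
    # loop generate_loop() emits for a non-nested prange:
    #
    #   #ifdef _OPENMP
    #   #pragma omp parallel reduction(+:total) num_threads(2)
    #   #endif /* _OPENMP */
    #   {
    #       #ifdef _OPENMP
    #       #pragma omp for firstprivate(i) lastprivate(i) schedule(static, 4)
    #       #endif /* _OPENMP */
    #       for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_nsteps; __pyx_t_2++) {
    #           i = start + step * __pyx_t_2;
    #           /* body, exit trapping */
    #       }
    #   }
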

class CnameDecoratorNode(StatNode):
    """
    This node is for the cname decorator in CythonUtilityCode.

    In case of a cdef class the cname specifies the objstruct_cname.

    node        the node to which the cname decorator is applied
    cname       the cname the node should get
    """

    child_attrs = ['node']
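
    # Illustrative usage sketch as it appears in CythonUtilityCode sources
    # (the function name and cname are hypothetical):
    #
    #   @cname('__pyx_some_helper')
    #   cdef some_helper(x):
    #       ...
    #
    # The decorated entry then uses '__pyx_some_helper' as its func_cname
    # instead of the usual mangled name.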

    def analyse_declarations(self, env):
        self.node.analyse_declarations(env)
        node = self.node

        if isinstance(node, CompilerDirectivesNode):
            node = node.body.stats[0]

        self.is_function = isinstance(node, FuncDefNode)
        is_struct_or_enum = isinstance(node, (CStructOrUnionDefNode,
                                              CEnumDefNode))
        e = node.entry

        if self.is_function:
            e.cname = self.cname
            e.func_cname = self.cname

            if e.pyfunc_cname and '.' in e.pyfunc_cname:
                e.pyfunc_cname = self.mangle(e.pyfunc_cname)
        elif is_struct_or_enum:
            e.cname = e.type.cname = self.cname
        else:
            scope = node.scope

            e.cname = self.cname
            e.type.objstruct_cname = self.cname + '_obj'
            e.type.typeobj_cname = Naming.typeobj_prefix + self.cname
            e.type.typeptr_cname = self.cname + '_type'
            e.type.scope.namespace_cname = e.type.typeptr_cname

            e.as_variable.cname = py_object_type.cast_code(e.type.typeptr_cname)

            scope.scope_prefix = self.cname + "_"

            for name, entry in scope.entries.iteritems():
                if entry.func_cname:
                    entry.func_cname = self.mangle(entry.cname)
                if entry.pyfunc_cname:
                    entry.pyfunc_cname = self.mangle(entry.pyfunc_cname)

    def mangle(self, cname):
        if '.' in cname:
            # remove __pyx_base from func_cname
            cname = cname.split('.')[-1]
        return '%s_%s' % (self.cname, cname)

    def analyse_expressions(self, env):
        self.node = self.node.analyse_expressions(env)
        return self

    def generate_function_definitions(self, env, code):
        "Ensure a prototype for every @cname method in the right place"
        if self.is_function and env.is_c_class_scope:
            # method in cdef class, generate a prototype in the header
            h_code = code.globalstate['utility_code_proto']

            if isinstance(self.node, DefNode):
                self.node.generate_function_header(
                    h_code, with_pymethdef=False, proto_only=True)
            else:
                import ModuleNode
                entry = self.node.entry
                cname = entry.cname
                entry.cname = entry.func_cname

                ModuleNode.generate_cfunction_declaration(
                    entry,
                    env.global_scope(),
                    h_code,
                    definition=True)

                entry.cname = cname

        self.node.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        self.node.generate_execution_code(code)


#------------------------------------------------------------------------------------
#
#  Runtime support code
#
#------------------------------------------------------------------------------------

if Options.gcc_branch_hints:
    branch_prediction_macros = """
/* Test for GCC > 2.95 */
#if defined(__GNUC__) \
    && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
  #define likely(x)   __builtin_expect(!!(x), 1)
  #define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
  #define likely(x)   (x)
  #define unlikely(x) (x)
#endif /* __GNUC__ */
"""
else:
    branch_prediction_macros = """
#define likely(x)   (x)
#define unlikely(x) (x)
"""

#------------------------------------------------------------------------------------

printing_utility_code = UtilityCode.load_cached("Print", "Printing.c")
printing_one_utility_code = UtilityCode.load_cached("PrintOne", "Printing.c")

#------------------------------------------------------------------------------------

# Exception raising code
#
# Exceptions are raised by __Pyx_Raise() and stored as plain
# type/value/tb in PyThreadState->curexc_*.  When being caught by an
# 'except' statement, curexc_* is moved over to exc_* by
# __Pyx_GetException()

restore_exception_utility_code = UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c")
raise_utility_code = UtilityCode.load_cached("RaiseException", "Exceptions.c")
get_exception_utility_code = UtilityCode.load_cached("GetException", "Exceptions.c")
swap_exception_utility_code = UtilityCode.load_cached("SwapException", "Exceptions.c")
reset_exception_utility_code = UtilityCode.load_cached("SaveResetException", "Exceptions.c")
traceback_utility_code = UtilityCode.load_cached("AddTraceback", "Exceptions.c")

#------------------------------------------------------------------------------------

get_exception_tuple_utility_code = UtilityCode(proto="""
static PyObject *__Pyx_GetExceptionTuple(void); /*proto*/
""",
# I doubt that calling __Pyx_GetException() here is correct as it moves
# the exception from tstate->curexc_* to tstate->exc_*, which prevents
# exception handlers later on from receiving it.
impl = """
static PyObject *__Pyx_GetExceptionTuple(void) {
    PyObject *type = NULL, *value = NULL, *tb = NULL;
    if (__Pyx_GetException(&type, &value, &tb) == 0) {
        PyObject* exc_info = PyTuple_New(3);
        if (exc_info) {
            Py_INCREF(type);
            Py_INCREF(value);
            Py_INCREF(tb);
            PyTuple_SET_ITEM(exc_info, 0, type);
            PyTuple_SET_ITEM(exc_info, 1, value);
            PyTuple_SET_ITEM(exc_info, 2, tb);
        }
        return exc_info;
    }
    return NULL;
}
""",
requires=[get_exception_utility_code])