1 ////////// MemviewSliceStruct.proto //////////
3 /* memoryview slice struct */
/* Forward declaration of the memoryview object struct; the actual name is
 * substituted by the Cython template engine via {{memview_struct_name}}. */
4 struct {{memview_struct_name
}};
/* NOTE(review): interior lines are missing from this extraction — presumably
 * the opening "typedef struct {" and a "char *data;" member — so only the
 * tail of the slice struct is visible here.  TODO: confirm against the
 * original file before relying on the layout. */
7 struct {{memview_struct_name
}} *memview
;
/* Per-dimension extent, byte stride, and suboffset (negative for a direct
 * axis; see __pyx_check_suboffsets below), sized for the maximum supported
 * number of dimensions. */
9 Py_ssize_t shape
[{{max_dims
}}];
10 Py_ssize_t strides
[{{max_dims
}}];
11 Py_ssize_t suboffsets
[{{max_dims
}}];
12 } {{memviewslice_name
}};
15 /////////// Atomics.proto /////////////
19 #ifndef CYTHON_ATOMICS
20 #define CYTHON_ATOMICS 1
23 #define __pyx_atomic_int_type int
24 // todo: Portland pgcc, maybe OS X's OSAtomicIncrement32,
25 // libatomic + autotools-like distutils support? Such a pain...
/* BUG FIX: GCC's patch-level macro is __GNUC_PATCHLEVEL__ (with trailing
 * underscores).  The bare __GNUC_PATCHLEVEL is never defined, evaluates to 0
 * inside #if, and therefore wrongly excluded GCC 4.1.x from the
 * __sync_fetch_and_add atomics path below. */
26 #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 || \
27 (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) && \
30 #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
31 #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
33 #ifdef __PYX_DEBUG_ATOMICS
34 #warning "Using GNU atomics"
/* BUG FIX: the MSVC version macro is _MSC_VER (leading underscore); plain
 * MSC_VER is never defined by any compiler, so the InterlockedIncrement /
 * InterlockedDecrement branch below could never be selected.  Use defined()
 * so the test is also well-formed on non-MSVC compilers. */
36 #elif CYTHON_ATOMICS && defined(_MSC_VER)
39 #define __pyx_atomic_int_type LONG
40 #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
41 #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
43 #ifdef __PYX_DEBUG_ATOMICS
44 #warning "Using MSVC atomics"
46 #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
47 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
48 #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
50 #ifdef __PYX_DEBUG_ATOMICS
51 #warning "Using Intel atomics"
55 #define CYTHON_ATOMICS 0
57 #ifdef __PYX_DEBUG_ATOMICS
58 #warning "Not using atomics"
62 typedef volatile __pyx_atomic_int_type __pyx_atomic_int
;
/* Bump/drop the memoryview's slice acquisition count using the compiler
 * atomics selected in the Atomics section above; the lock argument is
 * unused in this arm. */
65 #define __pyx_add_acquisition_count(memview) \
66 __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
67 #define __pyx_sub_acquisition_count(memview) \
68 __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
/* NOTE(review): the same two macro names are defined again below — in the
 * original file these two groups sit in the two arms of a conditional on
 * atomics availability; the #if/#else/#endif lines are missing from this
 * extraction.  This fallback arm serializes the count update by taking the
 * per-memoryview PyThread lock (see the *_locked helpers further down). */
70 #define __pyx_add_acquisition_count(memview) \
71 __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
72 #define __pyx_sub_acquisition_count(memview) \
73 __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
77 /////////////// ObjectToMemviewSlice.proto ///////////////
79 static CYTHON_INLINE
{{memviewslice_name
}} {{funcname
}}(PyObject
*);
82 ////////// MemviewSliceInit.proto //////////
/* Maximum number of buffer dimensions; the value is substituted at code
 * generation time through the %(BUF_MAX_NDIMS)d format placeholder. */
84 #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
/* Per-axis access/packing specification bits, consumed by
 * __pyx_check_strides and __pyx_check_suboffsets below:
 *   DIRECT requires suboffsets[dim] < 0 (or no suboffsets at all),
 *   PTR requires suboffsets[dim] >= 0 (indirect axis),
 *   CONTIG requires stride == itemsize (or sizeof(void *) when combined
 *   with PTR/FULL), FOLLOW only requires stride >= itemsize. */
86 #define __Pyx_MEMVIEW_DIRECT 1
87 #define __Pyx_MEMVIEW_PTR 2
88 #define __Pyx_MEMVIEW_FULL 4
89 #define __Pyx_MEMVIEW_CONTIG 8
90 #define __Pyx_MEMVIEW_STRIDED 16
91 #define __Pyx_MEMVIEW_FOLLOW 32
/* Whole-buffer contiguity requests, tested in __pyx_verify_contig. */
93 #define __Pyx_IS_C_CONTIG 1
94 #define __Pyx_IS_F_CONTIG 2
96 static int __Pyx_init_memviewslice(
97 struct __pyx_memoryview_obj
*memview
,
99 __Pyx_memviewslice
*memviewslice
,
100 int memview_is_new_reference
);
102 static CYTHON_INLINE
int __pyx_add_acquisition_count_locked(
103 __pyx_atomic_int
*acquisition_count
, PyThread_type_lock lock
);
104 static CYTHON_INLINE
int __pyx_sub_acquisition_count_locked(
105 __pyx_atomic_int
*acquisition_count
, PyThread_type_lock lock
);
/* Accessors for the memoryview's aligned acquisition counter. */
107 #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
108 #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
/* Convenience wrappers that record the caller's source line; the line
 * number is reported by __pyx_fatalerror when the acquisition count is
 * found to be invalid (see __Pyx_INC_MEMVIEW / __Pyx_XDEC_MEMVIEW). */
109 #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
110 #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
111 static CYTHON_INLINE
void __Pyx_INC_MEMVIEW({{memviewslice_name
}} *, int, int);
112 static CYTHON_INLINE
void __Pyx_XDEC_MEMVIEW({{memviewslice_name
}} *, int, int);
115 /////////////// MemviewSliceIndex.proto ///////////////
117 static CYTHON_INLINE
char *__pyx_memviewslice_index_full(
118 const char *bufp
, Py_ssize_t idx
, Py_ssize_t stride
, Py_ssize_t suboffset
);
121 /////////////// ObjectToMemviewSlice ///////////////
122 //@requires: MemviewSliceValidateAndInit
124 static CYTHON_INLINE
{{memviewslice_name
}} {{funcname
}}(PyObject
*obj
) {
125 {{memviewslice_name
}} result
= {{memslice_init
}};
126 __Pyx_BufFmt_StackElem stack
[{{struct_nesting_depth
}}];
127 int axes_specs
[] = { {{axes_specs
}} };
130 if (obj
== Py_None
) {
131 /* We don't bother to refcount None */
132 result
.memview
= (struct __pyx_memoryview_obj
*) Py_None
;
136 retcode
= __Pyx_ValidateAndInit_memviewslice(axes_specs
, {{c_or_f_flag
}},
137 {{buf_flag
}}, {{ndim
}},
138 &{{dtype_typeinfo
}}, stack
,
141 if (unlikely(retcode
== -1))
146 result
.memview
= NULL
;
152 /////////////// MemviewSliceValidateAndInit.proto ///////////////
154 static int __Pyx_ValidateAndInit_memviewslice(
159 __Pyx_TypeInfo
*dtype
,
160 __Pyx_BufFmt_StackElem stack
[],
161 __Pyx_memviewslice
*memviewslice
,
162 PyObject
*original_obj
);
164 /////////////// MemviewSliceValidateAndInit ///////////////
165 //@requires: Buffer.c::TypeInfoCompare
168 __pyx_check_strides(Py_buffer
*buf
, int dim
, int ndim
, int spec
)
170 if (buf
->shape
[dim
] <= 1)
174 if (spec
& __Pyx_MEMVIEW_CONTIG
) {
175 if (spec
& (__Pyx_MEMVIEW_PTR
|__Pyx_MEMVIEW_FULL
)) {
176 if (buf
->strides
[dim
] != sizeof(void *)) {
177 PyErr_Format(PyExc_ValueError
,
178 "Buffer is not indirectly contiguous "
179 "in dimension %d.", dim
);
182 } else if (buf
->strides
[dim
] != buf
->itemsize
) {
183 PyErr_SetString(PyExc_ValueError
,
184 "Buffer and memoryview are not contiguous "
185 "in the same dimension.");
190 if (spec
& __Pyx_MEMVIEW_FOLLOW
) {
191 Py_ssize_t stride
= buf
->strides
[dim
];
194 if (stride
< buf
->itemsize
) {
195 PyErr_SetString(PyExc_ValueError
,
196 "Buffer and memoryview are not contiguous "
197 "in the same dimension.");
202 if (spec
& __Pyx_MEMVIEW_CONTIG
&& dim
!= ndim
- 1) {
203 PyErr_Format(PyExc_ValueError
,
204 "C-contiguous buffer is not contiguous in "
205 "dimension %d", dim
);
207 } else if (spec
& (__Pyx_MEMVIEW_PTR
)) {
208 PyErr_Format(PyExc_ValueError
,
209 "C-contiguous buffer is not indirect in "
210 "dimension %d", dim
);
212 } else if (buf
->suboffsets
) {
213 PyErr_SetString(PyExc_ValueError
,
214 "Buffer exposes suboffsets but no strides");
225 __pyx_check_suboffsets(Py_buffer
*buf
, int dim
, CYTHON_UNUSED
int ndim
, int spec
)
227 // Todo: without PyBUF_INDIRECT we may not have suboffset information, i.e., the
228 // ptr may not be set to NULL but may be uninitialized?
229 if (spec
& __Pyx_MEMVIEW_DIRECT
) {
230 if (buf
->suboffsets
&& buf
->suboffsets
[dim
] >= 0) {
231 PyErr_Format(PyExc_ValueError
,
232 "Buffer not compatible with direct access "
233 "in dimension %d.", dim
);
238 if (spec
& __Pyx_MEMVIEW_PTR
) {
239 if (!buf
->suboffsets
|| (buf
->suboffsets
&& buf
->suboffsets
[dim
] < 0)) {
240 PyErr_Format(PyExc_ValueError
,
241 "Buffer is not indirectly accessible "
242 "in dimension %d.", dim
);
253 __pyx_verify_contig(Py_buffer
*buf
, int ndim
, int c_or_f_flag
)
257 if (c_or_f_flag
& __Pyx_IS_F_CONTIG
) {
258 Py_ssize_t stride
= 1;
259 for (i
= 0; i
< ndim
; i
++) {
260 if (stride
* buf
->itemsize
!= buf
->strides
[i
] &&
263 PyErr_SetString(PyExc_ValueError
,
264 "Buffer not fortran contiguous.");
267 stride
= stride
* buf
->shape
[i
];
269 } else if (c_or_f_flag
& __Pyx_IS_C_CONTIG
) {
270 Py_ssize_t stride
= 1;
271 for (i
= ndim
- 1; i
>- 1; i
--) {
272 if (stride
* buf
->itemsize
!= buf
->strides
[i
] &&
274 PyErr_SetString(PyExc_ValueError
,
275 "Buffer not C contiguous.");
278 stride
= stride
* buf
->shape
[i
];
287 static int __Pyx_ValidateAndInit_memviewslice(
292 __Pyx_TypeInfo
*dtype
,
293 __Pyx_BufFmt_StackElem stack
[],
294 __Pyx_memviewslice
*memviewslice
,
295 PyObject
*original_obj
)
297 struct __pyx_memoryview_obj
*memview
, *new_memview
;
298 __Pyx_RefNannyDeclarations
300 int i
, spec
= 0, retval
= -1;
301 __Pyx_BufFmt_Context ctx
;
302 int from_memoryview
= __pyx_memoryview_check(original_obj
);
304 __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
306 if (from_memoryview
&& __pyx_typeinfo_cmp(dtype
, ((struct __pyx_memoryview_obj
*)
307 original_obj
)->typeinfo
)) {
308 /* We have a matching dtype, skip format parsing */
309 memview
= (struct __pyx_memoryview_obj
*) original_obj
;
312 memview
= (struct __pyx_memoryview_obj
*) __pyx_memoryview_new(
313 original_obj
, buf_flags
, 0, dtype
);
314 new_memview
= memview
;
315 if (unlikely(!memview
))
319 buf
= &memview
->view
;
320 if (buf
->ndim
!= ndim
) {
321 PyErr_Format(PyExc_ValueError
,
322 "Buffer has wrong number of dimensions (expected %d, got %d)",
328 __Pyx_BufFmt_Init(&ctx
, stack
, dtype
);
329 if (!__Pyx_BufFmt_CheckString(&ctx
, buf
->format
)) goto fail
;
332 if ((unsigned) buf
->itemsize
!= dtype
->size
) {
333 PyErr_Format(PyExc_ValueError
,
334 "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T
"u byte%s) "
335 "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T
"u byte%s)",
337 (buf
->itemsize
> 1) ? "s" : "",
340 (dtype
->size
> 1) ? "s" : "");
345 for (i
= 0; i
< ndim
; i
++) {
346 spec
= axes_specs
[i
];
347 if (!__pyx_check_strides(buf
, i
, ndim
, spec
))
349 if (!__pyx_check_suboffsets(buf
, i
, ndim
, spec
))
353 /* Check contiguity */
354 if (buf
->strides
&& !__pyx_verify_contig(buf
, ndim
, c_or_f_flag
))
358 if (unlikely(__Pyx_init_memviewslice(memview
, ndim
, memviewslice
,
359 new_memview
!= NULL
) == -1)) {
367 Py_XDECREF(new_memview
);
371 __Pyx_RefNannyFinishContext();
376 ////////// MemviewSliceInit //////////
379 __Pyx_init_memviewslice(struct __pyx_memoryview_obj
*memview
,
381 {{memviewslice_name
}} *memviewslice
,
382 int memview_is_new_reference
)
384 __Pyx_RefNannyDeclarations
386 Py_buffer
*buf
= &memview
->view
;
387 __Pyx_RefNannySetupContext("init_memviewslice", 0);
390 PyErr_SetString(PyExc_ValueError
,
393 } else if (memviewslice
->memview
|| memviewslice
->data
) {
394 PyErr_SetString(PyExc_ValueError
,
395 "memviewslice is already initialized!");
400 for (i
= 0; i
< ndim
; i
++) {
401 memviewslice
->strides
[i
] = buf
->strides
[i
];
404 Py_ssize_t stride
= buf
->itemsize
;
405 for (i
= ndim
- 1; i
>= 0; i
--) {
406 memviewslice
->strides
[i
] = stride
;
407 stride
*= buf
->shape
[i
];
411 for (i
= 0; i
< ndim
; i
++) {
412 memviewslice
->shape
[i
] = buf
->shape
[i
];
413 if (buf
->suboffsets
) {
414 memviewslice
->suboffsets
[i
] = buf
->suboffsets
[i
];
416 memviewslice
->suboffsets
[i
] = -1;
420 memviewslice
->memview
= memview
;
421 memviewslice
->data
= (char *)buf
->buf
;
422 if (__pyx_add_acquisition_count(memview
) == 0 && !memview_is_new_reference
) {
429 /* Don't decref, the memoryview may be borrowed. Let the caller do the cleanup */
430 /* __Pyx_XDECREF(memviewslice->memview); */
431 memviewslice
->memview
= 0;
432 memviewslice
->data
= 0;
435 __Pyx_RefNannyFinishContext();
440 static CYTHON_INLINE
void __pyx_fatalerror(const char *fmt
, ...) {
444 va_start(vargs
, fmt
);
446 #ifdef HAVE_STDARG_PROTOTYPES
447 va_start(vargs
, fmt
);
452 vsnprintf(msg
, 200, fmt
, vargs
);
458 static CYTHON_INLINE
int
459 __pyx_add_acquisition_count_locked(__pyx_atomic_int
*acquisition_count
,
460 PyThread_type_lock lock
)
463 PyThread_acquire_lock(lock
, 1);
464 result
= (*acquisition_count
)++;
465 PyThread_release_lock(lock
);
469 static CYTHON_INLINE
int
470 __pyx_sub_acquisition_count_locked(__pyx_atomic_int
*acquisition_count
,
471 PyThread_type_lock lock
)
474 PyThread_acquire_lock(lock
, 1);
475 result
= (*acquisition_count
)--;
476 PyThread_release_lock(lock
);
481 static CYTHON_INLINE
void
482 __Pyx_INC_MEMVIEW({{memviewslice_name
}} *memslice
, int have_gil
, int lineno
)
485 struct {{memview_struct_name
}} *memview
= memslice
->memview
;
486 if (!memview
|| (PyObject
*) memview
== Py_None
)
487 return; /* allow uninitialized memoryview assignment */
489 if (__pyx_get_slice_count(memview
) < 0)
490 __pyx_fatalerror("Acquisition count is %d (line %d)",
491 __pyx_get_slice_count(memview
), lineno
);
493 first_time
= __pyx_add_acquisition_count(memview
) == 0;
497 Py_INCREF((PyObject
*) memview
);
499 PyGILState_STATE _gilstate
= PyGILState_Ensure();
500 Py_INCREF((PyObject
*) memview
);
501 PyGILState_Release(_gilstate
);
506 static CYTHON_INLINE
void __Pyx_XDEC_MEMVIEW({{memviewslice_name
}} *memslice
,
507 int have_gil
, int lineno
) {
509 struct {{memview_struct_name
}} *memview
= memslice
->memview
;
513 } else if ((PyObject
*) memview
== Py_None
) {
514 memslice
->memview
= NULL
;
518 if (__pyx_get_slice_count(memview
) <= 0)
519 __pyx_fatalerror("Acquisition count is %d (line %d)",
520 __pyx_get_slice_count(memview
), lineno
);
522 last_time
= __pyx_sub_acquisition_count(memview
) == 1;
523 memslice
->data
= NULL
;
526 Py_CLEAR(memslice
->memview
);
528 PyGILState_STATE _gilstate
= PyGILState_Ensure();
529 Py_CLEAR(memslice
->memview
);
530 PyGILState_Release(_gilstate
);
533 memslice
->memview
= NULL
;
538 ////////// MemviewSliceCopyTemplate.proto //////////
540 static {{memviewslice_name
}}
541 __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice
*from_mvs
,
542 const char *mode
, int ndim
,
543 size_t sizeof_dtype
, int contig_flag
,
544 int dtype_is_object
);
547 ////////// MemviewSliceCopyTemplate //////////
549 static {{memviewslice_name
}}
550 __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice
*from_mvs
,
551 const char *mode
, int ndim
,
552 size_t sizeof_dtype
, int contig_flag
,
555 __Pyx_RefNannyDeclarations
557 __Pyx_memviewslice new_mvs
= {{memslice_init
}};
558 struct __pyx_memoryview_obj
*from_memview
= from_mvs
->memview
;
559 Py_buffer
*buf
= &from_memview
->view
;
560 PyObject
*shape_tuple
= NULL
;
561 PyObject
*temp_int
= NULL
;
562 struct __pyx_array_obj
*array_obj
= NULL
;
563 struct __pyx_memoryview_obj
*memview_obj
= NULL
;
565 __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
567 for (i
= 0; i
< ndim
; i
++) {
568 if (from_mvs
->suboffsets
[i
] >= 0) {
569 PyErr_Format(PyExc_ValueError
, "Cannot copy memoryview slice with "
570 "indirect dimensions (axis %d)", i
);
575 shape_tuple
= PyTuple_New(ndim
);
576 if (unlikely(!shape_tuple
)) {
579 __Pyx_GOTREF(shape_tuple
);
582 for(i
= 0; i
< ndim
; i
++) {
583 temp_int
= PyInt_FromSsize_t(from_mvs
->shape
[i
]);
584 if(unlikely(!temp_int
)) {
587 PyTuple_SET_ITEM(shape_tuple
, i
, temp_int
);
592 array_obj
= __pyx_array_new(shape_tuple
, sizeof_dtype
, buf
->format
, (char *) mode
, NULL
);
593 if (unlikely(!array_obj
)) {
596 __Pyx_GOTREF(array_obj
);
598 memview_obj
= (struct __pyx_memoryview_obj
*) __pyx_memoryview_new(
599 (PyObject
*) array_obj
, contig_flag
,
601 from_mvs
->memview
->typeinfo
);
602 if (unlikely(!memview_obj
))
605 /* initialize new_mvs */
606 if (unlikely(__Pyx_init_memviewslice(memview_obj
, ndim
, &new_mvs
, 1) < 0))
609 if (unlikely(__pyx_memoryview_copy_contents(*from_mvs
, new_mvs
, ndim
, ndim
,
610 dtype_is_object
) < 0))
616 __Pyx_XDECREF(new_mvs
.memview
);
617 new_mvs
.memview
= NULL
;
620 __Pyx_XDECREF(shape_tuple
);
621 __Pyx_XDECREF(temp_int
);
622 __Pyx_XDECREF(array_obj
);
623 __Pyx_RefNannyFinishContext();
628 ////////// CopyContentsUtility.proto /////////
630 #define {{func_cname}}(slice) \
631 __pyx_memoryview_copy_new_contig(&slice, "{{mode}}", {{ndim}}, \
632 sizeof({{dtype_decl}}), {{contig_flag}}, \
636 ////////// OverlappingSlices.proto //////////
638 static int __pyx_slices_overlap({{memviewslice_name
}} *slice1
,
639 {{memviewslice_name
}} *slice2
,
640 int ndim
, size_t itemsize
);
643 ////////// OverlappingSlices //////////
645 /* Based on numpy's core/src/multiarray/array_assign.c */
647 /* Gets a half-open range [start, end) which contains the array data */
649 __pyx_get_array_memory_extents({{memviewslice_name
}} *slice
,
650 void **out_start
, void **out_end
,
651 int ndim
, size_t itemsize
)
656 start
= end
= slice
->data
;
658 for (i
= 0; i
< ndim
; i
++) {
659 Py_ssize_t stride
= slice
->strides
[i
];
660 Py_ssize_t extent
= slice
->shape
[i
];
663 *out_start
= *out_end
= start
;
667 end
+= stride
* (extent
- 1);
669 start
+= stride
* (extent
- 1);
673 /* Return a half-open range */
675 *out_end
= end
+ itemsize
;
678 /* Returns 1 if the arrays have overlapping data, 0 otherwise */
680 __pyx_slices_overlap({{memviewslice_name
}} *slice1
,
681 {{memviewslice_name
}} *slice2
,
682 int ndim
, size_t itemsize
)
684 void *start1
, *end1
, *start2
, *end2
;
686 __pyx_get_array_memory_extents(slice1
, &start1
, &end1
, ndim
, itemsize
);
687 __pyx_get_array_memory_extents(slice2
, &start2
, &end2
, ndim
, itemsize
);
689 return (start1
< end2
) && (start2
< end1
);
693 ////////// MemviewSliceIsCContig.proto //////////
695 #define __pyx_memviewslice_is_c_contig{{ndim}}(slice) \
696 __pyx_memviewslice_is_contig(&slice, 'C', {{ndim}})
699 ////////// MemviewSliceIsFContig.proto //////////
701 #define __pyx_memviewslice_is_f_contig{{ndim}}(slice) \
702 __pyx_memviewslice_is_contig(&slice, 'F', {{ndim}})
705 ////////// MemviewSliceIsContig.proto //////////
707 static int __pyx_memviewslice_is_contig(const {{memviewslice_name
}} *mvs
,
708 char order
, int ndim
);
711 ////////// MemviewSliceIsContig //////////
714 __pyx_memviewslice_is_contig(const {{memviewslice_name
}} *mvs
,
715 char order
, int ndim
)
717 int i
, index
, step
, start
;
718 Py_ssize_t itemsize
= mvs
->memview
->view
.itemsize
;
728 for (i
= 0; i
< ndim
; i
++) {
729 index
= start
+ step
* i
;
730 if (mvs
->suboffsets
[index
] >= 0 || mvs
->strides
[index
] != itemsize
)
733 itemsize
*= mvs
->shape
[index
];
740 /////////////// MemviewSliceIndex ///////////////
/* Compute the address of element `idx` along one axis of a slice:
 * advance the base pointer by idx * stride bytes and, for an indirect
 * axis (suboffset >= 0), dereference the stored pointer and add the
 * suboffset to reach the actual data block. */
742 static CYTHON_INLINE
char *
743 __pyx_memviewslice_index_full(const char *bufp
, Py_ssize_t idx
,
744 Py_ssize_t stride
, Py_ssize_t suboffset
)
/* Strided step within the current memory block. */
746 bufp
= bufp
+ idx
* stride
;
/* Indirect axis: bufp currently points at a char* that must be followed. */
747 if (suboffset
>= 0) {
748 bufp
= *((char **) bufp
) + suboffset
;
/* Cast away the const added for pointer arithmetic above. */
750 return (char *) bufp
;
754 /////////////// MemviewDtypeToObject.proto ///////////////
756 {{if to_py_function
}}
757 static PyObject
*{{get_function
}}(const char *itemp
); /* proto */
760 {{if from_py_function
}}
761 static int {{set_function
}}(const char *itemp
, PyObject
*obj
); /* proto */
764 /////////////// MemviewDtypeToObject ///////////////
766 {{#__pyx_memview_<dtype_name>_to_object}}
768 /* Convert a dtype to or from a Python object */
770 {{if to_py_function
}}
771 static PyObject
*{{get_function
}}(const char *itemp
) {
772 return (PyObject
*) {{to_py_function
}}(*({{dtype
}} *) itemp
);
776 {{if from_py_function
}}
777 static int {{set_function
}}(const char *itemp
, PyObject
*obj
) {
778 {{dtype
}} value
= {{from_py_function
}}(obj
);
779 if ({{error_condition
}})
781 *({{dtype
}} *) itemp
= value
;
787 /////////////// MemviewObjectToObject.proto ///////////////
789 /* Function callbacks (for memoryview object) for dtype object */
790 static PyObject
*{{get_function
}}(const char *itemp
); /* proto */
791 static int {{set_function
}}(const char *itemp
, PyObject
*obj
); /* proto */
794 /////////////// MemviewObjectToObject ///////////////
796 static PyObject
*{{get_function
}}(const char *itemp
) {
797 PyObject
*result
= *(PyObject
**) itemp
;
802 static int {{set_function
}}(const char *itemp
, PyObject
*obj
) {
804 Py_DECREF(*(PyObject
**) itemp
);
805 *(PyObject
**) itemp
= obj
;
809 /////////// ToughSlice //////////
811 /* Dimension is indexed with 'start:stop:step' */
813 if (unlikely(__pyx_memoryview_slice_memviewslice(
815 {{src
}}.shape
[{{dim
}}], {{src
}}.strides
[{{dim
}}], {{src
}}.suboffsets
[{{dim
}}],
831 ////////// SimpleSlice //////////
833 /* Dimension is indexed with ':' only */
835 {{dst
}}.shape
[{{new_ndim
}}] = {{src
}}.shape
[{{dim
}}];
836 {{dst
}}.strides
[{{new_ndim
}}] = {{src
}}.strides
[{{dim
}}];
838 {{if access
== 'direct'}}
839 {{dst
}}.suboffsets
[{{new_ndim
}}] = -1;
841 {{dst
}}.suboffsets
[{{new_ndim
}}] = {{src
}}.suboffsets
[{{dim
}}];
842 if ({{src
}}.suboffsets
[{{dim
}}] >= 0)
843 {{suboffset_dim
}} = {{new_ndim
}};
847 ////////// SliceIndex //////////
849 // Dimension is indexed with an integer, we could use the ToughSlice
850 // approach, but this is faster
853 Py_ssize_t __pyx_tmp_idx
= {{idx
}};
854 Py_ssize_t __pyx_tmp_shape
= {{src
}}.shape
[{{dim
}}];
855 Py_ssize_t __pyx_tmp_stride
= {{src
}}.strides
[{{dim
}}];
856 if ({{wraparound
}} && (__pyx_tmp_idx
< 0))
857 __pyx_tmp_idx
+= __pyx_tmp_shape
;
859 if ({{boundscheck
}} && (__pyx_tmp_idx
< 0 || __pyx_tmp_idx
>= __pyx_tmp_shape
)) {
862 PyGILState_STATE __pyx_gilstate_save
= PyGILState_Ensure();
866 PyErr_SetString(PyExc_IndexError
, "Index out of bounds (axis {{dim}})");
870 PyGILState_Release(__pyx_gilstate_save
);
877 {{if all_dimensions_direct
}}
878 {{dst
}}.data
+= __pyx_tmp_idx
* __pyx_tmp_stride
;
880 if ({{suboffset_dim
}} < 0) {
881 {{dst
}}.data
+= __pyx_tmp_idx
* __pyx_tmp_stride
;
883 /* This dimension is the first dimension, or is preceded by */
884 /* direct or indirect dimensions that are indexed away. */
885 /* Hence suboffset_dim must be less than zero, and we can have */
886 /* our data pointer refer to another block by dereferencing. */
887 /* slice.data -> B -> C becomes slice.data -> C */
891 Py_ssize_t __pyx_tmp_suboffset
= {{src
}}.suboffsets
[{{dim
}}];
894 if (__pyx_tmp_suboffset
>= 0)
897 {{dst
}}.data
= *((char **) {{dst
}}.data
) + __pyx_tmp_suboffset
;
902 {{dst
}}.suboffsets
[{{suboffset_dim
}}] += __pyx_tmp_idx
* __pyx_tmp_stride
;
904 /* Note: dimension can not be indirect, the compiler will have */
905 /* issued an error */
912 ////////// FillStrided1DScalar.proto //////////
915 __pyx_fill_slice_
{{dtype_name
}}({{type_decl
}} *p
, Py_ssize_t extent
, Py_ssize_t stride
,
916 size_t itemsize
, void *itemp
);
918 ////////// FillStrided1DScalar //////////
920 /* Fill a slice with a scalar value. The dimension is direct and strided or contiguous */
921 /* This can be used as a callback for the memoryview object to efficienty assign a scalar */
922 /* Currently unused */
924 __pyx_fill_slice_
{{dtype_name
}}({{type_decl
}} *p
, Py_ssize_t extent
, Py_ssize_t stride
,
925 size_t itemsize
, void *itemp
)
928 {{type_decl
}} item
= *(({{type_decl
}} *) itemp
);
931 stride
/= sizeof({{type_decl
}});
932 endp
= p
+ stride
* extent
;