/* Objects/listobject.c */
/* List object implementation */

#include "Python.h"

#ifdef STDC_HEADERS
#include <stddef.h>
#else
#include <sys/types.h>      /* For size_t */
#endif
static int
roundupsize(int n)
{
    unsigned int nbits = 0;
    unsigned int n2 = (unsigned int)n >> 5;

    /* Round up:
     * If n <       256, to a multiple of        8.
     * If n <      2048, to a multiple of       64.
     * If n <     16384, to a multiple of      512.
     * If n <    131072, to a multiple of     4096.
     * If n <   1048576, to a multiple of    32768.
     * If n <   8388608, to a multiple of   262144.
     * If n <  67108864, to a multiple of  2097152.
     * If n < 536870912, to a multiple of 16777216.
     * ...
     * If n < 2**(5+3*i), to a multiple of 2**(3*i).
     *
     * This over-allocates proportional to the list size, making room
     * for additional growth.  The over-allocation is mild, but is
     * enough to give linear-time amortized behavior over a long
     * sequence of appends() in the presence of a poorly-performing
     * system realloc() (which is a reality, e.g., across all flavors
     * of Windows, with Win9x behavior being particularly bad -- and
     * we've still got address space fragmentation problems on Win9x
     * even with this scheme, although it requires much longer lists to
     * provoke them than it used to).
     */
    do {
        n2 >>= 3;
        nbits += 3;
    } while (n2);
    return ((n >> nbits) + 1) << nbits;
}
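
/*
 * Worked examples of the rule above (illustrative note, not in the
 * original source): the returned size is the next multiple of 2**nbits
 * past n, where nbits grows by 3 for each factor-of-8 increase in n >> 5.
 *
 *   roundupsize(6)    : nbits = 3, result = ((6 >> 3) + 1) << 3    = 8
 *   roundupsize(100)  : nbits = 3, result = ((100 >> 3) + 1) << 3  = 104
 *   roundupsize(1000) : nbits = 6, result = ((1000 >> 6) + 1) << 6 = 1024
 *   roundupsize(5000) : nbits = 9, result = ((5000 >> 9) + 1) << 9 = 5120
 */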
#define NRESIZE(var, type, nitems) PyMem_RESIZE(var, type, roundupsize(nitems))
PyObject *
PyList_New(int size)
{
    int i;
    PyListObject *op;
    size_t nbytes;
    if (size < 0) {
        PyErr_BadInternalCall();
        return NULL;
    }
    nbytes = size * sizeof(PyObject *);
    /* Check for overflow */
    if (nbytes / sizeof(PyObject *) != (size_t)size) {
        return PyErr_NoMemory();
    }
    /* PyObject_NewVar is inlined */
    op = (PyListObject *) PyObject_MALLOC(sizeof(PyListObject)
                                          + PyGC_HEAD_SIZE);
    if (op == NULL) {
        return PyErr_NoMemory();
    }
    op = (PyListObject *) PyObject_FROM_GC(op);
    if (size <= 0) {
        op->ob_item = NULL;
    }
    else {
        op->ob_item = (PyObject **) PyMem_MALLOC(nbytes);
        if (op->ob_item == NULL) {
            PyObject_FREE(PyObject_AS_GC(op));
            return PyErr_NoMemory();
        }
    }
    PyObject_INIT_VAR(op, &PyList_Type, size);
    for (i = 0; i < size; i++)
        op->ob_item[i] = NULL;
    PyObject_GC_Init(op);
    return (PyObject *) op;
}
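
/*
 * Hypothetical usage sketch (not part of the original file): how the list
 * C API above is typically driven from a Python 2.x extension module.
 * The helper name make_squares is made up for illustration; error handling
 * is abbreviated.
 */
#if 0
static PyObject *
make_squares(int n)
{
    PyObject *list = PyList_New(n);
    int i;

    if (list == NULL)
        return NULL;
    for (i = 0; i < n; i++) {
        PyObject *v = PyInt_FromLong((long)i * i);
        if (v == NULL) {
            Py_DECREF(list);
            return NULL;
        }
        PyList_SET_ITEM(list, i, v);    /* steals the reference to v */
    }
    return list;
}
#endif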
int
PyList_Size(PyObject *op)
{
    if (!PyList_Check(op)) {
        PyErr_BadInternalCall();
        return -1;
    }
    else
        return ((PyListObject *)op) -> ob_size;
}
static PyObject *indexerr;

PyObject *
PyList_GetItem(PyObject *op, int i)
{
    if (!PyList_Check(op)) {
        PyErr_BadInternalCall();
        return NULL;
    }
    if (i < 0 || i >= ((PyListObject *)op) -> ob_size) {
        if (indexerr == NULL)
            indexerr = PyString_FromString(
                "list index out of range");
        PyErr_SetObject(PyExc_IndexError, indexerr);
        return NULL;
    }
    return ((PyListObject *)op) -> ob_item[i];
}
int
PyList_SetItem(register PyObject *op, register int i,
               register PyObject *newitem)
{
    register PyObject *olditem;
    register PyObject **p;
    if (!PyList_Check(op)) {
        Py_XDECREF(newitem);
        PyErr_BadInternalCall();
        return -1;
    }
    if (i < 0 || i >= ((PyListObject *)op) -> ob_size) {
        Py_XDECREF(newitem);
        PyErr_SetString(PyExc_IndexError,
                        "list assignment index out of range");
        return -1;
    }
    p = ((PyListObject *)op) -> ob_item + i;
    olditem = *p;
    *p = newitem;
    Py_XDECREF(olditem);
    return 0;
}
static int
ins1(PyListObject *self, int where, PyObject *v)
{
    int i;
    PyObject **items;
    if (v == NULL) {
        PyErr_BadInternalCall();
        return -1;
    }
    if (self->ob_size == INT_MAX) {
        PyErr_SetString(PyExc_OverflowError,
                        "cannot add more objects to list");
        return -1;
    }
    items = self->ob_item;
    NRESIZE(items, PyObject *, self->ob_size+1);
    if (items == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    if (where < 0)
        where = 0;
    if (where > self->ob_size)
        where = self->ob_size;
    for (i = self->ob_size; --i >= where; )
        items[i+1] = items[i];
    Py_INCREF(v);
    items[where] = v;
    self->ob_item = items;
    self->ob_size++;
    return 0;
}
int
PyList_Insert(PyObject *op, int where, PyObject *newitem)
{
    if (!PyList_Check(op)) {
        PyErr_BadInternalCall();
        return -1;
    }
    return ins1((PyListObject *)op, where, newitem);
}

int
PyList_Append(PyObject *op, PyObject *newitem)
{
    if (!PyList_Check(op)) {
        PyErr_BadInternalCall();
        return -1;
    }
    return ins1((PyListObject *)op,
                (int) ((PyListObject *)op)->ob_size, newitem);
}
/* Methods */

static void
list_dealloc(PyListObject *op)
{
    int i;
    Py_TRASHCAN_SAFE_BEGIN(op)
    PyObject_GC_Fini(op);
    if (op->ob_item != NULL) {
        /* Do it backwards, for Christian Tismer.
           There's a simple test case where somehow this reduces
           thrashing when a *very* large list is created and
           immediately deleted. */
        i = op->ob_size;
        while (--i >= 0) {
            Py_XDECREF(op->ob_item[i]);
        }
        PyMem_FREE(op->ob_item);
    }
    op = (PyListObject *) PyObject_AS_GC(op);
    PyObject_DEL(op);
    Py_TRASHCAN_SAFE_END(op)
}
static int
list_print(PyListObject *op, FILE *fp, int flags)
{
    int i;

    i = Py_ReprEnter((PyObject*)op);
    if (i != 0) {
        if (i < 0)
            return i;
        fprintf(fp, "[...]");
        return 0;
    }
    fprintf(fp, "[");
    for (i = 0; i < op->ob_size; i++) {
        if (i > 0)
            fprintf(fp, ", ");
        if (PyObject_Print(op->ob_item[i], fp, 0) != 0) {
            Py_ReprLeave((PyObject *)op);
            return -1;
        }
    }
    fprintf(fp, "]");
    Py_ReprLeave((PyObject *)op);
    return 0;
}
static PyObject *
list_repr(PyListObject *v)
{
    int i;
    PyObject *s, *temp;
    PyObject *pieces = NULL, *result = NULL;

    i = Py_ReprEnter((PyObject*)v);
    if (i != 0) {
        return i > 0 ? PyString_FromString("[...]") : NULL;
    }

    if (v->ob_size == 0) {
        result = PyString_FromString("[]");
        goto Done;
    }

    pieces = PyList_New(0);
    if (pieces == NULL)
        goto Done;

    /* Do repr() on each element.  Note that this may mutate the list,
       so must refetch the list size on each iteration. */
    for (i = 0; i < v->ob_size; ++i) {
        int status;
        s = PyObject_Repr(v->ob_item[i]);
        if (s == NULL)
            goto Done;
        status = PyList_Append(pieces, s);
        Py_DECREF(s);  /* append created a new ref */
        if (status < 0)
            goto Done;
    }

    /* Add "[]" decorations to the first and last items. */
    assert(PyList_GET_SIZE(pieces) > 0);
    s = PyString_FromString("[");
    if (s == NULL)
        goto Done;
    temp = PyList_GET_ITEM(pieces, 0);
    PyString_ConcatAndDel(&s, temp);
    PyList_SET_ITEM(pieces, 0, s);
    if (s == NULL)
        goto Done;

    s = PyString_FromString("]");
    if (s == NULL)
        goto Done;
    temp = PyList_GET_ITEM(pieces, PyList_GET_SIZE(pieces) - 1);
    PyString_ConcatAndDel(&temp, s);
    PyList_SET_ITEM(pieces, PyList_GET_SIZE(pieces) - 1, temp);
    if (temp == NULL)
        goto Done;

    /* Paste them all together with ", " between. */
    s = PyString_FromString(", ");
    if (s == NULL)
        goto Done;
    result = _PyString_Join(s, pieces);
    Py_DECREF(s);

Done:
    Py_XDECREF(pieces);
    Py_ReprLeave((PyObject *)v);
    return result;
}
static int
list_length(PyListObject *a)
{
    return a->ob_size;
}
static int
list_contains(PyListObject *a, PyObject *el)
{
    int i;

    for (i = 0; i < a->ob_size; ++i) {
        int cmp = PyObject_RichCompareBool(el, PyList_GET_ITEM(a, i),
                                           Py_EQ);
        if (cmp > 0)
            return 1;
        else if (cmp < 0)
            return -1;
    }
    return 0;
}
static PyObject *
list_item(PyListObject *a, int i)
{
    if (i < 0 || i >= a->ob_size) {
        if (indexerr == NULL)
            indexerr = PyString_FromString(
                "list index out of range");
        PyErr_SetObject(PyExc_IndexError, indexerr);
        return NULL;
    }
    Py_INCREF(a->ob_item[i]);
    return a->ob_item[i];
}
static PyObject *
list_slice(PyListObject *a, int ilow, int ihigh)
{
    PyListObject *np;
    int i;
    if (ilow < 0)
        ilow = 0;
    else if (ilow > a->ob_size)
        ilow = a->ob_size;
    if (ihigh < ilow)
        ihigh = ilow;
    else if (ihigh > a->ob_size)
        ihigh = a->ob_size;
    np = (PyListObject *) PyList_New(ihigh - ilow);
    if (np == NULL)
        return NULL;
    for (i = ilow; i < ihigh; i++) {
        PyObject *v = a->ob_item[i];
        Py_INCREF(v);
        np->ob_item[i - ilow] = v;
    }
    return (PyObject *)np;
}
PyObject *
PyList_GetSlice(PyObject *a, int ilow, int ihigh)
{
    if (!PyList_Check(a)) {
        PyErr_BadInternalCall();
        return NULL;
    }
    return list_slice((PyListObject *)a, ilow, ihigh);
}
static PyObject *
list_concat(PyListObject *a, PyObject *bb)
{
    int size;
    int i;
    PyListObject *np;
    if (!PyList_Check(bb)) {
        PyErr_Format(PyExc_TypeError,
                     "can only concatenate list (not \"%.200s\") to list",
                     bb->ob_type->tp_name);
        return NULL;
    }
#define b ((PyListObject *)bb)
    size = a->ob_size + b->ob_size;
    np = (PyListObject *) PyList_New(size);
    if (np == NULL) {
        return NULL;
    }
    for (i = 0; i < a->ob_size; i++) {
        PyObject *v = a->ob_item[i];
        Py_INCREF(v);
        np->ob_item[i] = v;
    }
    for (i = 0; i < b->ob_size; i++) {
        PyObject *v = b->ob_item[i];
        Py_INCREF(v);
        np->ob_item[i + a->ob_size] = v;
    }
    return (PyObject *)np;
#undef b
}
static PyObject *
list_repeat(PyListObject *a, int n)
{
    int i, j;
    int size;
    PyListObject *np;
    PyObject **p;
    if (n < 0)
        n = 0;
    size = a->ob_size * n;
    np = (PyListObject *) PyList_New(size);
    if (np == NULL)
        return NULL;
    p = np->ob_item;
    for (i = 0; i < n; i++) {
        for (j = 0; j < a->ob_size; j++) {
            *p = a->ob_item[j];
            Py_INCREF(*p);
            p++;
        }
    }
    return (PyObject *) np;
}
static int
list_ass_slice(PyListObject *a, int ilow, int ihigh, PyObject *v)
{
    /* Because [X]DECREF can recursively invoke list operations on
       this list, we must postpone all [X]DECREF activity until
       after the list is back in its canonical shape.  Therefore
       we must allocate an additional array, 'recycle', into which
       we temporarily copy the items that are deleted from the
       list. :-( */
    PyObject **recycle, **p;
    PyObject **item;
    int n;  /* Size of replacement list */
    int d;  /* Change in size */
    int k;  /* Loop index */
#define b ((PyListObject *)v)
    if (v == NULL)
        n = 0;
    else if (PyList_Check(v)) {
        n = b->ob_size;
        if (a == b) {
            /* Special case "a[i:j] = a" -- copy b first */
            int ret;
            v = list_slice(b, 0, n);
            ret = list_ass_slice(a, ilow, ihigh, v);
            Py_DECREF(v);
            return ret;
        }
    }
    else {
        PyErr_Format(PyExc_TypeError,
                     "must assign list (not \"%.200s\") to slice",
                     v->ob_type->tp_name);
        return -1;
    }
    if (ilow < 0)
        ilow = 0;
    else if (ilow > a->ob_size)
        ilow = a->ob_size;
    if (ihigh < ilow)
        ihigh = ilow;
    else if (ihigh > a->ob_size)
        ihigh = a->ob_size;
    item = a->ob_item;
    d = n - (ihigh-ilow);
    if (ihigh > ilow)
        p = recycle = PyMem_NEW(PyObject *, (ihigh-ilow));
    else
        p = recycle = NULL;
    if (d <= 0) {  /* Delete -d items; recycle ihigh-ilow items */
        for (k = ilow; k < ihigh; k++)
            *p++ = item[k];
        if (d < 0) {
            for (/*k = ihigh*/; k < a->ob_size; k++)
                item[k+d] = item[k];
            a->ob_size += d;
            NRESIZE(item, PyObject *, a->ob_size);  /* Can't fail */
            a->ob_item = item;
        }
    }
    else {  /* Insert d items; recycle ihigh-ilow items */
        NRESIZE(item, PyObject *, a->ob_size + d);
        if (item == NULL) {
            if (recycle != NULL)
                PyMem_DEL(recycle);
            PyErr_NoMemory();
            return -1;
        }
        for (k = a->ob_size; --k >= ihigh; )
            item[k+d] = item[k];
        for (/*k = ihigh-1*/; k >= ilow; --k)
            *p++ = item[k];
        a->ob_item = item;
        a->ob_size += d;
    }
    for (k = 0; k < n; k++, ilow++) {
        PyObject *w = b->ob_item[k];
        Py_XINCREF(w);
        item[ilow] = w;
    }
    if (recycle) {
        while (--p >= recycle)
            Py_XDECREF(*p);
        PyMem_DEL(recycle);
    }
    return 0;
#undef b
}
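
/*
 * Worked example for the bookkeeping above (illustrative note, not in the
 * original source).  For a 5-element list a, the assignment a[1:3] = [x]
 * reaches list_ass_slice with ilow = 1, ihigh = 3, n = 1, so
 * d = n - (ihigh - ilow) = -1: the two old items a[1] and a[2] are copied
 * into 'recycle', the tail is shifted left by one slot, and only after the
 * list is back in a consistent shape are the recycled items DECREF'ed.
 */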
int
PyList_SetSlice(PyObject *a, int ilow, int ihigh, PyObject *v)
{
    if (!PyList_Check(a)) {
        PyErr_BadInternalCall();
        return -1;
    }
    return list_ass_slice((PyListObject *)a, ilow, ihigh, v);
}
static PyObject *
list_inplace_repeat(PyListObject *self, int n)
{
    PyObject **items;
    int size, i, j;

    size = PyList_GET_SIZE(self);
    if (size == 0) {
        Py_INCREF(self);
        return (PyObject *)self;
    }

    items = self->ob_item;

    if (n < 1) {
        self->ob_item = NULL;
        self->ob_size = 0;
        for (i = 0; i < size; i++)
            Py_XDECREF(items[i]);
        PyMem_DEL(items);
        Py_INCREF(self);
        return (PyObject *)self;
    }

    NRESIZE(items, PyObject*, size*n);
    if (items == NULL) {
        PyErr_NoMemory();
        goto finally;
    }
    self->ob_item = items;
    for (i = 1; i < n; i++) {  /* Start counting at 1, not 0 */
        for (j = 0; j < size; j++) {
            PyObject *o = PyList_GET_ITEM(self, j);
            Py_INCREF(o);
            PyList_SET_ITEM(self, self->ob_size++, o);
        }
    }
    Py_INCREF(self);
    return (PyObject *)self;
finally:
    return NULL;
}
static int
list_ass_item(PyListObject *a, int i, PyObject *v)
{
    PyObject *old_value;
    if (i < 0 || i >= a->ob_size) {
        PyErr_SetString(PyExc_IndexError,
                        "list assignment index out of range");
        return -1;
    }
    if (v == NULL)
        return list_ass_slice(a, i, i+1, v);
    Py_INCREF(v);
    old_value = a->ob_item[i];
    a->ob_item[i] = v;
    Py_DECREF(old_value);
    return 0;
}
static PyObject *
ins(PyListObject *self, int where, PyObject *v)
{
    if (ins1(self, where, v) != 0)
        return NULL;
    Py_INCREF(Py_None);
    return Py_None;
}
static PyObject *
listinsert(PyListObject *self, PyObject *args)
{
    int i;
    PyObject *v;
    if (!PyArg_ParseTuple(args, "iO:insert", &i, &v))
        return NULL;
    return ins(self, i, v);
}
static PyObject *
listappend(PyListObject *self, PyObject *args)
{
    PyObject *v;
    if (!PyArg_ParseTuple(args, "O:append", &v))
        return NULL;
    return ins(self, (int) self->ob_size, v);
}
static int
listextend_internal(PyListObject *self, PyObject *b)
{
    PyObject **items;
    int selflen = PyList_GET_SIZE(self);
    int blen;
    register int i;

    if (PyObject_Size(b) == 0) {
        /* short circuit when b is empty */
        Py_DECREF(b);
        return 0;
    }

    if (self == (PyListObject*)b) {
        /* as in list_ass_slice() we must special case the
         * situation: a.extend(a)
         *
         * XXX: I think this way ought to be faster than using
         * list_slice() the way list_ass_slice() does.
         */
        Py_DECREF(b);
        b = PyList_New(selflen);
        if (!b)
            return -1;
        for (i = 0; i < selflen; i++) {
            PyObject *o = PyList_GET_ITEM(self, i);
            Py_INCREF(o);
            PyList_SET_ITEM(b, i, o);
        }
    }

    blen = PyObject_Size(b);

    /* resize a using idiom */
    items = self->ob_item;
    NRESIZE(items, PyObject*, selflen + blen);
    if (items == NULL) {
        PyErr_NoMemory();
        Py_DECREF(b);
        return -1;
    }

    self->ob_item = items;

    /* populate the end of self with b's items */
    for (i = 0; i < blen; i++) {
        PyObject *o = PySequence_Fast_GET_ITEM(b, i);
        Py_INCREF(o);
        PyList_SET_ITEM(self, self->ob_size++, o);
    }
    Py_DECREF(b);
    return 0;
}
static PyObject *
list_inplace_concat(PyListObject *self, PyObject *other)
{
    other = PySequence_Fast(other, "argument to += must be iterable");
    if (!other)
        return NULL;

    if (listextend_internal(self, other) < 0)
        return NULL;

    Py_INCREF(self);
    return (PyObject *)self;
}
static PyObject *
listextend(PyListObject *self, PyObject *args)
{
    PyObject *b;

    if (!PyArg_ParseTuple(args, "O:extend", &b))
        return NULL;

    b = PySequence_Fast(b, "list.extend() argument must be iterable");
    if (!b)
        return NULL;

    if (listextend_internal(self, b) < 0)
        return NULL;

    Py_INCREF(Py_None);
    return Py_None;
}
static PyObject *
listpop(PyListObject *self, PyObject *args)
{
    int i = -1;
    PyObject *v;
    if (!PyArg_ParseTuple(args, "|i:pop", &i))
        return NULL;
    if (self->ob_size == 0) {
        /* Special-case most common failure cause */
        PyErr_SetString(PyExc_IndexError, "pop from empty list");
        return NULL;
    }
    if (i < 0)
        i += self->ob_size;
    if (i < 0 || i >= self->ob_size) {
        PyErr_SetString(PyExc_IndexError, "pop index out of range");
        return NULL;
    }
    v = self->ob_item[i];
    Py_INCREF(v);
    if (list_ass_slice(self, i, i+1, (PyObject *)NULL) != 0) {
        Py_DECREF(v);
        return NULL;
    }
    return v;
}
/* New quicksort implementation for arrays of object pointers.
   Thanks to discussions with Tim Peters. */

/* CMPERROR is returned by our comparison function when an error
   occurred.  This is the largest negative integer (0x80000000 on a
   32-bit system). */
#define CMPERROR ( (int) ((unsigned int)1 << (8*sizeof(int) - 1)) )
/* Comparison function.  Takes care of calling a user-supplied
   comparison function (any callable Python object).  Calls the
   standard comparison function, PyObject_Compare(), if the user-
   supplied function is NULL. */

static int
docompare(PyObject *x, PyObject *y, PyObject *compare)
{
    PyObject *args, *res;
    int i;

    if (compare == NULL) {
        /* NOTE: we rely on the fact here that the sorting algorithm
           only ever checks whether k<0, i.e., whether x<y.  So we
           invoke the rich comparison function with Py_LT ('<'), and
           return -1 when it returns true and 0 when it returns
           false. */
        i = PyObject_RichCompareBool(x, y, Py_LT);
        if (i < 0)
            return CMPERROR;
        else
            return -i;
    }

    args = Py_BuildValue("(OO)", x, y);
    if (args == NULL)
        return CMPERROR;
    res = PyEval_CallObject(compare, args);
    Py_DECREF(args);
    if (res == NULL)
        return CMPERROR;
    if (!PyInt_Check(res)) {
        Py_DECREF(res);
        PyErr_SetString(PyExc_TypeError,
                        "comparison function must return int");
        return CMPERROR;
    }
    i = PyInt_AsLong(res);
    Py_DECREF(res);
    if (i < 0)
        return -1;
    if (i > 0)
        return 1;
    return 0;
}
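
/*
 * Illustrative note (not in the original source): a Python-level call such
 * as  list.sort(lambda x, y: cmp(y, x))  routes every comparison through
 * the Py_BuildValue / PyEval_CallObject path above, and the int returned by
 * the callable is collapsed to -1, 0 or 1.  With no argument, only the
 * cheaper PyObject_RichCompareBool(x, y, Py_LT) branch is taken.
 */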
/* MINSIZE is the smallest array that will get a full-blown samplesort
   treatment; smaller arrays are sorted using binary insertion.  It must
   be at least 7 for the samplesort implementation to work.  Binary
   insertion does fewer compares, but can suffer O(N**2) data movement.
   The more expensive compares, the larger MINSIZE should be. */
#define MINSIZE 100

/* MINPARTITIONSIZE is the smallest array slice samplesort will bother to
   partition; smaller slices are passed to binarysort.  It must be at
   least 2, and no larger than MINSIZE.  Setting it higher reduces the #
   of compares slowly, but increases the amount of data movement quickly.
   The value here was chosen assuming a compare costs ~25x more than
   swapping a pair of memory-resident pointers -- but under that assumption,
   changing the value by a few dozen more or less has aggregate effect
   under 1%.  So the value is crucial, but not touchy <wink>. */
#define MINPARTITIONSIZE 40

/* MAXMERGE is the largest number of elements we'll always merge into
   a known-to-be sorted chunk via binary insertion, regardless of the
   size of that chunk.  Given a chunk of N sorted elements, and a group
   of K unknowns, the largest K for which it's better to do insertion
   (than a full-blown sort) is a complicated function of N and K mostly
   involving the expected number of compares and data moves under each
   approach, and the relative cost of those operations on a specific
   architecture.  The fixed value here is conservative, and should be a
   clear win regardless of architecture or N. */
#define MAXMERGE 15

/* STACKSIZE is the size of our work stack.  A rough estimate is that
   this allows us to sort arrays of size N where
   N / ln(N) = MINPARTITIONSIZE * 2**STACKSIZE, so 60 is more than enough
   for arrays of size 2**64.  Because we push the biggest partition
   first, the worst case occurs when all subarrays are always partitioned
   exactly in two. */
#define STACKSIZE 60
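
/*
 * Worked check of the STACKSIZE estimate above (illustrative note, not in
 * the original source): MINPARTITIONSIZE * 2**STACKSIZE = 40 * 2**60, which
 * is roughly 4.6e19, while even N = 2**64 gives N / ln(N) of only about
 * 4.2e17, so a work stack of 60 slots is comfortably large enough.
 */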
#define SETK(X,Y) if ((k = docompare(X,Y,compare))==CMPERROR) goto fail
/* binarysort is the best method for sorting small arrays: it does
   few compares, but can do data movement quadratic in the number of
   elements.
   [lo, hi) is a contiguous slice of a list, and is sorted via
   binary insertion.
   On entry, must have lo <= start <= hi, and that [lo, start) is already
   sorted (pass start == lo if you don't know!).
   If docompare complains (returns CMPERROR) return -1, else 0.
   Even in case of error, the output slice will be some permutation of
   the input (nothing is lost or duplicated).
*/

static int
binarysort(PyObject **lo, PyObject **hi, PyObject **start, PyObject *compare)
     /* compare -- comparison function object, or NULL for default */
{
    /* assert lo <= start <= hi
       assert [lo, start) is sorted */
    register int k;
    register PyObject **l, **p, **r;
    register PyObject *pivot;

    if (lo == start)
        ++start;
    for (; start < hi; ++start) {
        /* set l to where *start belongs */
        l = lo;
        r = start;
        pivot = *r;
        do {
            p = l + ((r - l) >> 1);
            SETK(pivot, *p);
            if (k < 0)
                r = p;
            else
                l = p + 1;
        } while (l < r);
        /* Pivot should go at l -- slide over to make room.
           Caution: using memmove is much slower under MSVC 5;
           we're not usually moving many slots. */
        for (p = start; p > l; --p)
            *p = *(p-1);
        *l = pivot;
    }
    return 0;

 fail:
    return -1;
}
/* samplesortslice is the sorting workhorse.
   [lo, hi) is a contiguous slice of a list, to be sorted in place.
   On entry, must have lo <= hi,
   If docompare complains (returns CMPERROR) return -1, else 0.
   Even in case of error, the output slice will be some permutation of
   the input (nothing is lost or duplicated).

   samplesort is basically quicksort on steroids:  a power of 2 close
   to n/ln(n) is computed, and that many elements (less 1) are picked at
   random from the array and sorted.  These 2**k - 1 elements are then
   used as preselected pivots for an equal number of quicksort
   partitioning steps, partitioning the slice into 2**k chunks each of
   size about ln(n).  These small final chunks are then usually handled
   by binarysort.  Note that when k=1, this is roughly the same as an
   ordinary quicksort using a random pivot, and when k=2 this is roughly
   a median-of-3 quicksort.  From that view, using k ~= lg(n/ln(n)) makes
   this a "median of n/ln(n)" quicksort.  You can also view it as a kind
   of bucket sort, where 2**k-1 bucket boundaries are picked dynamically.

   The large number of samples makes a quadratic-time case almost
   impossible, and asymptotically drives the average-case number of
   compares from quicksort's 2 N ln N (or 12/7 N ln N for the median-of-
   3 variant) down to N lg N.

   We also play lots of low-level tricks to cut the number of compares.

   Very obscure:  To avoid using extra memory, the PPs are stored in the
   array and shuffled around as partitioning proceeds.  At the start of a
   partitioning step, we'll have 2**m-1 (for some m) PPs in sorted order,
   adjacent (either on the left or the right!) to a chunk of X elements
   that are to be partitioned: P X or X P.  In either case we need to
   shuffle things *in place* so that the 2**(m-1) smaller PPs are on the
   left, followed by the PP to be used for this step (that's the middle
   of the PPs), followed by X, followed by the 2**(m-1) larger PPs:
   P X or X P -> Psmall pivot X Plarge
   and the order of the PPs must not be altered.  It can take a while
   to realize this isn't trivial!  It can take even longer <wink> to
   understand why the simple code below works, using only 2**(m-1) swaps.
   The key is that the order of the X elements isn't necessarily
   preserved:  X can end up as some cyclic permutation of its original
   order.  That's OK, because X is unsorted anyway.  If the order of X
   had to be preserved too, the simplest method I know of using O(1)
   scratch storage requires len(X) + 2**(m-1) swaps, spread over 2 passes.
   Since len(X) is typically several times larger than 2**(m-1), that
   would slow things down.
*/
struct SamplesortStackNode {
    /* Represents a slice of the array, from (& including) lo up
       to (but excluding) hi.  "extra" additional & adjacent elements
       are pre-selected pivots (PPs), spanning [lo-extra, lo) if
       extra > 0, or [hi, hi-extra) if extra < 0.  The PPs are
       already sorted, but nothing is known about the other elements
       in [lo, hi).  |extra| is always one less than a power of 2.
       When extra is 0, we're out of PPs, and the slice must be
       sorted by some other means. */
    PyObject **lo;
    PyObject **hi;
    int extra;
};
/* The number of PPs we want is 2**k - 1, where 2**k is as close to
   N / ln(N) as possible.  So k ~= lg(N / ln(N)).  Calling libm routines
   is undesirable, so cutoff values are canned in the "cutoff" table
   below:  cutoff[i] is the smallest N such that k == CUTOFFBASE + i. */
#define CUTOFFBASE 4
static long cutoff[] = {
    43,          /* smallest N such that k == 4 */
    106,         /* etc */
    250,
    576,
    1298,
    2885,
    6339,
    13805,
    29843,
    64116,
    137030,
    291554,
    617916,
    1305130,
    2748295,
    5771662,
    12091672,
    25276798,
    52734615,
    109820537,
    228324027,
    473977813,
    982548444,   /* smallest N such that k == 26 */
    2034159050   /* largest N that fits in signed 32-bit; k == 27 */
};
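
/*
 * Worked example of the table above (illustrative note, not in the
 * original source): for n = 1000, the scan in samplesortslice below stops
 * at cutoff[4] = 1298 (since 1000 < 1298), so k = 4 - 1 + CUTOFFBASE = 7
 * and extra = 2**7 - 1 = 127 preselected pivots are used -- close to
 * n / ln(n) = 1000 / 6.9, whose nearest power of 2 is 128.
 */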
static int
samplesortslice(PyObject **lo, PyObject **hi, PyObject *compare)
     /* compare -- comparison function object, or NULL for default */
{
    register PyObject **l, **r;
    register PyObject *tmp, *pivot;
    register int k;
    int n, extra, top, extraOnRight;
    struct SamplesortStackNode stack[STACKSIZE];

    /* assert lo <= hi */
    n = hi - lo;

    /* ----------------------------------------------------------
     * Special cases
     * --------------------------------------------------------*/
    if (n < 2)
        return 0;

    /* Set r to the largest value such that [lo,r) is sorted.
       This catches the already-sorted case, the all-the-same
       case, and the appended-a-few-elements-to-a-sorted-list case.
       If the array is unsorted, we're very likely to get out of
       the loop fast, so the test is cheap if it doesn't pay off.
    */
    /* assert lo < hi */
    for (r = lo+1; r < hi; ++r) {
        SETK(*r, *(r-1));
        if (k < 0)
            break;
    }
    /* [lo,r) is sorted, [r,hi) unknown.  Get out cheap if there are
       few unknowns, or few elements in total. */
    if (hi - r <= MAXMERGE || n < MINSIZE)
        return binarysort(lo, hi, r, compare);
    /* Check for the array already being reverse-sorted.  Typical
       benchmark-driven silliness <wink>. */
    /* assert lo < hi */
    for (r = lo+1; r < hi; ++r) {
        SETK(*(r-1), *r);
        if (k < 0)
            break;
    }
    if (hi - r <= MAXMERGE) {
        /* Reverse the reversed prefix, then insert the tail */
        PyObject **originalr = r;
        l = lo;
        do {
            --r;
            tmp = *l; *l = *r; *r = tmp;
            ++l;
        } while (l < r);
        return binarysort(lo, hi, originalr, compare);
    }
    /* ----------------------------------------------------------
     * Normal case setup: a large array without obvious pattern.
     * --------------------------------------------------------*/

    /* extra := a power of 2 ~= n/ln(n), less 1.
       First find the smallest extra s.t. n < cutoff[extra] */
    for (extra = 0;
         extra < sizeof(cutoff) / sizeof(cutoff[0]);
         ++extra) {
        if (n < cutoff[extra])
            break;
        /* note that if we fall out of the loop, the value of
           extra still makes *sense*, but may be smaller than
           we would like (but the array has more than ~= 2**31
           elements in this case!) */
    }
    /* Now k == extra - 1 + CUTOFFBASE.  The smallest value k can
       have is CUTOFFBASE-1, so
       assert MINSIZE >= 2**(CUTOFFBASE-1) - 1 */
    extra = (1 << (extra - 1 + CUTOFFBASE)) - 1;
    /* assert extra > 0 and n >= extra */

    /* Swap that many values to the start of the array.  The
       selection of elements is pseudo-random, but the same on
       every run (this is intentional! timing algorithm changes is
       a pain if timing varies across runs). */
    {
        unsigned int seed = n / extra;  /* arbitrary */
        unsigned int i;
        for (i = 0; i < (unsigned)extra; ++i) {
            /* j := random int in [i, n) */
            unsigned int j;
            seed = seed * 69069 + 7;
            j = i + seed % (n - i);
            tmp = lo[i]; lo[i] = lo[j]; lo[j] = tmp;
        }
    }

    /* Recursively sort the preselected pivots. */
    if (samplesortslice(lo, lo + extra, compare) < 0)
        goto fail;
    top = 0;            /* index of available stack slot */
    lo += extra;        /* point to first unknown */
    extraOnRight = 0;   /* the PPs are at the left end */

    /* ----------------------------------------------------------
     * Partition [lo, hi), and repeat until out of work.
     * --------------------------------------------------------*/
    for (;;) {
        /* assert lo <= hi, so n >= 0 */
        n = hi - lo;

        /* We may not want, or may not be able, to partition:
           If n is small, it's quicker to insert.
           If extra is 0, we're out of pivots, and *must* use
           another method.
        */
        if (n < MINPARTITIONSIZE || extra == 0) {
            if (n >= MINSIZE) {
                /* assert extra == 0
                   This is rare, since the average size
                   of a final block is only about
                   ln(original n). */
                if (samplesortslice(lo, hi, compare) < 0)
                    goto fail;
            }
            else {
                /* Binary insertion should be quicker,
                   and we can take advantage of the PPs
                   already being sorted. */
                if (extraOnRight && extra) {
                    /* swap the PPs to the left end */
                    k = extra;
                    do {
                        tmp = *lo;
                        *lo = *hi;
                        *hi = tmp;
                        ++lo; ++hi;
                    } while (--k);
                }
                if (binarysort(lo - extra, hi, lo,
                               compare) < 0)
                    goto fail;
            }

            /* Find another slice to work on. */
            if (--top < 0)
                break;  /* no more -- done! */
            lo = stack[top].lo;
            hi = stack[top].hi;
            extra = stack[top].extra;
            extraOnRight = 0;
            if (extra < 0) {
                extraOnRight = 1;
                extra = -extra;
            }
            continue;
        }
        /* Pretend the PPs are indexed 0, 1, ..., extra-1.
           Then our preselected pivot is at (extra-1)/2, and we
           want to move the PPs before that to the left end of
           the slice, and the PPs after that to the right end.
           The following section changes extra, lo, hi, and the
           slice such that:
           [lo-extra, lo) contains the smaller PPs.
           *lo == our PP.
           (lo, hi) contains the unknown elements.
           [hi, hi+extra) contains the larger PPs.
        */
        k = extra >>= 1;  /* num PPs to move */
        if (extraOnRight) {
            /* Swap the smaller PPs to the left end.
               Note that this loop actually moves k+1 items:
               the last is our PP */
            do {
                tmp = *lo; *lo = *hi; *hi = tmp;
                ++lo; ++hi;
            } while (k--);
        }
        else {
            /* Swap the larger PPs to the right end. */
            while (k--) {
                --lo; --hi;
                tmp = *lo; *lo = *hi; *hi = tmp;
            }
        }
        --lo;   /* *lo is now our PP */
        pivot = *lo;

        /* Now an almost-ordinary quicksort partition step.
           Note that most of the time is spent here!
           Only odd thing is that we partition into < and >=,
           instead of the usual <= and >=.  This helps when
           there are lots of duplicates of different values,
           because it eventually tends to make subfiles
           "pure" (all duplicates), and we special-case for
           duplicates later. */
        l = lo + 1;
        r = hi - 1;
        /* assert lo < l < r < hi (small n weeded out above) */
        do {
            /* slide l right, looking for key >= pivot */
            do {
                SETK(*l, pivot);
                if (k < 0)
                    ++l;
                else
                    break;
            } while (l < r);

            /* slide r left, looking for key < pivot */
            while (l < r) {
                register PyObject *rval = *r--;
                SETK(rval, pivot);
                if (k < 0) {
                    /* swap and advance */
                    r[1] = *l;
                    *l++ = rval;
                    break;
                }
            }

        } while (l < r);

        /* assert lo < r <= l < hi
           assert r == l or r+1 == l
           everything to the left of l is < pivot, and
           everything to the right of r is >= pivot */

        if (l == r) {
            SETK(*r, pivot);
            if (k < 0)
                ++l;
            else
                --r;
        }
        /* assert lo <= r and r+1 == l and l <= hi
           assert r == lo or a[r] < pivot
           assert a[lo] is pivot
           assert l == hi or a[l] >= pivot
           Swap the pivot into "the middle", so we can henceforth
           ignore it.
        */
        *lo = *r;
        *r = pivot;
        /* The following is true now, & will be preserved:
           All in [lo,r) are < pivot
           All in [r,l) == pivot (& so can be ignored)
           All in [l,hi) are >= pivot */

        /* Check for duplicates of the pivot.  One compare is
           wasted if there are no duplicates, but can win big
           when there are.
           Tricky: we're sticking to "<" compares, so deduce
           equality indirectly.  We know pivot <= *l, so they're
           equal iff not pivot < *l.
        */
        while (l < hi) {
            /* pivot <= *l known */
            SETK(pivot, *l);
            if (k < 0)
                break;
            else
                /* <= and not < implies == */
                ++l;
        }

        /* assert lo <= r < l <= hi
           Partitions are [lo, r) and [l, hi) */

        /* push fattest first; remember we still have extra PPs
           to the left of the left chunk and to the right of
           the right chunk! */
        /* assert top < STACKSIZE */
        if (r - lo <= hi - l) {
            /* second is bigger */
            stack[top].lo = l;
            stack[top].hi = hi;
            stack[top].extra = -extra;
            hi = r;
            extraOnRight = 0;
        }
        else {
            /* first is bigger */
            stack[top].lo = lo;
            stack[top].hi = r;
            stack[top].extra = extra;
            lo = l;
            extraOnRight = 1;
        }
        ++top;

    }   /* end of partitioning loop */
    return 0;

 fail:
    return -1;
}

#undef SETK
staticforward PyTypeObject immutable_list_type;
static PyObject *
listsort(PyListObject *self, PyObject *args)
{
    int err;
    PyObject *compare = NULL;

    if (args != NULL) {
        if (!PyArg_ParseTuple(args, "|O:sort", &compare))
            return NULL;
    }
    self->ob_type = &immutable_list_type;
    err = samplesortslice(self->ob_item,
                          self->ob_item + self->ob_size,
                          compare);
    self->ob_type = &PyList_Type;
    if (err < 0)
        return NULL;
    Py_INCREF(Py_None);
    return Py_None;
}
int
PyList_Sort(PyObject *v)
{
    if (v == NULL || !PyList_Check(v)) {
        PyErr_BadInternalCall();
        return -1;
    }
    v = listsort((PyListObject *)v, (PyObject *)NULL);
    if (v == NULL)
        return -1;
    Py_DECREF(v);
    return 0;
}
static void
_listreverse(PyListObject *self)
{
    register PyObject **p, **q;
    register PyObject *tmp;

    if (self->ob_size > 1) {
        for (p = self->ob_item, q = self->ob_item + self->ob_size - 1;
             p < q;
             p++, q--)
        {
            tmp = *p;
            *p = *q;
            *q = tmp;
        }
    }
}
static PyObject *
listreverse(PyListObject *self, PyObject *args)
{
    if (!PyArg_ParseTuple(args, ":reverse"))
        return NULL;
    _listreverse(self);
    Py_INCREF(Py_None);
    return Py_None;
}
int
PyList_Reverse(PyObject *v)
{
    if (v == NULL || !PyList_Check(v)) {
        PyErr_BadInternalCall();
        return -1;
    }
    _listreverse((PyListObject *)v);
    return 0;
}
PyObject *
PyList_AsTuple(PyObject *v)
{
    PyObject *w;
    PyObject **p;
    int n;
    if (v == NULL || !PyList_Check(v)) {
        PyErr_BadInternalCall();
        return NULL;
    }
    n = ((PyListObject *)v)->ob_size;
    w = PyTuple_New(n);
    if (w == NULL)
        return NULL;
    p = ((PyTupleObject *)w)->ob_item;
    memcpy((void *)p,
           (void *)((PyListObject *)v)->ob_item,
           n*sizeof(PyObject *));
    while (--n >= 0) {
        Py_INCREF(*p);
        p++;
    }
    return w;
}
static PyObject *
listindex(PyListObject *self, PyObject *args)
{
    int i;
    PyObject *v;

    if (!PyArg_ParseTuple(args, "O:index", &v))
        return NULL;
    for (i = 0; i < self->ob_size; i++) {
        int cmp = PyObject_RichCompareBool(self->ob_item[i], v, Py_EQ);
        if (cmp > 0)
            return PyInt_FromLong((long)i);
        else if (cmp < 0)
            return NULL;
    }
    PyErr_SetString(PyExc_ValueError, "list.index(x): x not in list");
    return NULL;
}
static PyObject *
listcount(PyListObject *self, PyObject *args)
{
    int count = 0;
    int i;
    PyObject *v;

    if (!PyArg_ParseTuple(args, "O:count", &v))
        return NULL;
    for (i = 0; i < self->ob_size; i++) {
        int cmp = PyObject_RichCompareBool(self->ob_item[i], v, Py_EQ);
        if (cmp > 0)
            count++;
        else if (cmp < 0)
            return NULL;
    }
    return PyInt_FromLong((long)count);
}
static PyObject *
listremove(PyListObject *self, PyObject *args)
{
    int i;
    PyObject *v;

    if (!PyArg_ParseTuple(args, "O:remove", &v))
        return NULL;
    for (i = 0; i < self->ob_size; i++) {
        int cmp = PyObject_RichCompareBool(self->ob_item[i], v, Py_EQ);
        if (cmp > 0) {
            if (list_ass_slice(self, i, i+1,
                               (PyObject *)NULL) != 0)
                return NULL;
            Py_INCREF(Py_None);
            return Py_None;
        }
        else if (cmp < 0)
            return NULL;
    }
    PyErr_SetString(PyExc_ValueError, "list.remove(x): x not in list");
    return NULL;
}
static int
list_traverse(PyListObject *o, visitproc visit, void *arg)
{
    int i, err;
    PyObject *x;

    for (i = o->ob_size; --i >= 0; ) {
        x = o->ob_item[i];
        if (x != NULL) {
            err = visit(x, arg);
            if (err)
                return err;
        }
    }
    return 0;
}
static int
list_clear(PyListObject *lp)
{
    (void) PyList_SetSlice((PyObject *)lp, 0, lp->ob_size, 0);
    return 0;
}
static PyObject *
list_richcompare(PyObject *v, PyObject *w, int op)
{
    PyListObject *vl, *wl;
    int i;

    if (!PyList_Check(v) || !PyList_Check(w)) {
        Py_INCREF(Py_NotImplemented);
        return Py_NotImplemented;
    }

    vl = (PyListObject *)v;
    wl = (PyListObject *)w;

    if (vl->ob_size != wl->ob_size && (op == Py_EQ || op == Py_NE)) {
        /* Shortcut: if the lengths differ, the lists differ */
        PyObject *res;
        if (op == Py_EQ)
            res = Py_False;
        else
            res = Py_True;
        Py_INCREF(res);
        return res;
    }

    /* Search for the first index where items are different */
    for (i = 0; i < vl->ob_size && i < wl->ob_size; i++) {
        int k = PyObject_RichCompareBool(vl->ob_item[i],
                                         wl->ob_item[i], Py_EQ);
        if (k < 0)
            return NULL;
        if (!k)
            break;
    }

    if (i >= vl->ob_size || i >= wl->ob_size) {
        /* No more items to compare -- compare sizes */
        int vs = vl->ob_size;
        int ws = wl->ob_size;
        int cmp;
        PyObject *res;
        switch (op) {
        case Py_LT: cmp = vs <  ws; break;
        case Py_LE: cmp = vs <= ws; break;
        case Py_EQ: cmp = vs == ws; break;
        case Py_NE: cmp = vs != ws; break;
        case Py_GT: cmp = vs >  ws; break;
        case Py_GE: cmp = vs >= ws; break;
        default: return NULL; /* cannot happen */
        }
        if (cmp)
            res = Py_True;
        else
            res = Py_False;
        Py_INCREF(res);
        return res;
    }

    /* We have an item that differs -- shortcuts for EQ/NE */
    if (op == Py_EQ) {
        Py_INCREF(Py_False);
        return Py_False;
    }
    if (op == Py_NE) {
        Py_INCREF(Py_True);
        return Py_True;
    }

    /* Compare the final item again using the proper operator */
    return PyObject_RichCompare(vl->ob_item[i], wl->ob_item[i], op);
}
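
/*
 * Illustrative examples of the logic above (not in the original source):
 * [1, 2, 3] < [1, 2, 4] scans past the common prefix and is decided by the
 * final PyObject_RichCompare(3, 4, Py_LT); [1, 2] < [1, 2, 3] exhausts the
 * shorter list first and falls into the size-comparison switch, yielding
 * True because 2 < 3.
 */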
static char append_doc[] =
"L.append(object) -- append object to end";
static char extend_doc[] =
"L.extend(list) -- extend list by appending list elements";
static char insert_doc[] =
"L.insert(index, object) -- insert object before index";
static char pop_doc[] =
"L.pop([index]) -> item -- remove and return item at index (default last)";
static char remove_doc[] =
"L.remove(value) -- remove first occurrence of value";
static char index_doc[] =
"L.index(value) -> integer -- return index of first occurrence of value";
static char count_doc[] =
"L.count(value) -> integer -- return number of occurrences of value";
static char reverse_doc[] =
"L.reverse() -- reverse *IN PLACE*";
static char sort_doc[] =
"L.sort([cmpfunc]) -- sort *IN PLACE*; if given, cmpfunc(x, y) -> -1, 0, 1";
static PyMethodDef list_methods[] = {
    {"append",  (PyCFunction)listappend,  METH_VARARGS, append_doc},
    {"insert",  (PyCFunction)listinsert,  METH_VARARGS, insert_doc},
    {"extend",  (PyCFunction)listextend,  METH_VARARGS, extend_doc},
    {"pop",     (PyCFunction)listpop,     METH_VARARGS, pop_doc},
    {"remove",  (PyCFunction)listremove,  METH_VARARGS, remove_doc},
    {"index",   (PyCFunction)listindex,   METH_VARARGS, index_doc},
    {"count",   (PyCFunction)listcount,   METH_VARARGS, count_doc},
    {"reverse", (PyCFunction)listreverse, METH_VARARGS, reverse_doc},
    {"sort",    (PyCFunction)listsort,    METH_VARARGS, sort_doc},
    {NULL,      NULL}   /* sentinel */
};
static PyObject *
list_getattr(PyListObject *f, char *name)
{
    return Py_FindMethod(list_methods, (PyObject *)f, name);
}
static PySequenceMethods list_as_sequence = {
    (inquiry)list_length,               /* sq_length */
    (binaryfunc)list_concat,            /* sq_concat */
    (intargfunc)list_repeat,            /* sq_repeat */
    (intargfunc)list_item,              /* sq_item */
    (intintargfunc)list_slice,          /* sq_slice */
    (intobjargproc)list_ass_item,       /* sq_ass_item */
    (intintobjargproc)list_ass_slice,   /* sq_ass_slice */
    (objobjproc)list_contains,          /* sq_contains */
    (binaryfunc)list_inplace_concat,    /* sq_inplace_concat */
    (intargfunc)list_inplace_repeat,    /* sq_inplace_repeat */
};
PyTypeObject PyList_Type = {
    PyObject_HEAD_INIT(&PyType_Type)
    0,                                      /* ob_size */
    "list",                                 /* tp_name */
    sizeof(PyListObject) + PyGC_HEAD_SIZE,  /* tp_basicsize */
    0,                                      /* tp_itemsize */
    (destructor)list_dealloc,               /* tp_dealloc */
    (printfunc)list_print,                  /* tp_print */
    (getattrfunc)list_getattr,              /* tp_getattr */
    0,                                      /* tp_setattr */
    0,                                      /* tp_compare */
    (reprfunc)list_repr,                    /* tp_repr */
    0,                                      /* tp_as_number */
    &list_as_sequence,                      /* tp_as_sequence */
    0,                                      /* tp_as_mapping */
    0,                                      /* tp_hash */
    0,                                      /* tp_call */
    0,                                      /* tp_str */
    0,                                      /* tp_getattro */
    0,                                      /* tp_setattro */
    0,                                      /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_GC,     /* tp_flags */
    0,                                      /* tp_doc */
    (traverseproc)list_traverse,            /* tp_traverse */
    (inquiry)list_clear,                    /* tp_clear */
    list_richcompare,                       /* tp_richcompare */
};
/* During a sort, we really can't have anyone modifying the list; it could
   cause core dumps.  Thus, we substitute a dummy type that raises an
   explanatory exception when a modifying operation is used.  Caveat:
   comparisons may behave differently; but I guess it's a bad idea anyway to
   compare a list that's being sorted... */

static PyObject *
immutable_list_op(void)
{
    PyErr_SetString(PyExc_TypeError,
                    "a list cannot be modified while it is being sorted");
    return NULL;
}
static PyMethodDef immutable_list_methods[] = {
    {"append",  (PyCFunction)immutable_list_op},
    {"insert",  (PyCFunction)immutable_list_op},
    {"remove",  (PyCFunction)immutable_list_op},
    {"index",   (PyCFunction)listindex},
    {"count",   (PyCFunction)listcount},
    {"reverse", (PyCFunction)immutable_list_op},
    {"sort",    (PyCFunction)immutable_list_op},
    {NULL,      NULL}   /* sentinel */
};
static PyObject *
immutable_list_getattr(PyListObject *f, char *name)
{
    return Py_FindMethod(immutable_list_methods, (PyObject *)f, name);
}
static int
immutable_list_ass(void)
{
    immutable_list_op();
    return -1;
}
static PySequenceMethods immutable_list_as_sequence = {
    (inquiry)list_length,                   /* sq_length */
    (binaryfunc)list_concat,                /* sq_concat */
    (intargfunc)list_repeat,                /* sq_repeat */
    (intargfunc)list_item,                  /* sq_item */
    (intintargfunc)list_slice,              /* sq_slice */
    (intobjargproc)immutable_list_ass,      /* sq_ass_item */
    (intintobjargproc)immutable_list_ass,   /* sq_ass_slice */
    (objobjproc)list_contains,              /* sq_contains */
};
static PyTypeObject immutable_list_type = {
    PyObject_HEAD_INIT(&PyType_Type)
    0,                                      /* ob_size */
    "list (immutable, during sort)",        /* tp_name */
    sizeof(PyListObject) + PyGC_HEAD_SIZE,  /* tp_basicsize */
    0,                                      /* tp_itemsize */
    0,              /* Cannot happen */     /* tp_dealloc */
    (printfunc)list_print,                  /* tp_print */
    (getattrfunc)immutable_list_getattr,    /* tp_getattr */
    0,                                      /* tp_setattr */
    0,              /* Won't be called */   /* tp_compare */
    (reprfunc)list_repr,                    /* tp_repr */
    0,                                      /* tp_as_number */
    &immutable_list_as_sequence,            /* tp_as_sequence */
    0,                                      /* tp_as_mapping */
    0,                                      /* tp_hash */
    0,                                      /* tp_call */
    0,                                      /* tp_str */
    0,                                      /* tp_getattro */
    0,                                      /* tp_setattro */
    0,                                      /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_GC,     /* tp_flags */
    0,                                      /* tp_doc */
    (traverseproc)list_traverse,            /* tp_traverse */
    0,                                      /* tp_clear */
    list_richcompare,                       /* tp_richcompare */
    /* NOTE: This is *not* the standard list_type struct! */
};