/* This file is concerned with allocating and freeing arbitrary-size blocks of
 * physical memory on behalf of the FORK and EXEC system calls. The key data
 * structure used is the hole table, which maintains a list of holes in memory.
 * It is kept sorted in order of increasing memory address. The addresses
 * it contains refer to physical memory, starting at absolute address 0
 * (i.e., they are not relative to the start of PM). During system
 * initialization, the part of memory containing the interrupt vectors,
 * kernel, and PM is "allocated" to mark it as not available and to
 * remove it from the hole list.
 *
 * The entry points into this file are:
 *   alloc_mem:	allocate a given sized chunk of memory
 *   free_mem:	release a previously allocated chunk of memory
 *   mem_init:	initialize the tables when PM starts up
 */
#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>

#include "pagerange.h"
#include "sanitycheck.h"
/* AVL tree of free pages. */
PRIVATE addr_avl addravl;

/* Used for sanity check. */
PRIVATE phys_bytes mem_low, mem_high;
#define assert_range(addr, len)  \
	assert((addr) >= mem_low); \
	assert((addr) + (len) - 1 <= mem_high);
struct hole {
	struct hole *h_next;	/* pointer to next entry on the list */
	phys_clicks h_base;	/* where does the hole begin? */
	phys_clicks h_len;	/* how big is the hole? */
	int freelist, holelist;	/* consistency-check flags */
};
static int startpages;

#define NIL_HOLE (struct hole *) 0

#define _NR_HOLES (_NR_PROCS*2)  /* No. of memory holes maintained by VM */
PRIVATE struct hole hole[_NR_HOLES];

PRIVATE struct hole *hole_head;	/* pointer to first hole */
PRIVATE struct hole *free_slots;/* ptr to list of unused table slots */
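
/* Illustrative sketch (not part of the original file): the declarations
 * above are enough to walk the sorted hole list, e.g. to sum up the total
 * amount of free memory in clicks:
 *
 *	phys_clicks total = 0;
 *	struct hole *hp;
 *	for (hp = hole_head; hp != NIL_HOLE; hp = hp->h_next)
 *		total += hp->h_len;
 */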
FORWARD _PROTOTYPE( void del_slot, (struct hole *prev_ptr, struct hole *hp) );
FORWARD _PROTOTYPE( void merge, (struct hole *hp) );
FORWARD _PROTOTYPE( void free_pages, (phys_bytes addr, int pages) );
FORWARD _PROTOTYPE( phys_bytes alloc_pages, (int pages, int flags,
	phys_bytes *len) );
FORWARD _PROTOTYPE( void holes_sanity_f, (char *fn, int line) );
#define CHECKHOLES holes_sanity_f(__FILE__, __LINE__)

#define PAGESPERGB (1024*1024*1024/VM_PAGE_SIZE) /* 1GB of memory */
#define MAXPAGES (2*PAGESPERGB)
#define CHUNKS BITMAP_CHUNKS(MAXPAGES)
PRIVATE bitchunk_t pagemap[CHUNKS];
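
/* Worked example (illustrative, assuming VM_PAGE_SIZE is 4096 and a 32-bit
 * bitchunk_t): PAGESPERGB = 2^30/2^12 = 262144 pages, so MAXPAGES = 524288
 * and the pagemap needs 524288/32 = 16384 chunks, i.e. 64kB of bitmap to
 * cover 2GB of physical memory at one bit per page.
 */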
/*===========================================================================*
 *===========================================================================*/
PRIVATE void holes_sanity_f(file, line)
char *file;
int line;
{
#define myassert(c) { \
	if(!(c)) { \
		printf("holes_sanity_f:%s:%d: %s failed\n", file, line, #c); \
		panic("assert failed"); } \
	}

	int h, c = 0, n = 0;
	struct hole *hp;
	for(h = 0; h < _NR_HOLES; h++) {
		hole[h].freelist = 0;
		hole[h].holelist = 0;
	}
	/* Mark all holes on freelist. */
	for(hp = free_slots; hp; hp = hp->h_next) {
		myassert(!hp->freelist);
		myassert(!hp->holelist);
		myassert(c < _NR_HOLES);
	/* Mark all holes on holelist. */
	for(hp = hole_head; hp; hp = hp->h_next) {
		myassert(!hp->freelist);
		myassert(!hp->holelist);
		myassert(c < _NR_HOLES);
	/* Check there are exactly the right number of nodes. */
	myassert(n == _NR_HOLES);
	/* Make sure each slot is on exactly one of the lists. */
	for(h = 0; h < _NR_HOLES; h++) {
		hp = &hole[h];
		myassert(hp->holelist || hp->freelist);
		myassert(!(hp->holelist && hp->freelist));
		myassert(c < _NR_HOLES);
	/* Make sure no holes overlap. */
	for(hp = hole_head; hp && hp->h_next; hp = hp->h_next) {
		myassert(hp->holelist);

		/* No holes overlap. */
		myassert(hp->h_base + hp->h_len <= hp->h_next->h_base);

		/* No uncoalesced holes. */
		myassert(hp->h_base + hp->h_len < hp->h_next->h_base);
	}
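
/* Illustration (not in the original source) of the two assertions above:
 * holes (h_base=10, h_len=5) and (h_base=20, h_len=4) pass both checks;
 * (10,5) followed by (15,3) passes the overlap check (10+5 <= 15) but
 * fails the coalescing check (10+5 < 15 is false), since two adjacent
 * holes should have been merged into (10,8).
 */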
/*===========================================================================*
 *===========================================================================*/
PUBLIC phys_clicks alloc_mem(phys_clicks clicks, u32_t memflags)
{
/* Allocate a block of memory from the free list using first fit. The block
 * consists of a sequence of contiguous bytes, whose length in clicks is
 * given by 'clicks'. A pointer to the block is returned. The block is
 * always on a click boundary. This procedure is called when memory is
 * needed for FORK or EXEC.
 */
	register struct hole *hp, *prev_ptr;
	phys_clicks old_base, mem = NO_MEM, align_clicks = 0;

	if(memflags & PAF_ALIGN64K) {
		align_clicks = (64 * 1024) / CLICK_SIZE;
		clicks += align_clicks;
	}
	mem = alloc_pages(clicks, memflags, NULL);
	if(mem == NO_MEM) {
		free_yielded(clicks * CLICK_SIZE);
		mem = alloc_pages(clicks, memflags, NULL);
	}
	prev_ptr = NIL_HOLE;
	hp = hole_head;
	while (hp != NIL_HOLE) {
		if (hp->h_len >= clicks) {
			/* We found a hole that is big enough. Use it. */
			old_base = hp->h_base;	/* remember where it started */
			hp->h_base += clicks;	/* bite a piece off */
			hp->h_len -= clicks;	/* ditto */

			/* Delete the hole if used up completely. */
			if (hp->h_len == 0) del_slot(prev_ptr, hp);
			/* Anything special needs to happen? */
			if(memflags & PAF_CLEAR) {
				if ((s = sys_memset(0, CLICK_SIZE*old_base,
					CLICK_SIZE*clicks)) != OK) {
					panic("alloc_mem: sys_memset failed: %d", s);
				}
			}

			/* Return the start address of the acquired block. */

		o = mem % align_clicks;
		e = align_clicks - o;
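
/* Worked example of the alignment fix-up above (illustrative, assuming
 * CLICK_SIZE is 4096): PAF_ALIGN64K makes align_clicks = 65536/4096 = 16,
 * and the request is padded by 16 clicks. If the padded block starts at
 * mem = 1029, then o = 1029 % 16 = 5 and e = 16 - 5 = 11, so the first 11
 * clicks are freed again and the caller gets a 64kB-aligned block at 1040.
 */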
/*===========================================================================*
 *===========================================================================*/
PUBLIC void free_mem(phys_clicks base, phys_clicks clicks)
{
/* Return a block of free memory to the hole list. The parameters tell where
 * the block starts in physical memory and how big it is. The block is added
 * to the hole list. If it is contiguous with an existing hole on either end,
 * it is merged with the hole or holes.
 */
	register struct hole *hp, *new_ptr, *prev_ptr;

	if (clicks == 0) return;

	assert(CLICK_SIZE == VM_PAGE_SIZE);
	free_pages(base, clicks);
	if ( (new_ptr = free_slots) == NIL_HOLE)
		panic("hole table full");
	new_ptr->h_base = base;
	new_ptr->h_len = clicks;
	free_slots = new_ptr->h_next;

	/* If this block's address is numerically less than the lowest hole currently
	 * available, or if no holes are currently available, put this hole on the
	 * front of the hole list.
	 */
	hp = hole_head;
	if (hp == NIL_HOLE || base <= hp->h_base) {
		/* Block to be freed goes on front of the hole list. */
		new_ptr->h_next = hp;
		hole_head = new_ptr;
		merge(new_ptr);
		return;
	}

	/* Block to be returned does not go on front of hole list. */
	prev_ptr = hp;
	hp = hp->h_next;
	while (hp != NIL_HOLE && base > hp->h_base) {
		prev_ptr = hp;
		hp = hp->h_next;
	}

	/* We found where it goes. Insert block after 'prev_ptr'. */
	new_ptr->h_next = prev_ptr->h_next;
	prev_ptr->h_next = new_ptr;
	merge(prev_ptr);	/* sequence is 'prev_ptr', 'new_ptr', 'hp' */
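
/* Illustration (not part of the original source): with holes at clicks
 * (100,10) and (300,20), calling free_mem(200, 50) takes a slot off
 * 'free_slots', walks past the hole at 100 (since 200 > 100), and inserts
 * the new hole between the two, giving (100,10), (200,50), (300,20);
 * merge() then finds nothing to coalesce because neither neighbour abuts.
 */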
/*===========================================================================*
 *===========================================================================*/
PRIVATE void del_slot(prev_ptr, hp)
/* pointer to hole entry just ahead of 'hp' */
register struct hole *prev_ptr;
/* pointer to hole entry to be removed */
register struct hole *hp;
{
/* Remove an entry from the hole list. This procedure is called when a
 * request to allocate memory removes a hole in its entirety, thus reducing
 * the number of holes in memory, and requiring the elimination of one
 * entry in the hole list.
 */
	if (hp == hole_head)
		hole_head = hp->h_next;
	else
		prev_ptr->h_next = hp->h_next;

	hp->h_next = free_slots;
	hp->h_base = hp->h_len = 0;
	free_slots = hp;
/*===========================================================================*
 *===========================================================================*/
PRIVATE void merge(hp)
register struct hole *hp;	/* ptr to hole to merge with its successors */
{
/* Check for contiguous holes and merge any found. Contiguous holes can occur
 * when a block of memory is freed, and it happens to abut another hole on
 * either or both ends. The pointer 'hp' points to the first of a series of
 * three holes that can potentially all be merged together.
 */

	register struct hole *next_ptr;

	/* If 'hp' points to the last hole, no merging is possible. If it does not,
	 * try to absorb its successor into it and free the successor's table entry.
	 */
	if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
	if (hp->h_base + hp->h_len == next_ptr->h_base) {
		hp->h_len += next_ptr->h_len;	/* first one gets second one's mem */
		del_slot(hp, next_ptr);
	}

	/* If 'hp' now points to the last hole, return; otherwise, try to absorb its
	 * successor into it.
	 */
	if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
	if (hp->h_base + hp->h_len == next_ptr->h_base) {
		hp->h_len += next_ptr->h_len;
		del_slot(hp, next_ptr);
	}
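
/* Worked example for merge() (illustrative): starting from holes (10,5),
 * (15,3), (18,4), the first pass absorbs (15,3) into (10,5) giving (10,8),
 * and the second pass absorbs (18,4) because 10+8 == 18, leaving a single
 * hole (10,12) and returning both table slots to 'free_slots'.
 */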
/*===========================================================================*
 *===========================================================================*/
PUBLIC void mem_init(chunks)
struct memory *chunks;		/* list of free memory chunks */
{
/* Initialize hole lists. There are two lists: 'hole_head' points to a linked
 * list of all the holes (unused memory) in the system; 'free_slots' points to
 * a linked list of table entries that are not in use. Initially, the former
 * list has one entry for each chunk of physical memory, and the second
 * list links together the remaining table slots. As memory becomes more
 * fragmented in the course of time (i.e., the initial big holes break up into
 * smaller holes), new table slots are needed to represent them. These slots
 * are taken from the list headed by 'free_slots'.
 */

	register struct hole *hp;
	/* Put all holes on the free list. */
	for (hp = &hole[0]; hp < &hole[_NR_HOLES]; hp++) {
		hp->h_next = hp + 1;
		hp->h_base = hp->h_len = 0;
	}
	hole[_NR_HOLES-1].h_next = NIL_HOLE;
	hole_head = NIL_HOLE;
	free_slots = &hole[0];
	/* Use the chunks of physical memory to allocate holes. */
	for (i=NR_MEMS-1; i>=0; i--) {
		if (chunks[i].size > 0) {
			phys_bytes from = CLICK2ABS(chunks[i].base),
				to = CLICK2ABS(chunks[i].base+chunks[i].size)-1;
			if(first || from < mem_low) mem_low = from;
			if(first || to > mem_high) mem_high = to;
			free_mem(chunks[i].base, chunks[i].size);
			total_pages += chunks[i].size;
		}
	}
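
/* Illustration (not part of the original source): with two chunks, say
 * {base=256, size=7936} and {base=24576, size=8192} in clicks, the loop
 * above frees each chunk into the hole list and records the extremes:
 * mem_low = CLICK2ABS(256), mem_high = CLICK2ABS(24576+8192)-1, and
 * total_pages grows by 7936+8192 = 16128 clicks (pages, since CLICK_SIZE
 * equals VM_PAGE_SIZE here).
 */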
PRIVATE void sanitycheck(void)
{
	pagerange_t *p, *prevp = NULL;
	addr_iter iter;

	addr_start_iter_least(&addravl, &iter);
	while((p=addr_get_iter(&iter))) {
		if(prevp) {
			assert(prevp->addr < p->addr);
			assert(prevp->addr + prevp->size < p->addr);
		}
		prevp = p;
		addr_incr_iter(&iter);
	}
}
PUBLIC void memstats(int *nodes, int *pages, int *largest)
{
	pagerange_t *p, *prevp = NULL;
	addr_iter iter;

	addr_start_iter_least(&addravl, &iter);

	while((p=addr_get_iter(&iter))) {
		if(p->size > *largest)
			*largest = p->size;
		addr_incr_iter(&iter);
	}
}
/*===========================================================================*
 *===========================================================================*/
PRIVATE phys_bytes alloc_pages(int pages, int memflags, phys_bytes *len)
{
	/* Both boundaries are expressed in pages: the 16MB limit for ISA DMA
	 * and the 1MB limit for real-mode-accessible memory.
	 */
	phys_bytes boundary16 = 16 * 1024 * 1024 / VM_PAGE_SIZE;
	phys_bytes boundary1 = 1 * 1024 * 1024 / VM_PAGE_SIZE;

	int firstnodes, firstpages, wantnodes, wantpages;
	int finalnodes, finalpages;
	memstats(&firstnodes, &firstpages, &largest);
	wantnodes = firstnodes;
	wantpages = firstpages - pages;
	if(memflags & (PAF_LOWER16MB|PAF_LOWER1MB)) {
		addr_start_iter_least(&addravl, &iter);
	} else {
		addr_start_iter_greatest(&addravl, &iter);
	}
	while((pr = addr_get_iter(&iter))) {
		assert(pr->size > 0);
		if(pr->size >= pages || (memflags & PAF_FIRSTBLOCK)) {
			if(memflags & PAF_LOWER16MB) {
				if(pr->addr + pages > boundary16)
					return NO_MEM;
			}
			if(memflags & PAF_LOWER1MB) {
				if(pr->addr + pages > boundary1)
					return NO_MEM;
			}

			/* good block found! */
			break;
		}
		if(memflags & (PAF_LOWER16MB|PAF_LOWER1MB))
			addr_incr_iter(&iter);
		else
			addr_decr_iter(&iter);
	}
	assert(largest < pages);

	if(memflags & PAF_FIRSTBLOCK) {
		/* block doesn't have to be as big as requested;
		 * return its size though.
		 */
		if(pr->size < pages) {
			pages = pr->size;
			wantpages = firstpages - pages;
		}
	}
	/* Allocated chunk is off the end. */
	mem = pr->addr + pr->size - pages;

	assert(pr->size >= pages);
	if(pr->size == pages) {
		pagerange_t *prr;
		prr = addr_remove(&addravl, pr->addr);
	} else {
		USE(pr, pr->size -= pages;);
	}
	if(memflags & PAF_CLEAR) {
		int s;
		if ((s = sys_memset(0, CLICK_SIZE*mem,
			VM_PAGE_SIZE*pages)) != OK)
			panic("alloc_mem: sys_memset failed: %d", s);
	}

	memstats(&finalnodes, &finalpages, &largest);
	assert(finalnodes == wantnodes);
	assert(finalpages == wantpages);

	return mem;
}
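
/* Worked example of the boundary checks above (illustrative, assuming
 * VM_PAGE_SIZE is 4096): boundary16 = 16MB/4kB = 4096 pages. A request for
 * pages = 8 against a range with pr->addr = 4092 is rejected for
 * PAF_LOWER16MB because 4092 + 8 > 4096, i.e. the block would extend past
 * the 16MB line that ISA DMA devices can address.
 */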
/*===========================================================================*
 *===========================================================================*/
PRIVATE void free_pages(phys_bytes pageno, int npages)
{
	int firstnodes, firstpages, wantnodes, wantpages;
	int finalnodes, finalpages, largest;
	memstats(&firstnodes, &firstpages, &largest);
	wantnodes = firstnodes;
	wantpages = firstpages + npages;

	assert(!addr_search(&addravl, pageno, AVL_EQUAL));
	/* try to merge with higher neighbour */
	if((pr=addr_search(&addravl, pageno+npages, AVL_EQUAL))) {
		USE(pr, pr->addr -= npages;
			pr->size += npages;);
	} else {
		if(!SLABALLOC(pr))
			panic("alloc_pages: can't alloc");
		memstats(&firstnodes, &firstpages, &largest);
		wantnodes = firstnodes;
		wantpages = firstpages + npages;

		USE(pr, pr->addr = pageno;
			pr->size = npages;);
		addr_insert(&addravl, pr);
	}
	addr_start_iter(&addravl, &iter, pr->addr, AVL_EQUAL);
	p = addr_get_iter(&iter);

	addr_decr_iter(&iter);
	if((p = addr_get_iter(&iter))) {
		if(p->addr + p->size == pr->addr) {
			/* Merge with the lower neighbour. */
			USE(p, p->size += pr->size;);
			addr_remove(&addravl, pr->addr);
		}
	}
	memstats(&finalnodes, &finalpages, &largest);
	assert(finalnodes == wantnodes);
	assert(finalpages == wantpages);
}
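
/* Illustration (not part of the original source): freeing pageno = 100,
 * npages = 10 while the tree holds a range (addr=110, size=20) takes the
 * higher-neighbour branch above and rewrites it to (addr=100, size=30);
 * if a lower range (addr=90, size=10) also exists, the merge step that
 * follows sees 90+10 == 100 and folds everything into one range (90, 40).
 */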
PRIVATE struct dmatab
{
	int dt_flags;
	endpoint_t dt_proc;
	phys_bytes dt_base;
	phys_bytes dt_size;
	phys_clicks dt_seg_base;
	phys_clicks dt_seg_size;
} dmatab[NR_DMA];

#define DTF_RELEASE_DMA	2
#define DTF_RELEASE_SEG	4
/*===========================================================================*
 *===========================================================================*/
PUBLIC int do_adddma(message *msg)
{
	endpoint_t req_proc_e, target_proc_e;
	int i, proc_n;
	phys_bytes base, size;
	struct vmproc *vmp;
	req_proc_e = msg->VMAD_REQ;
	target_proc_e = msg->VMAD_EP;
	base = msg->VMAD_START;
	size = msg->VMAD_SIZE;

	/* Find empty slot */
	for (i = 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			break;
	}
670 printf("vm:do_adddma: dma table full\n");
671 for (i
= 0; i
<NR_DMA
; i
++)
673 printf("%d: flags 0x%x proc %d base 0x%x size 0x%x\n",
674 i
, dmatab
[i
].dt_flags
,
679 panic("adddma: table full");
683 /* Find target process */
684 if (vm_isokendpt(target_proc_e
, &proc_n
) != OK
)
686 printf("vm:do_adddma: endpoint %d not found\n", target_proc_e
);
	vmp = &vmproc[proc_n];
	vmp->vm_flags |= VMF_HAS_DMA;

	dmatab[i].dt_flags = DTF_INUSE;
	dmatab[i].dt_proc = target_proc_e;
	dmatab[i].dt_base = base;
	dmatab[i].dt_size = size;

	return OK;
}
/*===========================================================================*
 *===========================================================================*/
PUBLIC int do_deldma(message *msg)
{
	endpoint_t req_proc_e, target_proc_e;
	int i, j;
	phys_bytes base, size;

	req_proc_e = msg->VMDD_REQ;
	target_proc_e = msg->VMDD_EP;
	base = msg->VMDD_START;
	size = msg->VMDD_SIZE;
	/* Find slot */
	for (i = 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (dmatab[i].dt_proc == target_proc_e &&
			dmatab[i].dt_base == base &&
			dmatab[i].dt_size == size)
		{
			break;
		}
	}
729 printf("vm:do_deldma: slot not found\n");
	if (dmatab[i].dt_flags & DTF_RELEASE_SEG)
	{
		/* Check if we have to release the segment */
		for (j = 0; j<NR_DMA; j++)
		{
			if (!(dmatab[j].dt_flags & DTF_INUSE))
				continue;
			if (!(dmatab[j].dt_flags & DTF_RELEASE_SEG))
				continue;
			if (dmatab[i].dt_proc == target_proc_e)
				break;
		}

		free_mem(dmatab[i].dt_seg_base,
			dmatab[i].dt_seg_size);
	}
	dmatab[i].dt_flags &= ~DTF_INUSE;

	return OK;
}
/*===========================================================================*
 *===========================================================================*/
PUBLIC int do_getdma(message *msg)
{
	endpoint_t target_proc_e;
	int i;
	phys_bytes base, size;
	/* Find slot to report */
	for (i = 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (!(dmatab[i].dt_flags & DTF_RELEASE_DMA))
			continue;

		printf("do_getdma: setting reply to 0x%x@0x%x proc %d\n",
			dmatab[i].dt_size, dmatab[i].dt_base,
			dmatab[i].dt_proc);
		msg->VMGD_PROCP = dmatab[i].dt_proc;
		msg->VMGD_BASEP = dmatab[i].dt_base;
		msg->VMGD_SIZEP = dmatab[i].dt_size;

		return OK;
	}
/*===========================================================================*
 *===========================================================================*/
PUBLIC void release_dma(struct vmproc *vmp)
{
	panic("release_dma not done");
	for (i = 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (dmatab[i].dt_proc != vmp->vm_endpoint)
			continue;
		dmatab[i].dt_flags |= DTF_RELEASE_DMA | DTF_RELEASE_SEG;
		dmatab[i].dt_seg_base = base;
		dmatab[i].dt_seg_size = size;
	}

	free_mem(base, size);

	msg->VMRD_FOUND = found_one;
/*===========================================================================*
 *===========================================================================*/
void printmemstats(void)
{
	int nodes, pages, largest;

	memstats(&nodes, &pages, &largest);
	printf("%d blocks, %d pages (%ukB) free, largest %d pages (%ukB)\n",
		nodes, pages, (u32_t) pages * (VM_PAGE_SIZE/1024),
		largest, (u32_t) largest * (VM_PAGE_SIZE/1024));
}
/*===========================================================================*
 *===========================================================================*/
void usedpages_reset(void)
{
	memset(pagemap, 0, sizeof(pagemap));
}
/*===========================================================================*
 *===========================================================================*/
int usedpages_add_f(phys_bytes addr, phys_bytes len, char *file, int line)
{
	u32_t pagestart, pages;

	assert(!(addr % VM_PAGE_SIZE));
	assert(!(len % VM_PAGE_SIZE));

	assert_range(addr, len);

	pagestart = addr / VM_PAGE_SIZE;
	pages = len / VM_PAGE_SIZE;

	while(pages > 0) {
		phys_bytes thisaddr;
		assert(pagestart > 0);
		assert(pagestart < MAXPAGES);
		thisaddr = pagestart * VM_PAGE_SIZE;
		if(GET_BIT(pagemap, pagestart)) {
			printf("%s:%d: usedpages_add: addr 0x%lx reused.\n",
				file, line, thisaddr);
			return EFAULT;
		}
		SET_BIT(pagemap, pagestart);
		pagestart++;
		pages--;
	}

	return OK;
}
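
/* Worked example (illustrative, assuming 4kB pages): a call with
 * addr = 0x10000 and len = 0x3000 gives pagestart = 16 and pages = 3, so
 * bits 16, 17 and 18 of the pagemap are set; a later call that overlaps
 * page 17 trips the GET_BIT test above and the address is reported as
 * reused.
 */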
/*===========================================================================*
 *				alloc_mem_in_list			     *
 *===========================================================================*/
struct memlist *alloc_mem_in_list(phys_bytes bytes, u32_t flags)
{
	struct memlist *head = NULL, *ml;
	assert(!(bytes % VM_PAGE_SIZE));

	rempages = bytes / VM_PAGE_SIZE;

	/* unless we are told to allocate all memory
	 * contiguously, tell the allocation function to grab
	 * whatever block size it can find.
	 */
	if(!(flags & PAF_CONTIG))
		flags |= PAF_FIRSTBLOCK;
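
/* Illustration (not in the original source): a request for 100 pages when
 * the largest free range holds only 60 takes the PAF_FIRSTBLOCK path; the
 * first alloc_pages() call below returns a 60-page block (gotpages = 60),
 * it is appended to the list, and the loop retries with rempages = 40, so
 * the caller ends up with two memlist entries instead of one contiguous
 * block.
 */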
	do {
		phys_bytes mem, gotpages;
		int freed = 0;

		do {
			mem = alloc_pages(rempages, flags, &gotpages);
			if(mem == NO_MEM)
				freed = free_yielded(rempages * VM_PAGE_SIZE);
		} while(mem == NO_MEM && freed > 0);

		if(mem == NO_MEM) {
			printf("alloc_mem_in_list: giving up, %dkB missing\n",
				rempages * VM_PAGE_SIZE/1024);
			free_mem_list(head, 1);
			return NULL;
		}
		assert(gotpages <= rempages);
		assert(gotpages > 0);

		if(!(SLABALLOC(ml))) {
			free_mem_list(head, 1);
			free_pages(mem, gotpages);
			return NULL;
		}

		ml->phys = CLICK2ABS(mem);
		ml->length = CLICK2ABS(gotpages);
		ml->next = head;
		head = ml;

		rempages -= gotpages;
	} while(rempages > 0);
	for(ml = head; ml; ml = ml->next) {
/*===========================================================================*
 *===========================================================================*/
void free_mem_list(struct memlist *list, int all)
{
	struct memlist *next;

	assert(!(list->phys % VM_PAGE_SIZE));
	assert(!(list->length % VM_PAGE_SIZE));
	free_pages(list->phys / VM_PAGE_SIZE,
		list->length / VM_PAGE_SIZE);
/*===========================================================================*
 *===========================================================================*/
void print_mem_list(struct memlist *list)
{
	assert(list->length > 0);
	printf("0x%lx-0x%lx", list->phys, list->phys+list->length-1);