 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 'buffer.c' implements the buffer-cache functions. Race-conditions have
 * been avoided by NEVER letting an interrupt change a buffer (except for the
 * data, of course), but instead letting the caller do it.
 *
 * NOTE! There is one discordant note here: checking floppies for
 * disk change. This is where it fits best, I think, as it should
 * invalidate changed floppy-disk-caches.
#include <stdarg.h>

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/errno.h>

#include <asm/system.h>
#ifdef CONFIG_BLK_DEV_SR
extern int check_cdrom_media_change(int, int);
#endif
#ifdef CONFIG_BLK_DEV_SD
extern int check_scsidisk_media_change(int, int);
extern int revalidate_scsidisk(int, int);
#endif
#ifdef CONFIG_CDU31A
extern int check_cdu31a_media_change(int, int);
#endif
#ifdef CONFIG_MCD
extern int check_mcd_media_change(int, int);
#endif
static int grow_buffers(int pri, int size);
static struct buffer_head * hash_table[NR_HASH];
static struct buffer_head * free_list = NULL;
static struct buffer_head * unused_list = NULL;
static struct wait_queue * buffer_wait = NULL;

int nr_buffer_heads = 0;
static int min_free_pages = 20;	/* nr free pages needed before buffer grows */
extern int *blksize_size[];
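/*
 * The cache is held together by three structures: hash_table[] indexes
 * buffers by (device, block) for fast lookup, free_list is a circular
 * LRU ring linking every buffer head that has data attached, and
 * unused_list holds spare buffer heads with no data.  buffer_wait is
 * where processes sleep when no buffer can be found or grown.
 */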
 * Rewrote the wait-routines to use the "new" wait-queue functionality,
 * and got rid of the cli-sti pairs. The wait-queue routines still
 * need cli-sti, but now it's just a couple of 386 instructions or so.
 *
 * Note that the real wait_on_buffer() is an inline function that checks
 * if 'b_wait' is set before calling this, so that the queues aren't set
 * up unnecessarily.
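 *
 * The protocol itself is simple: put the process on the buffer's b_wait
 * queue, mark it TASK_UNINTERRUPTIBLE, and let schedule() run other work
 * until the I/O-completion side clears b_lock; then leave the queue and
 * go back to TASK_RUNNING.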
void __wait_on_buffer(struct buffer_head * bh)
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&bh->b_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	current->state = TASK_RUNNING;
/* Call sync_buffers with wait!=0 to ensure that the call does not
   return until all buffer writes have completed.  Sync() may return
   before the writes have finished; fsync() may not. */
static int sync_buffers(dev_t dev, int wait)
	int i, retry, pass = 0, err = 0;
	struct buffer_head * bh;
	/* One pass for no-wait, three for wait:
	   0) write out all dirty, unlocked buffers;
	   1) write out all dirty buffers, waiting if locked;
	   2) wait for completion by waiting for all buffers to unlock. */
	for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
		if (dev && bh->b_dev != dev)
			continue;
#if 0	/* Disable bad-block debugging code */
		if (bh->b_req && !bh->b_lock &&
		    !bh->b_dirt && !bh->b_uptodate)
			printk ("Warning (IO error) - orphaned block %08x on %04x\n",
				bh->b_blocknr, bh->b_dev);
#endif
		/* Buffer is locked; skip it unless wait is
		   requested AND pass > 0. */
		if (!wait || !pass) {
		/* If an unlocked buffer is not uptodate, there has been
		   an IO error. Skip it. */
		if (wait && bh->b_req && !bh->b_lock &&
		    !bh->b_dirt && !bh->b_uptodate)
		/* Don't write clean buffers.  Don't write ANY buffers
		   on the third pass. */
		if (!bh->b_dirt || pass >= 2)
			continue;
		ll_rw_block(WRITE, 1, &bh);
	/* If we are waiting for the sync to succeed, and if any dirty
	   blocks were written, then repeat; on the second pass, only
	   wait for buffers being written (do not pass to write any
	   more buffers on the second pass). */
	if (wait && retry && ++pass <= 2)
		goto repeat;
void sync_dev(dev_t dev)
	sync_buffers(dev, 0);
	sync_inodes(dev);
	sync_buffers(dev, 0);
int fsync_dev(dev_t dev)
	sync_buffers(dev, 0);
	sync_inodes(dev);
	return sync_buffers(dev, 1);
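/*
 * Buffers are flushed both before and after the inode sync because
 * writing the inodes may dirty further buffers.  sync_dev() only queues
 * the writes and returns; fsync_dev() passes wait!=0 on the final call
 * and so does not return until the writes have completed.
 */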
asmlinkage int sys_sync(void)
int file_fsync (struct inode *inode, struct file *filp)
	return fsync_dev(inode->i_dev);
asmlinkage int sys_fsync(unsigned int fd)
	struct file * file;
	struct inode * inode;

	if (fd >= NR_OPEN || !(file=current->filp[fd]) || !(inode=file->f_inode))
		return -EBADF;
	if (!file->f_op || !file->f_op->fsync)
		return -EINVAL;
	if (file->f_op->fsync(inode,file))
		return -EIO;
	return 0;
void invalidate_buffers(dev_t dev)
	int i;
	struct buffer_head * bh;

	for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
		if (bh->b_dev != dev)
			continue;
		wait_on_buffer(bh);
		if (bh->b_dev == dev)
			bh->b_uptodate = bh->b_dirt = bh->b_req = 0;
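/*
 * Clearing b_uptodate, b_dirt and b_req is enough to "forget" the old
 * contents: the buffer head stays allocated, but any further bread()
 * of that block goes back to the device instead of the stale cache.
 */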
 * This routine checks whether a floppy has been changed, and
 * invalidates all buffer-cache-entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 *
 * NOTE! Although currently this is only for floppies, the idea is
 * that any additional removable block-device will use this routine,
 * and that mount/open needn't know that floppies/whatever are
 * special.
 */
void check_disk_change(dev_t dev)
	int i;
	struct buffer_head * bh;

	switch(MAJOR(dev)){
	case FLOPPY_MAJOR:
		if (!(bh = getblk(dev,0,1024)))
			return;
		i = floppy_change(bh);
#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	case SCSI_DISK_MAJOR:
		i = check_scsidisk_media_change(dev, 0);
		break;
#endif
#if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
	case SCSI_CDROM_MAJOR:
		i = check_cdrom_media_change(dev, 0);
		break;
#endif
#if defined(CONFIG_CDU31A)
	case CDU31A_CDROM_MAJOR:
		i = check_cdu31a_media_change(dev, 0);
		break;
#endif
#if defined(CONFIG_MCD)
	case MITSUMI_CDROM_MAJOR:
		i = check_mcd_media_change(dev, 0);
		break;
#endif
	printk("VFS: Disk change detected on device %d/%d\n",
		MAJOR(dev), MINOR(dev));
	for (i = 0 ; i < NR_SUPER ; i++)
		if (super_blocks[i].s_dev == dev)
			put_super(super_blocks[i].s_dev);
	invalidate_inodes(dev);
	invalidate_buffers(dev);
#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
/* This is trickier for a removable hard disk, because we have to invalidate
   all of the partitions that lie on the disk. */
	if (MAJOR(dev) == SCSI_DISK_MAJOR)
		revalidate_scsidisk(dev, 0);
#endif
#define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
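/*
 * The buffer cache is indexed by (device, block number): _hashfn() just
 * XORs the two together and reduces the result modulo NR_HASH, so e.g.
 * dev 0x0301, block 7 lands in bucket (0x0301 ^ 7) % NR_HASH.  Buffers
 * that collide in a bucket are chained through b_next/b_prev.
 */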
static inline void remove_from_hash_queue(struct buffer_head * bh)
	if (bh->b_next)
		bh->b_next->b_prev = bh->b_prev;
	if (bh->b_prev)
		bh->b_prev->b_next = bh->b_next;
	if (hash(bh->b_dev,bh->b_blocknr) == bh)
		hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
	bh->b_next = bh->b_prev = NULL;
static inline void remove_from_free_list(struct buffer_head * bh)
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: Free block list corrupted");
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;
	if (free_list == bh)
		free_list = bh->b_next_free;
	bh->b_next_free = bh->b_prev_free = NULL;
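/*
 * The free list is a circular, doubly-linked ring through b_next_free
 * and b_prev_free that contains every buffer head with data, not just
 * "free" ones; unlinking a node just splices its neighbours together
 * and advances free_list if it pointed at the victim.
 */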
static inline void remove_from_queues(struct buffer_head * bh)
	remove_from_hash_queue(bh);
	remove_from_free_list(bh);
static inline void put_first_free(struct buffer_head * bh)
	if (!bh || (bh == free_list))
		return;
	remove_from_free_list(bh);
	/* add to front of free list */
	bh->b_next_free = free_list;
	bh->b_prev_free = free_list->b_prev_free;
	free_list->b_prev_free->b_next_free = bh;
	free_list->b_prev_free = bh;
	free_list = bh;
static inline void put_last_free(struct buffer_head * bh)
	if (bh == free_list) {
		free_list = bh->b_next_free;
		return;
	}
	remove_from_free_list(bh);
	/* add to back of free list */
	bh->b_next_free = free_list;
	bh->b_prev_free = free_list->b_prev_free;
	free_list->b_prev_free->b_next_free = bh;
	free_list->b_prev_free = bh;
static inline void insert_into_queues(struct buffer_head * bh)
	/* put at end of free list */
	bh->b_next_free = free_list;
	bh->b_prev_free = free_list->b_prev_free;
	free_list->b_prev_free->b_next_free = bh;
	free_list->b_prev_free = bh;
	/* put the buffer in new hash-queue if it has a device */
	if (!bh->b_dev)
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
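/*
 * A buffer being (re)inserted goes to the back of the free list, i.e.
 * the most-recently-used end, and to the head of its hash chain, so
 * recently used blocks are found quickly while getblk() starts its
 * search for a reusable buffer at the front of the ring.
 */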
static struct buffer_head * find_buffer(dev_t dev, int block, int size)
	struct buffer_head * tmp;

	for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
		if (tmp->b_dev==dev && tmp->b_blocknr==block)
			if (tmp->b_size == size)
				return tmp;
			else {
				printk("VFS: Wrong blocksize on device %d/%d\n",
					MAJOR(dev), MINOR(dev));
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
	struct buffer_head * bh;

	if (!(bh=find_buffer(dev,block,size)))
		return NULL;
	wait_on_buffer(bh);
	if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
		return bh;
void set_blocksize(dev_t dev, int size)
	int i;
	struct buffer_head * bh, *bhnext;

	if (!blksize_size[MAJOR(dev)])
		return;

	switch(size) {
		default: panic("Invalid blocksize passed to set_blocksize");
		case 512: case 1024: case 2048: case 4096:;
	}
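	/*
	 * Only 512, 1024, 2048 or 4096 byte blocks are valid here; any
	 * other size is a bug in the calling driver or filesystem, hence
	 * the panic rather than an error return.
	 */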
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return;
	}
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
		return;
	sync_buffers(dev, 2);
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;
	/* We need to be quite careful how we do this - we are moving entries
	   around on the free list, and we can get in a loop if we are not careful. */
	for (i = nr_buffers*2 ; --i > 0 ; bh = bhnext) {
		bhnext = bh->b_next_free;
		if (bh->b_dev != dev)
			continue;
		if (bh->b_size == size)
			continue;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_size != size)
			bh->b_uptodate = bh->b_dirt = 0;
		remove_from_hash_queue(bh);
		/* put_first_free(bh); */
	}
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions. Most of the code is seldom used (ie repeating),
 * so it should be much more efficient than it looks.
 *
 * The algorithm is changed: hopefully better, and an elusive bug removed.
 *
 * 14.02.92: changed it to sync dirty buffers a bit: better performance
 * when the filesystem starts to get full of dirty blocks (I hope).
 */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
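/*
 * BADNESS() ranks how expensive a buffer is to reuse: a clean, unlocked
 * buffer scores 0, a clean locked one 1, a dirty unlocked one 2 and a
 * dirty locked one 3.  getblk() keeps the lowest-scoring candidate it
 * has seen when it has to recycle a buffer.
 */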
struct buffer_head * getblk(dev_t dev, int block, int size)
	struct buffer_head * bh, * tmp;
	int buffers;
	static int grow_size = 0;
repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_free(bh);
		return bh;
	}
	if (nr_free_pages > min_free_pages && grow_size <= 0) {
		if (grow_buffers(GFP_BUFFER, size))
			grow_size = PAGE_SIZE;
	}
	buffers = nr_buffers;
	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (mem_map[MAP_NR((unsigned long) tmp->b_data)] != 1)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
		}
		if (tmp->b_dirt)
			ll_rw_block(WRITEA, 1, &tmp);
	}
	if (!bh && nr_free_pages > 5) {
		if (grow_buffers(GFP_BUFFER, size))
			goto repeat;
	}

	/* and repeat until we find something good */
	if (!bh) {
		if (!grow_buffers(GFP_ATOMIC, size))
			sleep_on(&buffer_wait);
		goto repeat;
	}
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	/* NOTE!! While we slept waiting for this block, somebody else might */
	/* already have added "this" block to the cache. check it */
	if (find_buffer(dev,block,size))
		goto repeat;
	/* OK, FINALLY we know that this buffer is the only one of its kind, */
	/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	remove_from_queues(bh);
	bh->b_dev = dev;
	bh->b_blocknr = block;
	insert_into_queues(bh);
	return bh;
void brelse(struct buffer_head * buf)
	wait_on_buffer(buf);
	if (buf->b_count) {
		buf->b_count--;
		wake_up(&buffer_wait);
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
 * bread() reads a specified block and returns the buffer that contains
 * it. It returns NULL if the block was unreadable.
struct buffer_head * bread(dev_t dev, int block, int size)
	struct buffer_head * bh;

	if (!(bh = getblk(dev, block, size))) {
		printk("VFS: bread: READ error on device %d/%d\n",
			MAJOR(dev), MINOR(dev));
		return NULL;
	}
	if (bh->b_uptodate)
		return bh;
	ll_rw_block(READ, 1, &bh);
 * Ok, breada can be used as bread, but additionally to mark other
 * blocks for reading as well. End the argument list with a negative
 * number.
struct buffer_head * breada(dev_t dev, int first, ...)
	va_list args;
	unsigned int blocksize;
	struct buffer_head * bh, *tmp;

	va_start(args,first);

	blocksize = BLOCK_SIZE;
	if (blksize_size[MAJOR(dev)] && blksize_size[MAJOR(dev)][MINOR(dev)])
		blocksize = blksize_size[MAJOR(dev)][MINOR(dev)];
	if (!(bh = getblk(dev, first, blocksize))) {
		printk("VFS: breada: READ error on device %d/%d\n",
			MAJOR(dev), MINOR(dev));
		return NULL;
	}
	if (!bh->b_uptodate)
		ll_rw_block(READ, 1, &bh);
	while ((first=va_arg(args,int))>=0) {
		tmp = getblk(dev, first, blocksize);
		if (!tmp->b_uptodate)
			ll_rw_block(READA, 1, &tmp);
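		/*
		 * READA is a read-ahead hint: unlike READ, the request may
		 * simply be dropped if the buffer is busy, and breada()
		 * never waits on these extra blocks, only on the block it
		 * was actually asked for.
		 */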
 * See fs/inode.c for the weird use of volatile..
 */
static void put_unused_buffer_head(struct buffer_head * bh)
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset((void *) bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
static void get_more_buffer_heads(void)
	int i;
	struct buffer_head * bh;

	if (!(bh = (struct buffer_head *) get_free_page(GFP_BUFFER)))
		return;

	for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
		bh->b_next_free = unused_list;	/* only make link */
		unused_list = bh++;
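		/*
		 * One fresh page yields PAGE_SIZE/sizeof(struct buffer_head)
		 * heads; the for-initializer above bumps nr_buffer_heads by
		 * that amount and the loop threads every head onto unused_list.
		 */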
static struct buffer_head * get_unused_buffer_head(void)
	struct buffer_head * bh;

	get_more_buffer_heads();
	if (!unused_list)
		return NULL;
	bh = unused_list;
	unused_list = bh->b_next_free;
	bh->b_next_free = NULL;
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.. Use the bh->b_this_page linked list to
 * follow the buffers created. Return NULL if unable to create more
 * buffers.
 */
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
	struct buffer_head *bh, *head;
	unsigned long offset;
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) < PAGE_SIZE) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = head;
		head = bh;
		bh->b_data = (char *) (page+offset);
	}
 * In case anything failed, we just free everything we got.
 */
no_grow:
	bh = head;
	while (bh) {
		head = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(head);
	}
	return NULL;
static void read_buffers(struct buffer_head * bh[], int nrbuf)
	int i;
	int bhnum = 0;
	struct buffer_head * bhr[8];

	for (i = 0 ; i < nrbuf ; i++) {
		if (bh[i] && !bh[i]->b_uptodate)
			bhr[bhnum++] = bh[i];
	}
	if (bhnum)
		ll_rw_block(READ, bhnum, bhr);
	for (i = 0 ; i < nrbuf ; i++) {
		if (bh[i])
			wait_on_buffer(bh[i]);
	}
static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
	dev_t dev, int *b, int size)
	struct buffer_head * bh[8];
	unsigned long page;
	unsigned long offset;
	int block;
	int nrbuf;

	page = (unsigned long) first->b_data;
	if (page & ~PAGE_MASK) {
		brelse(first);
		return 0;
	}
	mem_map[MAP_NR(page)]++;
	for (offset = size ; offset < PAGE_SIZE ; offset += size) {
		block = *++b;
		first = get_hash_table(dev, block, size);
		if (!first)
			goto no_go;
		if (page+offset != (unsigned long) first->b_data)
			goto no_go;
	}
	read_buffers(bh,nrbuf);		/* make sure they are actually read correctly */
static unsigned long try_to_load_aligned(unsigned long address,
	dev_t dev, int b[], int size)
	struct buffer_head * bh, * tmp, * arr[8];
	unsigned long offset;
	int * p;
	int block;

	bh = create_buffers(address, size);
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		tmp = get_hash_table(dev, block, size);
	}
	bh->b_blocknr = *(p++);
	insert_into_queues(bh);
	bh = bh->b_this_page;
	buffermem += PAGE_SIZE;
	bh->b_this_page = tmp;
	mem_map[MAP_NR(address)]++;
	read_buffers(arr,block);
	return address;
not_aligned:
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
 * Try-to-share-buffers tries to minimize memory use by trying to keep
 * both code pages and the buffer area in the same page. This is done by
 * (a) checking if the buffers are already aligned correctly in memory and
 * (b) if none of the buffer heads are in memory at all, trying to load
 * them into memory the way we want them.
 *
 * This doesn't guarantee that the memory is shared, but should under most
 * circumstances work very well indeed (ie >90% sharing of code pages on
 * demand-loadable executables).
static inline unsigned long try_to_share_buffers(unsigned long address,
	dev_t dev, int *b, int size)
	struct buffer_head * bh;
	int block;

	block = b[0];
	bh = get_hash_table(dev, block, size);
	if (bh)
		return check_aligned(bh, address, dev, b, size);
	return try_to_load_aligned(address, dev, b, size);
#define COPYBLK(size,from,to) \
__asm__ __volatile__("rep ; movsl": \
	:"c" (((unsigned long) size) >> 2),"S" (from),"D" (to) \
	:"cx","di","si")
 * bread_page reads four buffers into memory at the desired address. It's
 * a function of its own, as there is some speed to be got by reading them
 * all at the same time, not waiting for one to be read, and then another
 * etc. This also allows us to optimize memory usage by sharing code pages
 * and filesystem buffers..
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int prot)
	struct buffer_head * bh[8];
	unsigned long where;
	int i, j;

	if (!(prot & PAGE_RW)) {
		where = try_to_share_buffers(address,dev,b,size);
		if (where)
			return where;
	}
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size) {
		bh[i] = getblk(dev, b[i], size);
	}
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size,address += size) {
		if (bh[i]->b_uptodate)
			COPYBLK(size, (unsigned long) bh[i]->b_data,address);
	}
 * Try to increase the number of buffers available: the size argument
 * is used to determine what kind of buffers we want.
static int grow_buffers(int pri, int size)
	unsigned long page;
	struct buffer_head *bh, *tmp;

	if ((size & 511) || (size > PAGE_SIZE)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return 0;
	}
	if(!(page = __get_free_page(pri)))
		return 0;
	bh = create_buffers(page, size);
	if (free_list) {
		tmp->b_next_free = free_list;
		tmp->b_prev_free = free_list->b_prev_free;
		free_list->b_prev_free->b_next_free = tmp;
		free_list->b_prev_free = tmp;
	} else {
		tmp->b_prev_free = tmp;
		tmp->b_next_free = tmp;
	}
	if (tmp->b_this_page)
		tmp = tmp->b_this_page;
	tmp->b_this_page = bh;
	buffermem += PAGE_SIZE;
 * try_to_free() checks if all the buffers on this particular page
 * are unused, and frees the page if so.
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	tmp = bh;
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		if (p == *bhp)
			*bhp = p->b_prev_free;
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= PAGE_SIZE;
	return !mem_map[MAP_NR(page)];
 * Try to free up some pages by shrinking the buffer-cache
 *
 * Priority tells the routine how hard to try to shrink the
 * buffers: 3 means "don't bother too much", while a value
 * of 0 means "we'd better get some free pages now".
int shrink_buffers(unsigned int priority)
	struct buffer_head *bh;
	int i;

	bh = free_list;
	i = nr_buffers >> priority;
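	/*
	 * Priority 0 walks the whole free list; each higher priority
	 * halves the number of buffers examined, so at priority 3 only
	 * one eighth of them are looked at before giving up.
	 */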
	for ( ; i-- > 0 ; bh = bh->b_next_free) {
		if (bh->b_count || !bh->b_this_page)
			continue;
		if (bh->b_dirt) {
			ll_rw_block(WRITEA, 1, &bh);
			continue;
		}
		if (try_to_free(bh, &bh))
			return 1;
	}
 * This initializes the initial buffer free list. nr_buffers is set
 * to one less than the actual number of buffers, as a sop to backwards
 * compatibility --- the old code did this (I think unintentionally,
 * but I'm not sure), and programs in the ps package expect it.
 */
void buffer_init(void)
	int i;

	if (high_memory >= 4*1024*1024)
		min_free_pages = 200;
	for (i = 0 ; i < NR_HASH ; i++)
		hash_table[i] = NULL;
	grow_buffers(GFP_KERNEL, BLOCK_SIZE);
	if (!free_list)
		panic("VFS: Unable to initialize buffer free list!");