/*
 * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
 *
 * Copyright (C) 2002-2011 Aleph One Ltd.
 *   for Toby Churchill Ltd and Brightstar Engineering
 *
 * Created by Charles Manning <charles@aleph1.co.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "yaffs_trace.h"

#include "yaffs_guts.h"
#include "yaffs_getblockinfo.h"
#include "yaffs_tagscompat.h"
#include "yaffs_tagsmarshall.h"
#include "yaffs_nand.h"
#include "yaffs_yaffs1.h"
#include "yaffs_yaffs2.h"
#include "yaffs_bitmap.h"
#include "yaffs_verify.h"
#include "yaffs_nand.h"
#include "yaffs_packedtags2.h"
#include "yaffs_nameval.h"
#include "yaffs_allocator.h"
#include "yaffs_attribs.h"
#include "yaffs_summary.h"
/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
#define YAFFS_GC_GOOD_ENOUGH 2
#define YAFFS_GC_PASSIVE_THRESHOLD 4

#include "yaffs_ecc.h"
/* Forward declarations */

static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
                             const u8 *buffer, int n_bytes, int use_reserve);

static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
                                int buffer_size);
/* Function to calculate chunk and offset */

void yaffs_addr_to_chunk(struct yaffs_dev *dev, Y_LOFF_T addr,
                         int *chunk_out, u32 *offset_out)
{
    int chunk;
    u32 offset;

    chunk = (u32) (addr >> dev->chunk_shift);

    if (dev->chunk_div == 1) {
        /* easy power of 2 case */
        offset = (u32) (addr & dev->chunk_mask);
    } else {
        /* Non power-of-2 case */

        Y_LOFF_T chunk_base;

        chunk /= dev->chunk_div;

        chunk_base = ((Y_LOFF_T) chunk) * dev->data_bytes_per_chunk;
        offset = (u32) (addr - chunk_base);
    }

    *chunk_out = chunk;
    *offset_out = offset;
}
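
/*
 * Editor's note: a minimal usage sketch (not part of the original source)
 * showing how a byte address is split into a chunk index and an in-chunk
 * offset with the function above. It assumes dev has already been set up so
 * that chunk_shift, chunk_div and chunk_mask are valid.
 */
static inline void yaffs_example_addr_split(struct yaffs_dev *dev,
                                            Y_LOFF_T addr)
{
    int chunk;
    u32 offset;

    yaffs_addr_to_chunk(dev, addr, &chunk, &offset);
    /* For a 2048-byte chunk device, addr 5000 gives chunk 2, offset 904. */
    (void) chunk;
    (void) offset;
}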
/* Function to return the number of shifts for a power of 2 greater than or
 * equal to the given number
 * Note we don't try to cater for all possible numbers and this does not have
 * to be hellishly efficient.
 */

static inline u32 calc_shifts_ceiling(u32 x)
{
    int extra_bits;
    int shifts;

    shifts = extra_bits = 0;

    while (x > 1) {
        if (x & 1)
            extra_bits++;
        x >>= 1;
        shifts++;
    }

    if (extra_bits)
        shifts++;

    return shifts;
}

/* Function to return the number of shifts to get a 1 in bit 0
 */

static inline u32 calc_shifts(u32 x)
{
    u32 shifts;

    shifts = 0;

    if (!x)
        return 0;

    while (!(x & 1)) {
        x >>= 1;
        shifts++;
    }

    return shifts;
}
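
/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * how these helpers are typically used when a device is initialised: a
 * 2048-byte chunk gives chunk_shift = 11, so addr >> 11 is the chunk index.
 */
static inline void yaffs_example_shift_setup(void)
{
    u32 bytes_per_chunk = 2048;
    u32 ceil_shift = calc_shifts_ceiling(bytes_per_chunk);  /* 11 */
    u32 exact_shift = calc_shifts(bytes_per_chunk);         /* also 11: 2048 is a power of 2 */

    (void) ceil_shift;
    (void) exact_shift;
}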
/*
 * Temporary buffer manipulations.
 */

static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
{
    int i;
    u8 *buf = (u8 *) 1;

    memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));

    for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
        dev->temp_buffer[i].in_use = 0;
        buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
        dev->temp_buffer[i].buffer = buf;
    }

    return buf ? YAFFS_OK : YAFFS_FAIL;
}
u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev)
{
    int i;

    dev->temp_in_use++;
    if (dev->temp_in_use > dev->max_temp)
        dev->max_temp = dev->temp_in_use;

    for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
        if (dev->temp_buffer[i].in_use == 0) {
            dev->temp_buffer[i].in_use = 1;
            return dev->temp_buffer[i].buffer;
        }
    }

    yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
    /*
     * If we got here then we have to allocate an unmanaged one
     */

    dev->unmanaged_buffer_allocs++;
    return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
}
void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
{
    int i;

    dev->temp_in_use--;

    for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
        if (dev->temp_buffer[i].buffer == buffer) {
            dev->temp_buffer[i].in_use = 0;
            return;
        }
    }

    if (buffer) {
        /* assume it is an unmanaged one. */
        yaffs_trace(YAFFS_TRACE_BUFFERS,
            "Releasing unmanaged temp buffer");
        kfree(buffer);
        dev->unmanaged_buffer_deallocs++;
    }
}
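
/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * the usual get/release pairing for temp buffers. Every buffer obtained with
 * yaffs_get_temp_buffer() must be handed back, otherwise the small pool is
 * exhausted and unmanaged kmalloc buffers start being handed out.
 */
static inline void yaffs_example_temp_buffer_use(struct yaffs_dev *dev)
{
    u8 *buf = yaffs_get_temp_buffer(dev);

    if (buf) {
        memset(buf, 0xff, dev->data_bytes_per_chunk);  /* scratch work */
        yaffs_release_temp_buffer(dev, buf);
    }
}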
/*
 * Functions for robustisizing TODO
 */

static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
                                     const u8 *data,
                                     const struct yaffs_ext_tags *tags)
{
    (void) dev;
    (void) nand_chunk;
    (void) data;
    (void) tags;
}

static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
                                      const struct yaffs_ext_tags *tags)
{
    (void) dev;
    (void) nand_chunk;
    (void) tags;
}
void yaffs_handle_chunk_error(struct yaffs_dev *dev,
                              struct yaffs_block_info *bi)
{
    if (!bi->gc_prioritise) {
        bi->gc_prioritise = 1;
        dev->has_pending_prioritised_gc = 1;
        bi->chunk_error_strikes++;

        if (bi->chunk_error_strikes > 3) {
            bi->needs_retiring = 1;  /* Too many strikes, so retire */
            yaffs_trace(YAFFS_TRACE_ALWAYS,
                "yaffs: Block struck out");
        }
    }
}
static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
                                        int erased_ok)
{
    int flash_block = nand_chunk / dev->param.chunks_per_block;
    struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);

    yaffs_handle_chunk_error(dev, bi);

    if (erased_ok) {
        /* Was an actual write failure,
         * so mark the block for retirement.*/
        bi->needs_retiring = 1;
        yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
            "**>> Block %d needs retiring", flash_block);
    }

    /* Delete the chunk */
    yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
    yaffs_skip_rest_of_block(dev);
}
/*
 * Simple hash function. Needs to have a reasonable spread
 */

static inline int yaffs_hash_fn(int n)
{
    if (n < 0)
        n = -n;
    return n % YAFFS_NOBJECT_BUCKETS;
}
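
/*
 * Editor's note: an illustrative sketch (not part of the original source).
 * Object ids are spread over YAFFS_NOBJECT_BUCKETS hash buckets; two ids that
 * differ by a multiple of the bucket count land in the same bucket.
 */
static inline int yaffs_example_bucket_for(int obj_id)
{
    return yaffs_hash_fn(obj_id);  /* e.g. 257 -> 1 when there are 256 buckets */
}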
/*
 * Access functions to useful fake objects.
 * Note that root might have a presence in NAND if permissions are set.
 */

struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
{
    return dev->root_dir;
}

struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
{
    return dev->lost_n_found;
}
/*
 * Erased NAND checking functions
 */

int yaffs_check_ff(u8 *buffer, int n_bytes)
{
    /* Horrible, slow implementation */
    while (n_bytes--) {
        if (*buffer != 0xff)
            return 0;
        buffer++;
    }
    return 1;
}

static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
{
    int retval = YAFFS_OK;
    u8 *data = yaffs_get_temp_buffer(dev);
    struct yaffs_ext_tags tags;

    if (!yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags))
        yaffs_trace(YAFFS_TRACE_ERROR,
            "yaffs_check_chunk_erased: unhandled error from rd_chunk_tags_nand");

    if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
        retval = YAFFS_FAIL;

    if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
        tags.chunk_used) {
        yaffs_trace(YAFFS_TRACE_NANDACCESS,
            "Chunk %d not erased", nand_chunk);
        retval = YAFFS_FAIL;
    }

    yaffs_release_temp_buffer(dev, data);

    return retval;
}
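
/*
 * Editor's note: a small illustrative sketch (not part of the original
 * source). yaffs_check_ff() simply reports whether a buffer is still in the
 * all-0xff erased state; a freshly erased page passes, a programmed one fails.
 */
static inline int yaffs_example_is_erased_pattern(void)
{
    u8 page[4] = { 0xff, 0xff, 0xff, 0xff };

    return yaffs_check_ff(page, sizeof(page));  /* returns 1 (erased) */
}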
static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
                                      int nand_chunk,
                                      const u8 *data,
                                      struct yaffs_ext_tags *tags)
{
    int retval = YAFFS_OK;
    struct yaffs_ext_tags temp_tags;
    u8 *buffer = yaffs_get_temp_buffer(dev);

    if (!yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags))
        yaffs_trace(YAFFS_TRACE_ERROR,
            "yaffs_verify_chunk_written: unhandled error from rd_chunk_tags_nand");

    if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
        temp_tags.obj_id != tags->obj_id ||
        temp_tags.chunk_id != tags->chunk_id ||
        temp_tags.n_bytes != tags->n_bytes)
        retval = YAFFS_FAIL;

    yaffs_release_temp_buffer(dev, buffer);

    return retval;
}
int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
{
    int reserved_chunks;
    int reserved_blocks = dev->param.n_reserved_blocks;
    int checkpt_blocks;

    checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);

    reserved_chunks =
        (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;

    return (dev->n_free_chunks > (reserved_chunks + n_chunks));
}
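
/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * the reservation arithmetic above. With, say, 5 reserved blocks, 2 checkpoint
 * blocks and 64 chunks per block, 448 chunks are held back, so a request is
 * only allowed while free chunks exceed 448 + n_chunks.
 */
static inline int yaffs_example_space_check(struct yaffs_dev *dev)
{
    /* True only if one chunk can be written without eating into the
     * reserve kept for garbage collection and checkpoints. */
    return yaffs_check_alloc_available(dev, 1);
}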
static int yaffs_find_alloc_block(struct yaffs_dev *dev)
{
    u32 i;
    struct yaffs_block_info *bi;

    if (dev->n_erased_blocks < 1) {
        /* Hoosterman we've got a problem.
         * Can't get space to gc
         */
        yaffs_trace(YAFFS_TRACE_ERROR,
            "yaffs tragedy: no more erased blocks");

        return -1;
    }

    /* Find an empty block. */

    for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
        dev->alloc_block_finder++;
        if (dev->alloc_block_finder < dev->internal_start_block
            || dev->alloc_block_finder > dev->internal_end_block) {
            dev->alloc_block_finder = dev->internal_start_block;
        }

        bi = yaffs_get_block_info(dev, dev->alloc_block_finder);

        if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
            bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
            dev->seq_number++;
            bi->seq_number = dev->seq_number;
            dev->n_erased_blocks--;
            yaffs_trace(YAFFS_TRACE_ALLOCATE,
                "Allocated block %d, seq %d, %d left",
                dev->alloc_block_finder, dev->seq_number,
                dev->n_erased_blocks);
            return dev->alloc_block_finder;
        }
    }

    yaffs_trace(YAFFS_TRACE_ALWAYS,
        "yaffs tragedy: no more erased blocks, but there should have been %d",
        dev->n_erased_blocks);

    return -1;
}
static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
                             struct yaffs_block_info **block_ptr)
{
    int ret_val;
    struct yaffs_block_info *bi;

    if (dev->alloc_block < 0) {
        /* Get next block to allocate off */
        dev->alloc_block = yaffs_find_alloc_block(dev);
        dev->alloc_page = 0;
    }

    if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
        /* No space unless we're allowed to use the reserve. */
        return -1;
    }

    if (dev->n_erased_blocks < dev->param.n_reserved_blocks
        && dev->alloc_page == 0)
        yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");

    /* Next page please.... */
    if (dev->alloc_block >= 0) {
        bi = yaffs_get_block_info(dev, dev->alloc_block);

        ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
            dev->alloc_page;
        bi->pages_in_use++;
        yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);

        dev->alloc_page++;

        dev->n_free_chunks--;

        /* If the block is full set the state to full */
        if (dev->alloc_page >= (u32)dev->param.chunks_per_block) {
            bi->block_state = YAFFS_BLOCK_STATE_FULL;
            dev->alloc_block = -1;
        }

        if (block_ptr)
            *block_ptr = bi;

        return ret_val;
    }

    yaffs_trace(YAFFS_TRACE_ERROR,
        "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");

    return -1;
}
static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
{
    int n;

    n = dev->n_erased_blocks * dev->param.chunks_per_block;

    if (dev->alloc_block > 0)
        n += (dev->param.chunks_per_block - dev->alloc_page);

    return n;
}
/*
 * yaffs_skip_rest_of_block() skips over the rest of the allocation block
 * if we don't want to write to it.
 */
void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
{
    struct yaffs_block_info *bi;

    if (dev->alloc_block > 0) {
        bi = yaffs_get_block_info(dev, dev->alloc_block);
        if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
            bi->block_state = YAFFS_BLOCK_STATE_FULL;
            dev->alloc_block = -1;
        }
    }
}
static int yaffs_write_new_chunk(struct yaffs_dev *dev,
                                 const u8 *data,
                                 struct yaffs_ext_tags *tags, int use_reserver)
{
    u32 attempts = 0;
    int write_ok = 0;
    int chunk;

    yaffs2_checkpt_invalidate(dev);

    do {
        struct yaffs_block_info *bi = 0;
        int erased_ok = 0;

        chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
        if (chunk < 0) {
            /* no space */
            break;
        }

        /* First check this chunk is erased, if it needs
         * checking. The checking policy (unless forced
         * always on) is as follows:
         *
         * Check the first page we try to write in a block.
         * If the check passes then we don't need to check any
         * more. If the check fails, we check again...
         * If the block has been erased, we don't need to check.
         *
         * However, if the block has been prioritised for gc,
         * then we think there might be something odd about
         * this block and stop using it.
         *
         * Rationale: We should only ever see chunks that have
         * not been erased if there was a partially written
         * chunk due to power loss. This checking policy should
         * catch that case with very few checks and thus save a
         * lot of checks that are most likely not needed.
         *
         * If an erase check fails or the write fails we skip the
         * rest of the block.
         */

        /* let's give it a try */
        attempts++;

        if (dev->param.always_check_erased)
            bi->skip_erased_check = 0;

        if (!bi->skip_erased_check) {
            erased_ok = yaffs_check_chunk_erased(dev, chunk);
            if (erased_ok != YAFFS_OK) {
                yaffs_trace(YAFFS_TRACE_ERROR,
                    "**>> yaffs chunk %d was not erased",
                    chunk);

                /* If not erased, delete this one,
                 * skip rest of block and
                 * try another chunk */
                yaffs_chunk_del(dev, chunk, 1, __LINE__);
                yaffs_skip_rest_of_block(dev);
                continue;
            }
        }

        write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);

        if (!bi->skip_erased_check)
            write_ok =
                yaffs_verify_chunk_written(dev, chunk, data, tags);

        if (write_ok != YAFFS_OK) {
            /* Clean up aborted write, skip to next block and
             * try another chunk */
            yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
            continue;
        }

        bi->skip_erased_check = 1;

        /* Copy the data into the robustification buffer */
        yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);

    } while (write_ok != YAFFS_OK &&
             (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));

    if (!write_ok)
        chunk = -1;

    if (attempts > 1) {
        yaffs_trace(YAFFS_TRACE_ERROR,
            "**>> yaffs write required %d attempts",
            attempts);
        dev->n_retried_writes += (attempts - 1);
    }

    return chunk;
}
/*
 * Block retiring for handling a broken block.
 */

static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
{
    struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);

    yaffs2_checkpt_invalidate(dev);

    yaffs2_clear_oldest_dirty_seq(dev, bi);

    if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
        if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
            yaffs_trace(YAFFS_TRACE_ALWAYS,
                "yaffs: Failed to mark bad and erase block %d",
                flash_block);
        } else {
            struct yaffs_ext_tags tags;
            int chunk_id =
                flash_block * dev->param.chunks_per_block;

            u8 *buffer = yaffs_get_temp_buffer(dev);

            memset(buffer, 0xff, dev->data_bytes_per_chunk);
            memset(&tags, 0, sizeof(tags));
            tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
            if (dev->tagger.write_chunk_tags_fn(dev, chunk_id -
                    dev->chunk_offset, buffer, &tags) != YAFFS_OK)
                yaffs_trace(YAFFS_TRACE_ALWAYS,
                    "yaffs: Failed to write bad block marker to block %d",
                    flash_block);

            yaffs_release_temp_buffer(dev, buffer);
        }
    }

    bi->block_state = YAFFS_BLOCK_STATE_DEAD;
    bi->gc_prioritise = 0;
    bi->needs_retiring = 0;

    dev->n_retired_blocks++;
}
/*---------------- Name handling functions ------------*/

static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
                                    const YCHAR *oh_name, int buff_size)
{
#ifdef CONFIG_YAFFS_AUTO_UNICODE
    if (dev->param.auto_unicode) {
        if (*oh_name) {
            /* It is an ASCII name, do an ASCII to
             * unicode conversion */
            const char *ascii_oh_name = (const char *)oh_name;
            int n = buff_size - 1;
            while (n > 0 && *ascii_oh_name) {
                *name = *ascii_oh_name;
                name++;
                ascii_oh_name++;
                n--;
            }
        } else {
            yaffs_strncpy(name, oh_name + 1, buff_size - 1);
        }
    } else {
#else
    {
#endif
        yaffs_strncpy(name, oh_name, buff_size - 1);
    }
}
static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
                                    const YCHAR *name)
{
#ifdef CONFIG_YAFFS_AUTO_UNICODE

    int is_ascii;
    const YCHAR *w;

    if (dev->param.auto_unicode) {

        is_ascii = 1;
        w = name;

        /* Figure out if the name will fit in ascii character set */
        while (is_ascii && *w) {
            if ((*w) & 0xff00)
                is_ascii = 0;
            w++;
        }

        if (is_ascii) {
            /* It is an ASCII name, so convert unicode to ascii */
            char *ascii_oh_name = (char *)oh_name;
            int n = YAFFS_MAX_NAME_LENGTH - 1;
            while (n > 0 && *name) {
                *ascii_oh_name = *name;
                name++;
                ascii_oh_name++;
                n--;
            }
        } else {
            /* Unicode name, so save starting at the second YCHAR */
            *oh_name = 0;
            yaffs_strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
        }
    } else {
#else
    {
#endif
        yaffs_strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
    }
}
static u16 yaffs_calc_name_sum(const YCHAR *name)
{
    u16 sum = 0;
    u16 i = 1;

    if (!name)
        return 0;

    while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
        /* 0x1f mask is case insensitive */
        sum += ((*name) & 0x1f) * i;
        i++;
        name++;
    }
    return sum;
}
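
/*
 * Editor's note: an illustrative sketch (not part of the original source).
 * The name sum is a cheap, case-insensitive checksum used to reject
 * non-matching directory entries before doing a full name compare.
 */
static inline int yaffs_example_name_prefilter(const YCHAR *a, const YCHAR *b)
{
    /* Different sums guarantee different names; equal sums still
     * require a real comparison. */
    return yaffs_calc_name_sum(a) == yaffs_calc_name_sum(b);
}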
void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
{
    memset(obj->short_name, 0, sizeof(obj->short_name));

    if (name && !name[0]) {
        yaffs_fix_null_name(obj, obj->short_name,
                            YAFFS_SHORT_NAME_LENGTH);
        name = obj->short_name;
    } else if (name &&
               yaffs_strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
               YAFFS_SHORT_NAME_LENGTH) {
        yaffs_strcpy(obj->short_name, name);
    }

    obj->sum = yaffs_calc_name_sum(name);
}
void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
                                const struct yaffs_obj_hdr *oh)
{
#ifdef CONFIG_YAFFS_AUTO_UNICODE
    YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
    memset(tmp_name, 0, sizeof(tmp_name));
    yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
                            YAFFS_MAX_NAME_LENGTH + 1);
    yaffs_set_obj_name(obj, tmp_name);
#else
    yaffs_set_obj_name(obj, oh->name);
#endif
}
Y_LOFF_T yaffs_max_file_size(struct yaffs_dev *dev)
{
    if (sizeof(Y_LOFF_T) < 8)
        return YAFFS_MAX_FILE_SIZE_32;
    else
        return ((Y_LOFF_T) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
}
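
/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * what the limit above means in practice. With a 64-bit Y_LOFF_T the ceiling
 * is YAFFS_MAX_CHUNK_ID * data_bytes_per_chunk; on a build with a 32-bit
 * file offset type it is capped at YAFFS_MAX_FILE_SIZE_32 instead.
 */
static inline Y_LOFF_T yaffs_example_size_limit(struct yaffs_dev *dev)
{
    return yaffs_max_file_size(dev);
}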
/*-------------------- TNODES -------------------

 * List of spare tnodes
 * The list is hooked together using the first pointer
 * in the tnode.
 */

struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
{
    struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);

    if (tn) {
        memset(tn, 0, dev->tnode_size);
        dev->n_tnodes++;
    }

    dev->checkpoint_blocks_required = 0;  /* force recalculation */

    return tn;
}

/* FreeTnode frees up a tnode and puts it back on the free list */
static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
{
    yaffs_free_raw_tnode(dev, tn);
    dev->n_tnodes--;
    dev->checkpoint_blocks_required = 0;  /* force recalculation */
}

static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
{
    yaffs_deinit_raw_tnodes_and_objs(dev);
    dev->n_obj = 0;
    dev->n_tnodes = 0;
}
static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
                               unsigned pos, unsigned val)
{
    u32 *map = (u32 *) tn;
    u32 bit_in_map;
    u32 bit_in_word;
    u32 word_in_map;
    u32 mask;

    pos &= YAFFS_TNODES_LEVEL0_MASK;
    val >>= dev->chunk_grp_bits;

    bit_in_map = pos * dev->tnode_width;
    word_in_map = bit_in_map / 32;
    bit_in_word = bit_in_map & (32 - 1);

    mask = dev->tnode_mask << bit_in_word;

    map[word_in_map] &= ~mask;
    map[word_in_map] |= (mask & (val << bit_in_word));

    if (dev->tnode_width > (32 - bit_in_word)) {
        bit_in_word = (32 - bit_in_word);
        word_in_map++;
        mask = dev->tnode_mask >> bit_in_word;
        map[word_in_map] &= ~mask;
        map[word_in_map] |= (mask & (val >> bit_in_word));
    }
}
u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
                         unsigned pos)
{
    u32 *map = (u32 *) tn;
    u32 bit_in_map;
    u32 bit_in_word;
    u32 word_in_map;
    u32 val;

    pos &= YAFFS_TNODES_LEVEL0_MASK;

    bit_in_map = pos * dev->tnode_width;
    word_in_map = bit_in_map / 32;
    bit_in_word = bit_in_map & (32 - 1);

    val = map[word_in_map] >> bit_in_word;

    if (dev->tnode_width > (32 - bit_in_word)) {
        bit_in_word = (32 - bit_in_word);
        word_in_map++;
        val |= (map[word_in_map] << bit_in_word);
    }

    val &= dev->tnode_mask;
    val <<= dev->chunk_grp_bits;

    return val;
}
/* ------------------- End of individual tnode manipulation -----------------*/

/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
 * The look up tree is represented by the top tnode and the number of top_level
 * in the tree. 0 means only the level 0 tnode is in the tree.
 */
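
/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * the tree-height arithmetic used below. Each level-0 tnode covers
 * 1 << YAFFS_TNODES_LEVEL0_BITS chunks and every internal level multiplies
 * the reach by 1 << YAFFS_TNODES_INTERNAL_BITS, so the required depth is the
 * number of internal shifts needed to bring the chunk id down to zero.
 */
static inline int yaffs_example_required_depth(u32 chunk_id)
{
    int depth = 0;
    u32 x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;

    while (x) {
        x >>= YAFFS_TNODES_INTERNAL_BITS;
        depth++;
    }
    return depth;
}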
/* FindLevel0Tnode finds the level 0 tnode, if one exists. */

struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
                                       struct yaffs_file_var *file_struct,
                                       u32 chunk_id)
{
    struct yaffs_tnode *tn = file_struct->top;
    u32 i;
    int required_depth;
    int level = file_struct->top_level;

    /* Check sane level and chunk Id */
    if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
        return NULL;

    if (chunk_id > YAFFS_MAX_CHUNK_ID)
        return NULL;

    /* First check we're tall enough (ie enough top_level) */

    i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
    required_depth = 0;
    while (i) {
        i >>= YAFFS_TNODES_INTERNAL_BITS;
        required_depth++;
    }

    if (required_depth > file_struct->top_level)
        return NULL;  /* Not tall enough, so we can't find it */

    /* Traverse down to level 0 */
    while (level > 0 && tn) {
        tn = tn->internal[(chunk_id >>
                           (YAFFS_TNODES_LEVEL0_BITS +
                            (level - 1) *
                            YAFFS_TNODES_INTERNAL_BITS)) &
                          YAFFS_TNODES_INTERNAL_MASK];
        level--;
    }

    return tn;
}
/* add_find_tnode_0 finds the level 0 tnode if it exists,
 * otherwise first expands the tree.
 * This happens in two steps:
 *  1. If the tree isn't tall enough, then make it taller.
 *  2. Scan down the tree towards the level 0 tnode adding tnodes if required.
 *
 * Used when modifying the tree.
 *
 * If the tn argument is NULL, then a fresh tnode will be added otherwise the
 * specified tn will be plugged into the ttree.
 */

struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
                                           struct yaffs_file_var *file_struct,
                                           u32 chunk_id,
                                           struct yaffs_tnode *passed_tn)
{
    int required_depth;
    int i;
    int l;
    struct yaffs_tnode *tn;
    u32 x;

    /* Check sane level and page Id */
    if (file_struct->top_level < 0 ||
        file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
        return NULL;

    if (chunk_id > YAFFS_MAX_CHUNK_ID)
        return NULL;

    /* First check we're tall enough (ie enough top_level) */

    x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
    required_depth = 0;
    while (x) {
        x >>= YAFFS_TNODES_INTERNAL_BITS;
        required_depth++;
    }

    if (required_depth > file_struct->top_level) {
        /* Not tall enough, gotta make the tree taller */
        for (i = file_struct->top_level; i < required_depth; i++) {

            tn = yaffs_get_tnode(dev);

            if (tn) {
                tn->internal[0] = file_struct->top;
                file_struct->top = tn;
                file_struct->top_level++;
            } else {
                yaffs_trace(YAFFS_TRACE_ERROR,
                    "yaffs: no more tnodes");
                return NULL;
            }
        }
    }

    /* Traverse down to level 0, adding anything we need */

    l = file_struct->top_level;
    tn = file_struct->top;

    if (l > 0) {
        while (l > 0 && tn) {
            x = (chunk_id >>
                 (YAFFS_TNODES_LEVEL0_BITS +
                  (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
                YAFFS_TNODES_INTERNAL_MASK;

            if ((l > 1) && !tn->internal[x]) {
                /* Add missing non-level-zero tnode */
                tn->internal[x] = yaffs_get_tnode(dev);
                if (!tn->internal[x])
                    return NULL;
            } else if (l == 1) {
                /* Looking from level 1 at level 0 */
                if (passed_tn) {
                    /* If we already have one, release it */
                    if (tn->internal[x])
                        yaffs_free_tnode(dev,
                            tn->internal[x]);
                    tn->internal[x] = passed_tn;

                } else if (!tn->internal[x]) {
                    /* Don't have one, none passed in */
                    tn->internal[x] = yaffs_get_tnode(dev);
                    if (!tn->internal[x])
                        return NULL;
                }
            }

            tn = tn->internal[x];
            l--;
        }
    } else {
        /* top is level 0 */
        if (passed_tn) {
            memcpy(tn, passed_tn,
                   (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
            yaffs_free_tnode(dev, passed_tn);
        }
    }

    return tn;
}
static int yaffs_tags_match(const struct yaffs_ext_tags *tags,
                            unsigned int obj_id, unsigned int chunk_obj)
{
    return (tags->chunk_id == chunk_obj &&
            tags->obj_id == obj_id &&
            !tags->is_deleted) ? 1 : 0;
}
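
/*
 * Editor's note: an illustrative sketch (not part of the original source).
 * A chunk found on NAND only belongs to a given file position if its tags
 * carry the right object id, the right chunk id, and it is not deleted.
 */
static inline int yaffs_example_tags_filter(const struct yaffs_ext_tags *tags)
{
    /* e.g. "is this chunk #3 of object #42, and still live?" */
    return yaffs_tags_match(tags, 42, 3);
}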
static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
                                     struct yaffs_ext_tags *tags, int obj_id,
                                     int inode_chunk)
{
    int j;

    for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
        if (yaffs_check_chunk_bit
            (dev, the_chunk / dev->param.chunks_per_block,
             the_chunk % dev->param.chunks_per_block)) {

            if (dev->chunk_grp_size == 1)
                return the_chunk;
            else {
                if (!yaffs_rd_chunk_tags_nand(dev, the_chunk,
                                              NULL, tags))
                    yaffs_trace(YAFFS_TRACE_ERROR,
                        "yaffs_find_chunk_in_group: unhandled error from rd_chunk_tags_nand");
                if (yaffs_tags_match(tags,
                                     obj_id, inode_chunk)) {
                    /* found it; */
                    return the_chunk;
                }
            }
        }
        the_chunk++;
    }
    return -1;
}
int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
                             struct yaffs_ext_tags *tags)
{
    /*Get the Tnode, then get the level 0 offset chunk offset */
    struct yaffs_tnode *tn;
    int the_chunk = -1;
    struct yaffs_ext_tags local_tags;
    int ret_val = -1;
    struct yaffs_dev *dev = in->my_dev;

    if (!tags) {
        /* Passed a NULL, so use our own tags space */
        tags = &local_tags;
    }

    tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);

    if (!tn)
        return ret_val;

    the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);

    ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
                                        inode_chunk);
    return ret_val;
}
static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
                                     struct yaffs_ext_tags *tags)
{
    /* Get the Tnode, then get the level 0 offset chunk offset */
    struct yaffs_tnode *tn;
    int the_chunk = -1;
    struct yaffs_ext_tags local_tags;
    struct yaffs_dev *dev = in->my_dev;
    int ret_val = -1;

    if (!tags) {
        /* Passed a NULL, so use our own tags space */
        tags = &local_tags;
    }

    tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);

    if (!tn)
        return ret_val;

    the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);

    ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
                                        inode_chunk);

    /* Delete the entry in the filestructure (if found) */
    if (ret_val != -1)
        yaffs_load_tnode_0(dev, tn, inode_chunk, 0);

    return ret_val;
}
int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
                            int nand_chunk, int in_scan)
{
    /* NB in_scan is zero unless scanning.
     * For forward scanning, in_scan is > 0;
     * for backward scanning in_scan is < 0
     *
     * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
     */

    struct yaffs_tnode *tn;
    struct yaffs_dev *dev = in->my_dev;
    int existing_cunk;
    struct yaffs_ext_tags existing_tags;
    struct yaffs_ext_tags new_tags;
    unsigned existing_serial, new_serial;

    if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
        /* Just ignore an attempt at putting a chunk into a non-file
         * during scanning.
         * If it is not during Scanning then something went wrong!
         */
        if (!in_scan) {
            yaffs_trace(YAFFS_TRACE_ERROR,
                "yaffs tragedy:attempt to put data chunk into a non-file"
                );
            BUG();
        }

        yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
        return YAFFS_OK;
    }

    tn = yaffs_add_find_tnode_0(dev,
                                &in->variant.file_variant,
                                inode_chunk, NULL);
    if (!tn)
        return YAFFS_FAIL;

    if (!nand_chunk)
        /* Dummy insert, bail now */
        return YAFFS_OK;

    existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);

    if (in_scan != 0) {
        /* If we're scanning then we need to test for duplicates
         * NB This does not need to be efficient since it should only
         * happen when the power fails during a write, then only one
         * chunk should ever be affected.
         *
         * Correction for YAFFS2: This could happen quite a lot and we
         * need to think about efficiency! TODO
         * Update: For backward scanning we don't need to re-read tags
         * so this is quite cheap.
         */

        if (existing_cunk > 0) {
            /* NB Right now existing chunk will not be real
             * chunk_id if the chunk group size > 1
             * thus we have to do a FindChunkInFile to get the
             * real chunk id.
             *
             * We have a duplicate now we need to decide which
             * one to use:
             *
             * Backwards scanning YAFFS2: The old one is what
             * we use, dump the new one.
             * YAFFS1: Get both sets of tags and compare serial
             * numbers.
             */

            if (in_scan > 0) {
                /* Only do this for forward scanning */
                if (!yaffs_rd_chunk_tags_nand(dev,
                                              nand_chunk,
                                              NULL, &new_tags))
                    yaffs_trace(YAFFS_TRACE_ERROR,
                        "yaffs_put_chunk_in_file: unhandled error from rd_chunk_tags_nand");

                /* Do a proper find */
                existing_cunk =
                    yaffs_find_chunk_in_file(in, inode_chunk,
                                             &existing_tags);
            }

            if (existing_cunk <= 0) {
                /*Hoosterman - how did this happen? */

                yaffs_trace(YAFFS_TRACE_ERROR,
                    "yaffs tragedy: existing chunk < 0 in scan"
                    );
            }

            /* NB The deleted flags should be false, otherwise
             * the chunks will not be loaded during a scan
             */

            if (in_scan > 0) {
                new_serial = new_tags.serial_number;
                existing_serial = existing_tags.serial_number;
            }

            if ((in_scan > 0) &&
                (existing_cunk <= 0 ||
                 ((existing_serial + 1) & 3) == new_serial)) {
                /* Forward scanning.
                 * Use new one.
                 * Delete the old one and drop through to
                 * update the tnode
                 */
                yaffs_chunk_del(dev, existing_cunk, 1,
                                __LINE__);
            } else {
                /* Backward scanning or we want to use the
                 * existing one.
                 * Delete the new one and return early so that
                 * the tnode isn't changed
                 */
                yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
                return YAFFS_OK;
            }
        }

    }

    if (existing_cunk == 0)
        in->n_data_chunks++;

    yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);

    return YAFFS_OK;
}
static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
{
    struct yaffs_block_info *the_block;
    unsigned block_no;

    yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);

    block_no = chunk / dev->param.chunks_per_block;
    the_block = yaffs_get_block_info(dev, block_no);
    if (the_block) {
        the_block->soft_del_pages++;
        dev->n_free_chunks++;
        yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
    }
}
/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
 * the chunks in the file.
 * All soft deleting does is increment the block's softdelete count and pulls
 * the chunk out of the tnode.
 * Thus, essentially this is the same as DeleteWorker except that the chunks
 * are soft deleted.
 */

static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
                                 u32 level, int chunk_offset)
{
    int i;
    int the_chunk;
    int all_done = 1;
    struct yaffs_dev *dev = in->my_dev;

    if (!tn)
        return 1;

    if (level > 0) {
        for (i = YAFFS_NTNODES_INTERNAL - 1;
             all_done && i >= 0;
             i--) {
            if (tn->internal[i]) {
                all_done =
                    yaffs_soft_del_worker(in,
                        tn->internal[i],
                        level - 1,
                        (chunk_offset <<
                         YAFFS_TNODES_INTERNAL_BITS)
                        + i);
                if (all_done) {
                    yaffs_free_tnode(dev,
                        tn->internal[i]);
                    tn->internal[i] = NULL;
                } else {
                    /* Can this happen? */
                }
            }
        }
        return (all_done) ? 1 : 0;
    }

    /* level 0 */
    for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
        the_chunk = yaffs_get_group_base(dev, tn, i);
        if (the_chunk) {
            yaffs_soft_del_chunk(dev, the_chunk);
            yaffs_load_tnode_0(dev, tn, i, 0);
        }
    }
    return 1;
}
static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
{
    struct yaffs_dev *dev = obj->my_dev;
    struct yaffs_obj *parent;

    yaffs_verify_obj_in_dir(obj);
    parent = obj->parent;

    yaffs_verify_dir(parent);

    if (dev && dev->param.remove_obj_fn)
        dev->param.remove_obj_fn(obj);

    list_del_init(&obj->siblings);
    obj->parent = NULL;

    yaffs_verify_dir(parent);
}
void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
{
    if (!directory) {
        yaffs_trace(YAFFS_TRACE_ALWAYS,
            "tragedy: Trying to add an object to a null pointer directory"
            );
        BUG();
        return;
    }
    if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
        yaffs_trace(YAFFS_TRACE_ALWAYS,
            "tragedy: Trying to add an object to a non-directory"
            );
        BUG();
    }

    if (obj->siblings.prev == NULL) {
        /* Not initialised */
        BUG();
    }

    yaffs_verify_dir(directory);

    yaffs_remove_obj_from_dir(obj);

    /* Now add it */
    list_add(&obj->siblings, &directory->variant.dir_variant.children);
    obj->parent = directory;

    if (directory == obj->my_dev->unlinked_dir
        || directory == obj->my_dev->del_dir) {
        obj->unlinked = 1;
        obj->my_dev->n_unlinked_files++;
        obj->rename_allowed = 0;
    }

    yaffs_verify_dir(directory);
    yaffs_verify_obj_in_dir(obj);
}
static int yaffs_change_obj_name(struct yaffs_obj *obj,
                                 struct yaffs_obj *new_dir,
                                 const YCHAR *new_name, int force, int shadows)
{
    int unlink_op;
    int del_op;
    struct yaffs_obj *existing_target;

    if (new_dir == NULL)
        new_dir = obj->parent;  /* use the old directory */

    if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
        yaffs_trace(YAFFS_TRACE_ALWAYS,
            "tragedy: yaffs_change_obj_name: new_dir is not a directory"
            );
        BUG();
    }

    unlink_op = (new_dir == obj->my_dev->unlinked_dir);
    del_op = (new_dir == obj->my_dev->del_dir);

    existing_target = yaffs_find_by_name(new_dir, new_name);

    /* If the object is a file going into the unlinked directory,
     * then it is OK to just stuff it in since duplicate names are OK.
     * else only proceed if the new name does not exist and we're putting
     * it into a directory.
     */
    if (!(unlink_op || del_op || force ||
          shadows > 0 || !existing_target) ||
        new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
        return YAFFS_FAIL;

    yaffs_set_obj_name(obj, new_name);
    obj->dirty = 1;
    yaffs_add_obj_to_dir(new_dir, obj);

    if (unlink_op)
        obj->unlinked = 1;

    /* If it is a deletion then we mark it as a shrink for gc */
    if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
        return YAFFS_OK;

    return YAFFS_FAIL;
}
/*------------------------ Short Operations Cache ------------------------------
 * In many situations where there is no high level buffering a lot of
 * reads might be short sequential reads, and a lot of writes may be short
 * sequential writes. eg. scanning/writing a jpeg file.
 * In these cases, a short read/write cache can provide a huge performance
 * benefit with dumb-as-a-rock code.
 * In Linux, the page cache provides read buffering and the short op cache
 * provides write buffering.
 *
 * There are a small number (~10) of cache chunks per device so that we don't
 * need a very intelligent search.
 */
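
/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * the LRU selection idea the cache code below relies on: pick the unlocked
 * entry with the smallest last_use stamp.
 */
static inline struct yaffs_cache *yaffs_example_pick_lru(struct yaffs_dev *dev)
{
    struct yaffs_cache *oldest = NULL;
    int i;

    for (i = 0; i < dev->param.n_caches; i++) {
        struct yaffs_cache *c = &dev->cache[i];

        if (c->object && !c->locked &&
            (!oldest || c->last_use < oldest->last_use))
            oldest = c;
    }
    return oldest;
}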
static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
{
    struct yaffs_dev *dev = obj->my_dev;
    int i;
    struct yaffs_cache *cache;
    int n_caches = obj->my_dev->param.n_caches;

    for (i = 0; i < n_caches; i++) {
        cache = &dev->cache[i];
        if (cache->object == obj && cache->dirty)
            return 1;
    }

    return 0;
}
static void yaffs_flush_single_cache(struct yaffs_cache *cache, int discard)
{
    if (!cache || cache->locked)
        return;

    /* Write it out and free it up if need be.*/
    if (cache->dirty) {
        yaffs_wr_data_obj(cache->object,
                          cache->chunk_id,
                          cache->data,
                          cache->n_bytes,
                          1);
        cache->dirty = 0;
    }

    if (discard)
        cache->object = NULL;
}
static void yaffs_flush_file_cache(struct yaffs_obj *obj, int discard)
{
    struct yaffs_dev *dev = obj->my_dev;
    int i;
    struct yaffs_cache *cache;
    int n_caches = obj->my_dev->param.n_caches;

    if (n_caches < 1)
        return;

    /* Find the chunks for this object and flush them. */
    for (i = 0; i < n_caches; i++) {
        cache = &dev->cache[i];
        if (cache->object == obj)
            yaffs_flush_single_cache(cache, discard);
    }
}
void yaffs_flush_whole_cache(struct yaffs_dev *dev, int discard)
{
    struct yaffs_obj *obj;
    int n_caches = dev->param.n_caches;
    int i;

    /* Find a dirty object in the cache and flush it...
     * until there are no further dirty objects.
     */
    do {
        obj = NULL;
        for (i = 0; i < n_caches && !obj; i++) {
            if (dev->cache[i].object && dev->cache[i].dirty)
                obj = dev->cache[i].object;
        }
        if (obj)
            yaffs_flush_file_cache(obj, discard);
    } while (obj);
}
/* Grab us an unused cache chunk for use.
 * First look for an empty one.
 * Then look for the least recently used non-dirty one.
 * Then look for the least recently used dirty one...., flush and look again.
 */
static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
{
    int i;

    if (dev->param.n_caches > 0) {
        for (i = 0; i < dev->param.n_caches; i++) {
            if (!dev->cache[i].object)
                return &dev->cache[i];
        }
    }

    return NULL;
}

static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
{
    struct yaffs_cache *cache;
    int usage;
    int i;

    if (dev->param.n_caches < 1)
        return NULL;

    /* First look for an unused cache */

    cache = yaffs_grab_chunk_worker(dev);

    if (cache)
        return cache;

    /*
     * They were all in use.
     * Find the LRU cache and flush it if it is dirty.
     */

    usage = -1;
    cache = NULL;

    for (i = 0; i < dev->param.n_caches; i++) {
        if (dev->cache[i].object &&
            !dev->cache[i].locked &&
            (dev->cache[i].last_use < usage || !cache)) {
            usage = dev->cache[i].last_use;
            cache = &dev->cache[i];
        }
    }

#if 1
    yaffs_flush_single_cache(cache, 1);
#else
    yaffs_flush_file_cache(cache->object, 1);
    cache = yaffs_grab_chunk_worker(dev);
#endif

    return cache;
}
/* Find a cached chunk */
static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
                                                  int chunk_id)
{
    struct yaffs_dev *dev = obj->my_dev;
    int i;

    if (dev->param.n_caches < 1)
        return NULL;

    for (i = 0; i < dev->param.n_caches; i++) {
        if (dev->cache[i].object == obj &&
            dev->cache[i].chunk_id == chunk_id) {
            dev->cache_hits++;

            return &dev->cache[i];
        }
    }
    return NULL;
}
/* Mark the chunk for the least recently used algorithm */
static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
                            int is_write)
{
    int i;

    if (dev->param.n_caches < 1)
        return;

    if (dev->cache_last_use < 0 ||
        dev->cache_last_use > 100000000) {
        /* Reset the cache usages */
        for (i = 1; i < dev->param.n_caches; i++)
            dev->cache[i].last_use = 0;

        dev->cache_last_use = 0;
    }
    dev->cache_last_use++;
    cache->last_use = dev->cache_last_use;

    if (is_write)
        cache->dirty = 1;
}
/* Invalidate a single cache page.
 * Do this when a whole page gets written,
 * ie the short cache for this page is no longer valid.
 */
static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
{
    struct yaffs_cache *cache;

    if (object->my_dev->param.n_caches > 0) {
        cache = yaffs_find_chunk_cache(object, chunk_id);

        if (cache)
            cache->object = NULL;
    }
}
/* Invalidate all the cache pages associated with this object
 * Do this whenever the file is deleted or resized.
 */
static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
{
    int i;
    struct yaffs_dev *dev = in->my_dev;

    if (dev->param.n_caches > 0) {
        /* Invalidate it. */
        for (i = 0; i < dev->param.n_caches; i++) {
            if (dev->cache[i].object == in)
                dev->cache[i].object = NULL;
        }
    }
}
static void yaffs_unhash_obj(struct yaffs_obj *obj)
{
    int bucket;
    struct yaffs_dev *dev = obj->my_dev;

    /* If it is still linked into the bucket list, free from the list */
    if (!list_empty(&obj->hash_link)) {
        list_del_init(&obj->hash_link);
        bucket = yaffs_hash_fn(obj->obj_id);
        dev->obj_bucket[bucket].count--;
    }
}
/* FreeObject frees up a Object and puts it back on the free list */
static void yaffs_free_obj(struct yaffs_obj *obj)
{
    struct yaffs_dev *dev;

    if (!obj) {
        BUG();
        return;
    }
    dev = obj->my_dev;
    yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
        obj, obj->my_inode);
    if (obj->parent)
        BUG();
    if (!list_empty(&obj->siblings))
        BUG();

    if (obj->my_inode) {
        /* We're still hooked up to a cached inode.
         * Don't delete now, but mark for later deletion
         */
        obj->defered_free = 1;
        return;
    }

    yaffs_unhash_obj(obj);

    yaffs_free_raw_obj(dev, obj);
    dev->n_obj--;
    dev->checkpoint_blocks_required = 0;  /* force recalculation */
}
void yaffs_handle_defered_free(struct yaffs_obj *obj)
{
    if (obj->defered_free)
        yaffs_free_obj(obj);
}
static int yaffs_generic_obj_del(struct yaffs_obj *in)
{
    /* Invalidate the file's data in the cache, without flushing. */
    yaffs_invalidate_whole_cache(in);

    if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
        /* Move to unlinked directory so we have a deletion record */
        yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
                              0);
    }

    yaffs_remove_obj_from_dir(in);
    yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
    in->hdr_chunk = 0;

    yaffs_free_obj(in);
    return YAFFS_OK;
}
static void yaffs_soft_del_file(struct yaffs_obj *obj)
{
    if (!obj->deleted ||
        obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
        obj->soft_del)
        return;

    if (obj->n_data_chunks <= 0) {
        /* Empty file with no duplicate object headers,
         * just delete it immediately */
        yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
        obj->variant.file_variant.top = NULL;
        yaffs_trace(YAFFS_TRACE_TRACING,
            "yaffs: Deleting empty file %d",
            obj->obj_id);
        yaffs_generic_obj_del(obj);
    } else {
        yaffs_soft_del_worker(obj,
                              obj->variant.file_variant.top,
                              obj->variant.
                              file_variant.top_level, 0);
        obj->soft_del = 1;
    }
}
/* Pruning removes any part of the file structure tree that is beyond the
 * bounds of the file (ie that does not point to chunks).
 *
 * A file should only get pruned when its size is reduced.
 *
 * Before pruning, the chunks must be pulled from the tree and the
 * level 0 tnode entries must be zeroed out.
 * Could also use this for file deletion, but that's probably better handled
 * by a special case.
 *
 * This function is recursive. For levels > 0 the function is called again on
 * any sub-tree. For level == 0 we just check if the sub-tree has data.
 * If there is no data in a subtree then it is pruned.
 */
static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
                                              struct yaffs_tnode *tn, u32 level,
                                              int del0)
{
    int i;
    int has_data;

    if (!tn)
        return tn;

    has_data = 0;

    if (level > 0) {
        for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
            if (tn->internal[i]) {
                tn->internal[i] =
                    yaffs_prune_worker(dev,
                        tn->internal[i],
                        level - 1,
                        (i == 0) ? del0 : 1);
            }

            if (tn->internal[i])
                has_data++;
        }
    } else {
        int tnode_size_u32 = dev->tnode_size / sizeof(u32);
        u32 *map = (u32 *) tn;

        for (i = 0; !has_data && i < tnode_size_u32; i++) {
            if (map[i])
                has_data++;
        }
    }

    if (has_data == 0 && del0) {
        /* Free and return NULL */
        yaffs_free_tnode(dev, tn);
        tn = NULL;
    }
    return tn;
}
static int yaffs_prune_tree(struct yaffs_dev *dev,
                            struct yaffs_file_var *file_struct)
{
    int i;
    int has_data;
    int done = 0;
    struct yaffs_tnode *tn;

    if (file_struct->top_level < 1)
        return YAFFS_OK;

    file_struct->top =
        yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);

    /* Now we have a tree with all the non-zero branches NULL but
     * the height is the same as it was.
     * Let's see if we can trim internal tnodes to shorten the tree.
     * We can do this if only the 0th element in the tnode is in use
     * (ie all the non-zero are NULL)
     */

    while (file_struct->top_level && !done) {
        tn = file_struct->top;

        has_data = 0;
        for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
            if (tn->internal[i])
                has_data++;
        }

        if (!has_data) {
            file_struct->top = tn->internal[0];
            file_struct->top_level--;
            yaffs_free_tnode(dev, tn);
        } else {
            done = 1;
        }
    }

    return YAFFS_OK;
}
/*-------------------- End of File Structure functions.-------------------*/

/* alloc_empty_obj gets us a clean Object.*/
static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
{
    struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);

    if (!obj)
        return obj;

    dev->n_obj++;

    /* Now sweeten it up... */

    memset(obj, 0, sizeof(struct yaffs_obj));
    obj->being_created = 1;

    obj->my_dev = dev;
    obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
    INIT_LIST_HEAD(&(obj->hard_links));
    INIT_LIST_HEAD(&(obj->hash_link));
    INIT_LIST_HEAD(&obj->siblings);

    /* Now make the directory sane */
    if (dev->root_dir) {
        obj->parent = dev->root_dir;
        list_add(&(obj->siblings),
                 &dev->root_dir->variant.dir_variant.children);
    }

    /* Add it to the lost and found directory.
     * NB Can't put root or lost-n-found in lost-n-found so
     * check if lost-n-found exists first
     */
    if (dev->lost_n_found)
        yaffs_add_obj_to_dir(dev->lost_n_found, obj);

    obj->being_created = 0;

    dev->checkpoint_blocks_required = 0;  /* force recalculation */

    return obj;
}
static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
{
    int i;
    int l = 999;
    int lowest = 999999;

    /* Search for the shortest list or one that
     * isn't too long.
     */

    for (i = 0; i < 10 && lowest > 4; i++) {
        dev->bucket_finder++;
        dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
        if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
            lowest = dev->obj_bucket[dev->bucket_finder].count;
            l = dev->bucket_finder;
        }
    }

    return l;
}
static int yaffs_new_obj_id(struct yaffs_dev *dev)
{
    int bucket = yaffs_find_nice_bucket(dev);
    int found = 0;
    struct list_head *i;
    u32 n = (u32) bucket;

    /* Now find an object value that has not already been taken
     * by scanning the list.
     */

    while (!found) {
        found = 1;
        n += YAFFS_NOBJECT_BUCKETS;
        if (1 || dev->obj_bucket[bucket].count > 0) {
            list_for_each(i, &dev->obj_bucket[bucket].list) {
                /* If there is already one in the list */
                if (i && list_entry(i, struct yaffs_obj,
                                    hash_link)->obj_id == n) {
                    found = 0;
                }
            }
        }
    }
    return n;
}
static void yaffs_hash_obj(struct yaffs_obj *in)
{
    int bucket = yaffs_hash_fn(in->obj_id);
    struct yaffs_dev *dev = in->my_dev;

    list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
    dev->obj_bucket[bucket].count++;
}
struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
{
    int bucket = yaffs_hash_fn(number);
    struct list_head *i;
    struct yaffs_obj *in;

    list_for_each(i, &dev->obj_bucket[bucket].list) {
        /* Look if it is in the list */
        in = list_entry(i, struct yaffs_obj, hash_link);
        if (in->obj_id == number) {
            /* Don't show if it is defered free */
            if (in->defered_free)
                return NULL;
            return in;
        }
    }

    return NULL;
}
static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
                                       enum yaffs_obj_type type)
{
    struct yaffs_obj *the_obj = NULL;
    struct yaffs_tnode *tn = NULL;

    if (number < 0)
        number = yaffs_new_obj_id(dev);

    if (type == YAFFS_OBJECT_TYPE_FILE) {
        tn = yaffs_get_tnode(dev);
        if (!tn)
            return NULL;
    }

    the_obj = yaffs_alloc_empty_obj(dev);
    if (!the_obj) {
        if (tn)
            yaffs_free_tnode(dev, tn);
        return NULL;
    }

    the_obj->rename_allowed = 1;
    the_obj->unlink_allowed = 1;
    the_obj->obj_id = number;
    yaffs_hash_obj(the_obj);
    the_obj->variant_type = type;
    yaffs_load_current_time(the_obj, 1, 1);

    switch (type) {
    case YAFFS_OBJECT_TYPE_FILE:
        the_obj->variant.file_variant.file_size = 0;
        the_obj->variant.file_variant.scanned_size = 0;
        the_obj->variant.file_variant.shrink_size =
                        yaffs_max_file_size(dev);
        the_obj->variant.file_variant.top_level = 0;
        the_obj->variant.file_variant.top = tn;
        break;
    case YAFFS_OBJECT_TYPE_DIRECTORY:
        INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
        INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
        break;
    case YAFFS_OBJECT_TYPE_SYMLINK:
    case YAFFS_OBJECT_TYPE_HARDLINK:
    case YAFFS_OBJECT_TYPE_SPECIAL:
        /* No action required */
        break;
    case YAFFS_OBJECT_TYPE_UNKNOWN:
        /* todo this should not happen */
        break;
    }
    return the_obj;
}
static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
                                               int number, u32 mode)
{
    struct yaffs_obj *obj =
        yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);

    if (!obj)
        return NULL;

    obj->fake = 1;  /* it is fake so it might not use NAND */
    obj->rename_allowed = 0;
    obj->unlink_allowed = 0;
    obj->deleted = 0;
    obj->unlinked = 0;
    obj->yst_mode = mode;
    obj->my_dev = dev;
    obj->hdr_chunk = 0;  /* Not a valid chunk. */
    return obj;
}
static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
{
    int i;

    dev->n_obj = 0;
    dev->n_tnodes = 0;
    yaffs_init_raw_tnodes_and_objs(dev);

    for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
        INIT_LIST_HEAD(&dev->obj_bucket[i].list);
        dev->obj_bucket[i].count = 0;
    }
}
struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
                                                 int number,
                                                 enum yaffs_obj_type type)
{
    struct yaffs_obj *the_obj = NULL;

    if (number > 0)
        the_obj = yaffs_find_by_number(dev, number);

    if (!the_obj)
        the_obj = yaffs_new_obj(dev, number, type);

    return the_obj;
}
YCHAR *yaffs_clone_str(const YCHAR *str)
{
    YCHAR *new_str = NULL;
    int len;

    if (!str)
        str = _Y("");

    len = yaffs_strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
    new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
    if (new_str) {
        yaffs_strncpy(new_str, str, len);
        new_str[len] = 0;
    }
    return new_str;
}
/*
 *yaffs_update_parent() handles fixing a directory's mtime and ctime when a new
 * link (ie. name) is created or deleted in the directory.
 *
 * ie.
 *   create dir/a : update dir's mtime/ctime
 *   rm dir/a:   update dir's mtime/ctime
 *   modify dir/a: don't update dir's mtime/ctime
 *
 * This can be handled immediately or defered. Defering helps reduce the number
 * of updates when many files in a directory are changed within a brief period.
 *
 * If the directory updating is defered then yaffs_update_dirty_dirs must be
 * called periodically.
 */

static void yaffs_update_parent(struct yaffs_obj *obj)
{
    struct yaffs_dev *dev;

    if (!obj)
        return;
    dev = obj->my_dev;
    obj->dirty = 1;
    yaffs_load_current_time(obj, 0, 1);
    if (dev->param.defered_dir_update) {
        struct list_head *link = &obj->variant.dir_variant.dirty;

        if (list_empty(link)) {
            list_add(link, &dev->dirty_dirs);
            yaffs_trace(YAFFS_TRACE_BACKGROUND,
                "Added object %d to dirty directories",
                obj->obj_id);
        }

    } else {
        yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
    }
}
void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
{
    struct list_head *link;
    struct yaffs_obj *obj;
    struct yaffs_dir_var *d_s;
    union yaffs_obj_var *o_v;

    yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");

    while (!list_empty(&dev->dirty_dirs)) {
        link = dev->dirty_dirs.next;
        list_del_init(link);

        d_s = list_entry(link, struct yaffs_dir_var, dirty);
        o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
        obj = list_entry(o_v, struct yaffs_obj, variant);

        yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
            obj->obj_id);

        if (obj->dirty)
            yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
    }
}
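
/*
 * Editor's note: an illustrative sketch (not part of the original source).
 * When defered_dir_update is enabled, directory timestamp writes are queued
 * on dev->dirty_dirs; a background task is expected to flush them from time
 * to time with yaffs_update_dirty_dirs().
 */
static inline void yaffs_example_background_tick(struct yaffs_dev *dev)
{
    if (dev->param.defered_dir_update)
        yaffs_update_dirty_dirs(dev);
}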
/*
 * Mknod (create) a new object.
 * equiv_obj only has meaning for a hard link;
 * alias_str only has meaning for a symlink.
 * rdev only has meaning for devices (a subset of special objects)
 */

static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
                                          struct yaffs_obj *parent,
                                          const YCHAR *name,
                                          u32 mode, u32 uid, u32 gid,
                                          struct yaffs_obj *equiv_obj,
                                          const YCHAR *alias_str, u32 rdev)
{
    struct yaffs_obj *in;
    YCHAR *str = NULL;
    struct yaffs_dev *dev = parent->my_dev;

    /* Check if the entry exists.
     * If it does then fail the call since we don't want a dup. */
    if (yaffs_find_by_name(parent, name))
        return NULL;

    if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
        str = yaffs_clone_str(alias_str);
        if (!str)
            return NULL;
    }

    in = yaffs_new_obj(dev, -1, type);

    if (!in) {
        kfree(str);
        return NULL;
    }

    in->variant_type = type;

    in->yst_mode = mode;

    yaffs_attribs_init(in, gid, uid, rdev);

    in->n_data_chunks = 0;

    yaffs_set_obj_name(in, name);
    in->dirty = 1;

    yaffs_add_obj_to_dir(parent, in);

    in->my_dev = parent->my_dev;

    switch (type) {
    case YAFFS_OBJECT_TYPE_SYMLINK:
        in->variant.symlink_variant.alias = str;
        break;
    case YAFFS_OBJECT_TYPE_HARDLINK:
        in->variant.hardlink_variant.equiv_obj = equiv_obj;
        in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
        list_add(&in->hard_links, &equiv_obj->hard_links);
        break;
    case YAFFS_OBJECT_TYPE_FILE:
    case YAFFS_OBJECT_TYPE_DIRECTORY:
    case YAFFS_OBJECT_TYPE_SPECIAL:
    case YAFFS_OBJECT_TYPE_UNKNOWN:
        /* do nothing */
        break;
    }

    if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
        /* Could not create the object header, fail */
        yaffs_del_obj(in);
        in = NULL;
    }

    if (in)
        yaffs_update_parent(parent);

    return in;
}
struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
                                    const YCHAR *name, u32 mode, u32 uid,
                                    u32 gid)
{
    return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
                            uid, gid, NULL, NULL, 0);
}

struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
                                   u32 mode, u32 uid, u32 gid)
{
    return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
                            mode, uid, gid, NULL, NULL, 0);
}

struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
                                       const YCHAR *name, u32 mode, u32 uid,
                                       u32 gid, u32 rdev)
{
    return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
                            uid, gid, NULL, NULL, rdev);
}

struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
                                       const YCHAR *name, u32 mode, u32 uid,
                                       u32 gid, const YCHAR *alias)
{
    return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
                            uid, gid, NULL, alias, 0);
}
/* yaffs_link_obj returns the object id of the equivalent object.*/
struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
                                 struct yaffs_obj *equiv_obj)
{
    /* Get the real object in case we were fed a hard link obj */
    equiv_obj = yaffs_get_equivalent_obj(equiv_obj);

    if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
                         parent, name, 0, 0, 0,
                         equiv_obj, NULL, 0))
        return equiv_obj;

    return NULL;
}
/*---------------------- Block Management and Page Allocation -------------*/

static void yaffs_deinit_blocks(struct yaffs_dev *dev)
{
    if (dev->block_info_alt && dev->block_info)
        vfree(dev->block_info);
    else
        kfree(dev->block_info);

    dev->block_info_alt = 0;

    dev->block_info = NULL;

    if (dev->chunk_bits_alt && dev->chunk_bits)
        vfree(dev->chunk_bits);
    else
        kfree(dev->chunk_bits);
    dev->chunk_bits_alt = 0;
    dev->chunk_bits = NULL;
}
static int yaffs_init_blocks(struct yaffs_dev *dev)
{
    int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;

    dev->block_info = NULL;
    dev->chunk_bits = NULL;
    dev->alloc_block = -1;  /* force it to get a new one */

    /* If the first allocation strategy fails, try the alternate one */
    dev->block_info =
        kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
    if (!dev->block_info) {
        dev->block_info =
            vmalloc(n_blocks * sizeof(struct yaffs_block_info));
        dev->block_info_alt = 1;
    } else {
        dev->block_info_alt = 0;
    }

    if (!dev->block_info)
        goto alloc_error;

    /* Set up dynamic blockinfo stuff. Round up bytes. */
    dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
    dev->chunk_bits =
        kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
    if (!dev->chunk_bits) {
        dev->chunk_bits =
            vmalloc(dev->chunk_bit_stride * n_blocks);
        dev->chunk_bits_alt = 1;
    } else {
        dev->chunk_bits_alt = 0;
    }

    if (!dev->chunk_bits)
        goto alloc_error;

    memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
    memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
    return YAFFS_OK;

alloc_error:
    yaffs_deinit_blocks(dev);
    return YAFFS_FAIL;
}
void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
{
    struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);

    int erased_ok = 0;
    u32 i;

    /* If the block is still healthy erase it and mark as clean.
     * If the block has had a data failure, then retire it.
     */

    yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
        "yaffs_block_became_dirty block %d state %d %s",
        block_no, bi->block_state,
        (bi->needs_retiring) ? "needs retiring" : "");

    yaffs2_clear_oldest_dirty_seq(dev, bi);

    bi->block_state = YAFFS_BLOCK_STATE_DIRTY;

    /* If this is the block being garbage collected then stop gc'ing */
    if ((u32)block_no == dev->gc_block)
        dev->gc_block = 0;

    /* If this block is currently the best candidate for gc
     * then drop as a candidate */
    if ((u32)block_no == dev->gc_dirtiest) {
        dev->gc_dirtiest = 0;
        dev->gc_pages_in_use = 0;
    }

    if (!bi->needs_retiring) {
        yaffs2_checkpt_invalidate(dev);
        erased_ok = yaffs_erase_block(dev, block_no);
        if (!erased_ok) {
            dev->n_erase_failures++;
            yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
                "**>> Erasure failed %d", block_no);
        }
    }

    /* Verify erasure if needed */
    if (erased_ok &&
        ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
         !yaffs_skip_verification(dev))) {
        for (i = 0; i < dev->param.chunks_per_block; i++) {
            if (!yaffs_check_chunk_erased(dev,
                    block_no * dev->param.chunks_per_block + i)) {
                yaffs_trace(YAFFS_TRACE_ERROR,
                    ">>Block %d erasure supposedly OK, but chunk %d not erased",
                    block_no, i);
            }
        }
    }

    if (!erased_ok) {
        /* We lost a block of free space */
        dev->n_free_chunks -= dev->param.chunks_per_block;
        yaffs_retire_block(dev, block_no);
        yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
            "**>> Block %d retired", block_no);
        return;
    }

    /* Clean it up... */
    bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
    bi->seq_number = 0;
    dev->n_erased_blocks++;
    bi->pages_in_use = 0;
    bi->soft_del_pages = 0;
    bi->has_shrink_hdr = 0;
    bi->skip_erased_check = 1;  /* Clean, so no need to check */
    bi->gc_prioritise = 0;
    bi->has_summary = 0;

    yaffs_clear_chunk_bits(dev, block_no);

    yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
}
static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
                                         struct yaffs_block_info *bi,
                                         int old_chunk, u8 *buffer)
{
    int new_chunk;
    int mark_flash = 1;
    struct yaffs_ext_tags tags;
    struct yaffs_obj *object;
    int matching_chunk;
    int ret_val = YAFFS_OK;

    memset(&tags, 0, sizeof(tags));
    if (!yaffs_rd_chunk_tags_nand(dev, old_chunk,
                                  buffer, &tags))
        yaffs_trace(YAFFS_TRACE_ERROR,
            "yaffs_gc_process_chunk: unhandled error from rd_chunk_tags_nand");

    object = yaffs_find_by_number(dev, tags.obj_id);

    yaffs_trace(YAFFS_TRACE_GC_DETAIL,
        "Collecting chunk in block %d, %d %d %d ",
        dev->gc_chunk, tags.obj_id,
        tags.chunk_id, tags.n_bytes);

    if (object && !yaffs_skip_verification(dev)) {
        if (tags.chunk_id == 0)
            matching_chunk =
                object->hdr_chunk;
        else if (object->soft_del)
            /* Defeat the test */
            matching_chunk = old_chunk;
        else
            matching_chunk =
                yaffs_find_chunk_in_file
                (object, tags.chunk_id,
                 NULL);

        if (old_chunk != matching_chunk)
            yaffs_trace(YAFFS_TRACE_ERROR,
                "gc: page in gc mismatch: %d %d %d %d",
                old_chunk,
                matching_chunk,
                tags.obj_id,
                tags.chunk_id);
    }

    if (!object) {
        yaffs_trace(YAFFS_TRACE_ERROR,
            "page %d in gc has no object: %d %d %d ",
            old_chunk,
            tags.obj_id, tags.chunk_id,
            tags.n_bytes);
    }

    if (object &&
        object->deleted &&
        object->soft_del && tags.chunk_id != 0) {
        /* Data chunk in a soft deleted file,
         * throw it away.
         * It's a soft deleted data chunk,
         * No need to copy this, just forget
         * about it and fix up the object.
         */

        /* Free chunks already includes
         * softdeleted chunks, how ever this
         * chunk is going to soon be really
         * deleted which will increment free
         * chunks. We have to decrement free
         * chunks so this works out properly.
         */
        dev->n_free_chunks--;
        bi->soft_del_pages--;

        object->n_data_chunks--;
        if (object->n_data_chunks <= 0) {
            /* remeber to clean up obj */
            dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
            dev->n_clean_ups++;
        }
        mark_flash = 0;
    } else if (object) {
        /* It's either a data chunk in a live
         * file or an ObjectHeader, so we're
         * interested in it.
         * NB Need to keep the ObjectHeaders of
         * deleted files until the whole file
         * has been deleted off
         */
        tags.serial_number++;
        dev->n_gc_copies++;

        if (tags.chunk_id == 0) {
            /* It is an object Id,
             * We need to nuke the
             * shrinkheader flags since its
             * work is done.
             * Also need to clean up
             * shadowing.
             */
            struct yaffs_obj_hdr *oh;
            oh = (struct yaffs_obj_hdr *) buffer;

            oh->is_shrink = 0;
            tags.extra_is_shrink = 0;
            oh->shadows_obj = 0;
            oh->inband_shadowed_obj_id = 0;
            tags.extra_shadows = 0;

            /* Update file size */
            if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
                yaffs_oh_size_load(oh,
                    object->variant.file_variant.file_size);
                tags.extra_file_size =
                    object->variant.file_variant.file_size;
            }

            yaffs_verify_oh(object, oh, &tags, 1);
            new_chunk =
                yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
        } else {
            new_chunk =
                yaffs_write_new_chunk(dev, buffer, &tags, 1);
        }

        if (new_chunk < 0) {
            ret_val = YAFFS_FAIL;
        } else {

            /* Now fix up the Tnodes etc. */

            if (tags.chunk_id == 0) {
                /* It's a header */
                object->hdr_chunk = new_chunk;
                object->serial = tags.serial_number;
            } else {
                /* It's a data chunk */
                yaffs_put_chunk_in_file(object, tags.chunk_id,
                                        new_chunk, 0);
            }
        }
    }
    if (ret_val == YAFFS_OK)
        yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
    return ret_val;
}
static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
{
    int old_chunk;
    int ret_val = YAFFS_OK;
    u32 i;
    int is_checkpt_block;
    int max_copies;
    int chunks_before = yaffs_get_erased_chunks(dev);
    int chunks_after;
    struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);

    is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);

    yaffs_trace(YAFFS_TRACE_TRACING,
        "Collecting block %d, in use %d, shrink %d, whole_block %d",
        block, bi->pages_in_use, bi->has_shrink_hdr,
        whole_block);

    /*yaffs_verify_free_chunks(dev); */

    if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
        bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;

    bi->has_shrink_hdr = 0;  /* clear the flag so that the block can erase */

    dev->gc_disable = 1;

    yaffs_summary_gc(dev, block);

    if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
        yaffs_trace(YAFFS_TRACE_TRACING,
            "Collecting block %d that has no chunks in use",
            block);
        yaffs_block_became_dirty(dev, block);
    } else {
        u8 *buffer = yaffs_get_temp_buffer(dev);

        yaffs_verify_blk(dev, bi, block);

        max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
        old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;

        for (/* init already done */ ;
             ret_val == YAFFS_OK &&
             dev->gc_chunk < (u32)dev->param.chunks_per_block &&
             (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
             max_copies > 0;
             dev->gc_chunk++, old_chunk++) {
            if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
                /* Page is in use and might need to be copied */
                max_copies--;
                ret_val = yaffs_gc_process_chunk(dev, bi,
                                                 old_chunk, buffer);
            }
        }
        yaffs_release_temp_buffer(dev, buffer);
    }

    yaffs_verify_collected_blk(dev, bi, block);

    if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
        /*
         * The gc did not complete. Set block state back to FULL
         * because checkpointing does not restore gc.
         */
        bi->block_state = YAFFS_BLOCK_STATE_FULL;
    } else {
        /* The gc completed. */
        /* Do any required cleanups */
        for (i = 0; i < dev->n_clean_ups; i++) {
            /* Time to delete the file too */
            struct yaffs_obj *object =
                yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
            if (object) {
                yaffs_free_tnode(dev,
                    object->variant.file_variant.top);
                object->variant.file_variant.top = NULL;
                yaffs_trace(YAFFS_TRACE_GC,
                    "yaffs: About to finally delete object %d",
                    object->obj_id);
                yaffs_generic_obj_del(object);
                object->my_dev->n_deleted_files--;
            }

        }
        chunks_after = yaffs_get_erased_chunks(dev);
        if (chunks_before >= chunks_after)
            yaffs_trace(YAFFS_TRACE_GC,
                "gc did not increase free chunks before %d after %d",
                chunks_before, chunks_after);
        dev->gc_block = 0;
        dev->gc_chunk = 0;
        dev->n_clean_ups = 0;
    }

    dev->gc_disable = 0;

    return ret_val;
}
/*
 * find_gc_block() selects the dirtiest block (or close enough)
 * for garbage collection.
 */
static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
				    int aggressive, int background)
{
	u32 i;
	u32 iterations;
	u32 pages_used;
	u32 threshold;
	unsigned selected = 0;
	int prioritised = 0;
	int prioritised_exist = 0;
	struct yaffs_block_info *bi;

	/* First let's see if we need to grab a prioritised block */
	if (dev->has_pending_prioritised_gc && !aggressive) {
		dev->gc_dirtiest = 0;
		bi = dev->block_info;
		for (i = dev->internal_start_block;
		     i <= dev->internal_end_block && !selected; i++) {
			if (bi->gc_prioritise) {
				prioritised_exist = 1;
				if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
				    yaffs_block_ok_for_gc(dev, bi)) {
					selected = i;
					prioritised = 1;
				}
			}
			bi++;
		}

		/*
		 * If there is a prioritised block and none was selected then
		 * this happened because there is at least one old dirty block
		 * gumming up the works. Let's gc the oldest dirty block.
		 */
		if (prioritised_exist &&
		    !selected && dev->oldest_dirty_block > 0)
			selected = dev->oldest_dirty_block;

		if (!prioritised_exist)	/* None found, so we can clear this */
			dev->has_pending_prioritised_gc = 0;
	}

	/* If we're doing aggressive GC then we are happy to take a less-dirty
	 * block, and search harder.
	 * else (leisurely gc), then we only bother to do this if the
	 * block has only a few pages in use.
	 */
	if (!selected) {
		u32 max_threshold;
		int n_blocks =
			dev->internal_end_block - dev->internal_start_block + 1;

		if (aggressive) {
			threshold = dev->param.chunks_per_block;
			iterations = n_blocks;
		} else {
			if (background)
				max_threshold = dev->param.chunks_per_block / 2;
			else
				max_threshold = dev->param.chunks_per_block / 8;

			if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
				max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;

			threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
			if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
				threshold = YAFFS_GC_PASSIVE_THRESHOLD;
			if (threshold > max_threshold)
				threshold = max_threshold;

			iterations = n_blocks / 16 + 1;
			if (iterations > 100)
				iterations = 100;
		}

		for (i = 0;
		     i < iterations &&
		     (dev->gc_dirtiest < 1 ||
		      dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
		     i++) {
			dev->gc_block_finder++;
			if (dev->gc_block_finder < (u32)dev->internal_start_block ||
			    (int)dev->gc_block_finder > dev->internal_end_block)
				dev->gc_block_finder =
					dev->internal_start_block;

			bi = yaffs_get_block_info(dev, dev->gc_block_finder);

			pages_used = bi->pages_in_use - bi->soft_del_pages;

			if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
			    pages_used < dev->param.chunks_per_block &&
			    (dev->gc_dirtiest < 1 ||
			     pages_used < (int)dev->gc_pages_in_use) &&
			    yaffs_block_ok_for_gc(dev, bi)) {
				dev->gc_dirtiest = dev->gc_block_finder;
				dev->gc_pages_in_use = pages_used;
			}
		}

		if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= (u32)threshold)
			selected = dev->gc_dirtiest;
	}

	/*
	 * If nothing has been selected for a while, try the oldest dirty
	 * because that's gumming up the works.
	 */
	if (!selected && dev->param.is_yaffs2 &&
	    dev->gc_not_done >= (background ? 10 : 20)) {
		yaffs2_find_oldest_dirty_seq(dev);
		if (dev->oldest_dirty_block > 0) {
			selected = dev->oldest_dirty_block;
			dev->gc_dirtiest = selected;
			dev->oldest_dirty_gc_count++;
			bi = yaffs_get_block_info(dev, selected);
			dev->gc_pages_in_use =
				bi->pages_in_use - bi->soft_del_pages;
		} else {
			dev->gc_not_done = 0;
		}
	}

	if (selected) {
		yaffs_trace(YAFFS_TRACE_GC,
			"GC Selected block %d with %d free, prioritised:%d",
			selected,
			dev->param.chunks_per_block - dev->gc_pages_in_use,
			prioritised);

		dev->gc_dirtiest = 0;
		dev->gc_pages_in_use = 0;
		dev->gc_not_done = 0;
		if (dev->refresh_skip > 0)
			dev->refresh_skip--;
	} else {
		dev->gc_not_done++;
		yaffs_trace(YAFFS_TRACE_GC,
			"GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
			dev->gc_block_finder, dev->gc_not_done, threshold,
			dev->gc_dirtiest, dev->gc_pages_in_use,
			dev->oldest_dirty_block, background ? " bg" : "");
	}

	return selected;
}
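
/*
 * Selection order used above: a prioritised block if one is collectable,
 * otherwise the dirtiest FULL block found within this pass's threshold,
 * and as a last resort the oldest dirty block once gc has been passed
 * over often enough (gc_not_done).
 */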
/* New garbage collector
 * If we're very low on erased blocks then we do aggressive garbage collection
 * otherwise we do "leisurely" garbage collection.
 * Aggressive gc looks further (whole array) and will accept less dirty blocks.
 * Passive gc only inspects smaller areas and only accepts more dirty blocks.
 *
 * The idea is to help clear out space in a more spread-out manner.
 * It is not clear that it really does anything useful.
 */
static int yaffs_check_gc(struct yaffs_dev *dev, int background)
{
	int aggressive = 0;
	int gc_ok = YAFFS_OK;
	int max_tries = 0;
	int min_erased;
	int erased_chunks;
	int checkpt_block_adjust;

	if (dev->param.gc_control_fn &&
	    (dev->param.gc_control_fn(dev) & 1) == 0)
		return YAFFS_OK;

	if (dev->gc_disable)
		/* Bail out so we don't get recursive gc */
		return YAFFS_OK;

	/* This loop should pass the first time.
	 * Only loops here if the collection does not increase space.
	 */
	do {
		max_tries++;

		checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);

		min_erased =
			dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
		erased_chunks =
			dev->n_erased_blocks * dev->param.chunks_per_block;

		/* If we need a block soon then do aggressive gc. */
		if (dev->n_erased_blocks < min_erased)
			aggressive = 1;
		else {
			if (!background
			    && erased_chunks > (dev->n_free_chunks / 4))
				break;

			if (dev->gc_skip > 20)
				dev->gc_skip = 20;
			if (erased_chunks < dev->n_free_chunks / 2 ||
			    dev->gc_skip < 1 || background)
				aggressive = 0;
			else {
				dev->gc_skip--;
				break;
			}
		}

		/* If we don't already have a block being gc'd then see if we
		 * should start another */
		if (dev->gc_block < 1 && !aggressive) {
			dev->gc_block = yaffs2_find_refresh_block(dev);
			dev->gc_chunk = 0;
			dev->n_clean_ups = 0;
		}
		if (dev->gc_block < 1) {
			dev->gc_block =
				yaffs_find_gc_block(dev, aggressive, background);
			dev->gc_chunk = 0;
			dev->n_clean_ups = 0;
		}

		if (dev->gc_block > 0) {
			if (!aggressive)
				dev->passive_gc_count++;

			yaffs_trace(YAFFS_TRACE_GC,
				"yaffs: GC n_erased_blocks %d aggressive %d",
				dev->n_erased_blocks, aggressive);

			gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
		}

		if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
		    dev->gc_block > 0) {
			yaffs_trace(YAFFS_TRACE_GC,
				"yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
				dev->n_erased_blocks, max_tries,
				dev->gc_block);
		}
	} while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
		 (dev->gc_block > 0) && (max_tries < 2));

	return aggressive ? gc_ok : YAFFS_OK;
}
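
/*
 * Aggressive collection kicks in when the erased-block count drops below
 * the reserve needed for n_reserved_blocks plus checkpoint space;
 * otherwise collection stays passive and may be skipped entirely while
 * plenty of free chunks remain.
 */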
/*
 * Garbage collects. Intended to be called from a background thread.
 * Returns non-zero if at least half the free chunks are erased.
 */
int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
{
	int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;

	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);

	yaffs_check_gc(dev, 1);
	return erased_chunks > dev->n_free_chunks / 2;
}
/*-------------------- Data file manipulation -----------------*/

static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 *buffer)
{
	int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);

	if (nand_chunk >= 0)
		return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
						buffer, NULL);

	yaffs_trace(YAFFS_TRACE_NANDACCESS,
		"Chunk %d not found zero instead",
		nand_chunk);
	/* get sane (zero) data if you read a hole */
	memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
	return 0;
}
void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
		     int lyn)
{
	int block;
	int page;
	struct yaffs_ext_tags tags;
	struct yaffs_block_info *bi;

	block = chunk_id / dev->param.chunks_per_block;
	page = chunk_id % dev->param.chunks_per_block;

	if (!yaffs_check_chunk_bit(dev, block, page))
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"Deleting invalid chunk %d", chunk_id);

	bi = yaffs_get_block_info(dev, block);

	yaffs2_update_oldest_dirty_seq(dev, block, bi);

	yaffs_trace(YAFFS_TRACE_DELETION,
		"line %d delete of chunk %d",
		lyn, chunk_id);

	if (!dev->param.is_yaffs2 && mark_flash &&
	    bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
		memset(&tags, 0, sizeof(tags));
		tags.is_deleted = 1;
		yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
		yaffs_handle_chunk_update(dev, chunk_id, &tags);
	} else {
		dev->n_unmarked_deletions++;
	}

	/* Pull out of the management area.
	 * If the whole block became dirty, this will kick off an erasure.
	 */
	if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
	    bi->block_state == YAFFS_BLOCK_STATE_FULL ||
	    bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
	    bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
		dev->n_free_chunks++;
		yaffs_clear_chunk_bit(dev, block, page);
		bi->pages_in_use--;

		if (bi->pages_in_use == 0 &&
		    !bi->has_shrink_hdr &&
		    bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
		    bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
			yaffs_block_became_dirty(dev, block);
		}
	}
}
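
/*
 * Note the yaffs1-only branch above: a deletion marker (is_deleted tag) is
 * written back to NAND unless the block is already being collected,
 * whereas yaffs2 never marks deletions in flash and just counts them in
 * n_unmarked_deletions.
 */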
static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
			     const u8 *buffer, int n_bytes, int use_reserve)
{
	/* Find old chunk Need to do this to get serial number
	 * Write new one and patch into tree.
	 * Invalidate old tags.
	 */
	int prev_chunk_id;
	struct yaffs_ext_tags prev_tags;
	int new_chunk_id;
	struct yaffs_ext_tags new_tags;
	struct yaffs_dev *dev = in->my_dev;

	yaffs_check_gc(dev, 0);

	/* Get the previous chunk at this location in the file if it exists.
	 * If it does not exist then put a zero into the tree. This creates
	 * the tnode now, rather than later when it is harder to clean up.
	 */
	prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
	if (prev_chunk_id < 1 &&
	    !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
		return 0;

	/* Set up new tags */
	memset(&new_tags, 0, sizeof(new_tags));

	new_tags.chunk_id = inode_chunk;
	new_tags.obj_id = in->obj_id;
	new_tags.serial_number =
		(prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
	new_tags.n_bytes = n_bytes;

	if (n_bytes < 1 || (u32)n_bytes > dev->param.total_bytes_per_chunk) {
		yaffs_trace(YAFFS_TRACE_ERROR,
			"Writing %d bytes to chunk!!!!!!!!!",
			n_bytes);
	}

	new_chunk_id =
		yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);

	if (new_chunk_id > 0) {
		yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);

		if (prev_chunk_id > 0)
			yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);

		yaffs_verify_file_sane(in);
	}
	return new_chunk_id;
}
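
/*
 * The serial number carried in the new tags (previous serial + 1) is what
 * lets a later scan prefer the newer copy of this chunk should both the
 * new and the old copy survive a power loss between the write and the
 * delete above.
 */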
static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
				const YCHAR *name, const void *value, int size,
				int flags)
{
	struct yaffs_xattr_mod xmod;
	int result;

	xmod.set = set;
	xmod.name = name;
	xmod.data = value;
	xmod.size = size;
	xmod.flags = flags;
	xmod.result = -ENOSPC;

	result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);

	if (result > 0)
		return xmod.result;
	else
		return -ENOSPC;
}
static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
				   struct yaffs_xattr_mod *xmod)
{
	int retval = 0;
	int x_offs = sizeof(struct yaffs_obj_hdr);
	struct yaffs_dev *dev = obj->my_dev;
	int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
	char *x_buffer = buffer + x_offs;

	if (xmod->set)
		retval =
			nval_set(x_buffer, x_size, xmod->name, xmod->data,
				 xmod->size, xmod->flags);
	else
		retval = nval_del(x_buffer, x_size, xmod->name);

	obj->has_xattr = nval_hasvalues(x_buffer, x_size);
	obj->xattr_known = 1;
	xmod->result = retval;

	return retval;
}
static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
				  void *value, int size)
{
	char *buffer = NULL;
	int result;
	struct yaffs_ext_tags tags;
	struct yaffs_dev *dev = obj->my_dev;
	int x_offs = sizeof(struct yaffs_obj_hdr);
	int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
	char *x_buffer;
	int retval = 0;

	if (obj->hdr_chunk < 1)
		return -ENODATA;

	/* If we know that the object has no xattribs then don't do all the
	 * reading and parsing.
	 */
	if (obj->xattr_known && !obj->has_xattr) {
		if (name)
			return -ENODATA;
		else
			return 0;
	}

	buffer = (char *)yaffs_get_temp_buffer(dev);

	result =
		yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);

	if (result != YAFFS_OK) {
		retval = -ENOENT;
	} else {
		x_buffer = buffer + x_offs;

		if (!obj->xattr_known) {
			obj->has_xattr = nval_hasvalues(x_buffer, x_size);
			obj->xattr_known = 1;
		}

		if (name)
			retval = nval_get(x_buffer, x_size, name, value, size);
		else
			retval = nval_list(x_buffer, x_size, value, size);
	}
	yaffs_release_temp_buffer(dev, (u8 *) buffer);
	return retval;
}
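
/*
 * xattribs live in the space left in the object-header chunk after the
 * yaffs_obj_hdr itself (x_offs/x_size above), so getting or listing them
 * is just a header-chunk read followed by nval_* parsing, and setting or
 * deleting one is folded into the next object-header write.
 */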
int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR *name,
		      const void *value, int size, int flags)
{
	return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
}

int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR *name)
{
	return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
}

int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR *name, void *value,
		      int size)
{
	return yaffs_do_xattrib_fetch(obj, name, value, size);
}

int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
{
	return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
}
static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
{
	u8 *buf;
	struct yaffs_obj_hdr *oh;
	struct yaffs_dev *dev;
	struct yaffs_ext_tags tags;
	/* int alloc_failed = 0; */

	if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
		return;

	dev = in->my_dev;
	in->lazy_loaded = 0;
	buf = yaffs_get_temp_buffer(dev);

	if (!yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags))
		yaffs_trace(YAFFS_TRACE_ERROR, "yaffs_check_obj_details_loaded: unhandled error from rd_chunk_tags_nand");

	oh = (struct yaffs_obj_hdr *)buf;

	in->yst_mode = oh->yst_mode;
	yaffs_load_attribs(in, oh);
	yaffs_set_obj_name_from_oh(in, oh);

	if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
		in->variant.symlink_variant.alias =
			yaffs_clone_str(oh->alias);
		if (!in->variant.symlink_variant.alias)
			/* alloc_failed = 1; */
			yaffs_trace(YAFFS_TRACE_ERROR, "yaffs_check_obj_details_loaded: alloc_failed = 1??");
	}
	yaffs_release_temp_buffer(dev, buf);
}
/* UpdateObjectHeader updates the header on NAND for an object.
 * If name is not NULL, then that new name is used.
 */
int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
		    int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
{
	struct yaffs_block_info *bi;
	struct yaffs_dev *dev = in->my_dev;
	int prev_chunk_id;
	int ret_val = 0;
	int new_chunk_id;
	struct yaffs_ext_tags new_tags;
	struct yaffs_ext_tags old_tags;
	const YCHAR *alias = NULL;
	u8 *buffer = NULL;
	YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
	struct yaffs_obj_hdr *oh = NULL;
	Y_LOFF_T file_size = 0;

	yaffs_strcpy(old_name, _Y("silly old name"));

	if (in->fake && in != dev->root_dir && !force && !xmod)
		return ret_val;

	yaffs_check_gc(dev, 0);
	yaffs_check_obj_details_loaded(in);

	buffer = yaffs_get_temp_buffer(in->my_dev);
	oh = (struct yaffs_obj_hdr *)buffer;

	prev_chunk_id = in->hdr_chunk;

	if (prev_chunk_id > 0) {
		if (!yaffs_rd_chunk_tags_nand(dev, prev_chunk_id, buffer, &old_tags))
			yaffs_trace(YAFFS_TRACE_ERROR, "yaffs_update_oh: unhandled error from rd_chunk_tags_nand");

		yaffs_verify_oh(in, oh, &old_tags, 0);
		memcpy(old_name, oh->name, sizeof(oh->name));
		memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
	} else {
		memset(buffer, 0xff, dev->data_bytes_per_chunk);
	}

	oh->type = in->variant_type;
	oh->yst_mode = in->yst_mode;
	oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;

	yaffs_load_attribs_oh(oh, in);

	if (in->parent)
		oh->parent_obj_id = in->parent->obj_id;
	else
		oh->parent_obj_id = 0;

	if (name && *name) {
		memset(oh->name, 0, sizeof(oh->name));
		yaffs_load_oh_from_name(dev, oh->name, name);
	} else if (prev_chunk_id > 0) {
		memcpy(oh->name, old_name, sizeof(oh->name));
	} else {
		memset(oh->name, 0, sizeof(oh->name));
	}

	oh->is_shrink = is_shrink;

	switch (in->variant_type) {
	case YAFFS_OBJECT_TYPE_UNKNOWN:
		/* Should not happen */
		break;
	case YAFFS_OBJECT_TYPE_FILE:
		if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
		    oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
			file_size = in->variant.file_variant.file_size;
		yaffs_oh_size_load(oh, file_size);
		break;
	case YAFFS_OBJECT_TYPE_HARDLINK:
		oh->equiv_id = in->variant.hardlink_variant.equiv_id;
		break;
	case YAFFS_OBJECT_TYPE_SPECIAL:
		break;
	case YAFFS_OBJECT_TYPE_DIRECTORY:
		break;
	case YAFFS_OBJECT_TYPE_SYMLINK:
		alias = in->variant.symlink_variant.alias;
		if (!alias)
			alias = _Y("no alias");
		yaffs_strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
		oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
		break;
	}

	/* process any xattrib modifications */
	if (xmod)
		yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);

	memset(&new_tags, 0, sizeof(new_tags));

	new_tags.chunk_id = 0;
	new_tags.obj_id = in->obj_id;
	new_tags.serial_number = in->serial;

	/* Add extra info for file header */
	new_tags.extra_available = 1;
	new_tags.extra_parent_id = oh->parent_obj_id;
	new_tags.extra_file_size = file_size;
	new_tags.extra_is_shrink = oh->is_shrink;
	new_tags.extra_equiv_id = oh->equiv_id;
	new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
	new_tags.extra_obj_type = in->variant_type;
	yaffs_verify_oh(in, oh, &new_tags, 1);

	/* Create new chunk in NAND */
	new_chunk_id =
		yaffs_write_new_chunk(dev, buffer, &new_tags,
				      (prev_chunk_id > 0) ? 1 : 0);

	if (buffer)
		yaffs_release_temp_buffer(dev, buffer);

	if (new_chunk_id < 0)
		return new_chunk_id;

	in->hdr_chunk = new_chunk_id;

	if (prev_chunk_id > 0)
		yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);

	if (!yaffs_obj_cache_dirty(in))
		in->dirty = 0;

	/* If this was a shrink, then mark the block
	 * that the chunk lives on */
	if (is_shrink) {
		bi = yaffs_get_block_info(in->my_dev,
			new_chunk_id /
			in->my_dev->param.chunks_per_block);
		bi->has_shrink_hdr = 1;
	}

	return new_chunk_id;
}
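
/*
 * The extra_* tag fields written above duplicate the key header fields
 * (parent, file size, shrink/shadow flags, equivalent id, object type) so
 * that yaffs2 backward scanning can recover most object state from the
 * tags without having to read the header data itself.
 */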
/*--------------------- File read/write ------------------------
 * Read and write have very similar structures.
 * In general the read/write has three parts to it
 * An incomplete chunk to start with (if the read/write is not chunk-aligned)
 * Some complete chunks
 * An incomplete chunk to end off with
 *
 * Curve-balls: the first chunk might also be the last chunk.
 */

int yaffs_file_rd(struct yaffs_obj *in, u8 *buffer, Y_LOFF_T offset, int n_bytes)
{
	int chunk;
	u32 start;
	int n_copy;
	int n = n_bytes;
	int n_done = 0;
	struct yaffs_cache *cache;
	struct yaffs_dev *dev;

	dev = in->my_dev;

	while (n > 0) {
		yaffs_addr_to_chunk(dev, offset, &chunk, &start);
		chunk++;

		/* OK now check for the curveball where the start and end are in
		 * the same chunk.
		 */
		if ((start + n) < (u32)dev->data_bytes_per_chunk)
			n_copy = n;
		else
			n_copy = dev->data_bytes_per_chunk - start;

		cache = yaffs_find_chunk_cache(in, chunk);

		/* If the chunk is already in the cache or it is less than
		 * a whole chunk or we're using inband tags then use the cache
		 * (if there is caching) else bypass the cache.
		 */
		if (cache || n_copy != dev->data_bytes_per_chunk ||
		    dev->param.inband_tags) {
			if (dev->param.n_caches > 0) {
				/* If we can't find the data in the cache,
				 * then load it up. */
				if (!cache) {
					cache =
						yaffs_grab_chunk_cache(in->my_dev);
					cache->object = in;
					cache->chunk_id = chunk;
					yaffs_rd_data_obj(in, chunk,
							  cache->data);
				}

				yaffs_use_cache(dev, cache, 0);

				memcpy(buffer, &cache->data[start], n_copy);
			} else {
				/* Read into the local buffer then copy.. */
				u8 *local_buffer =
					yaffs_get_temp_buffer(dev);
				yaffs_rd_data_obj(in, chunk, local_buffer);

				memcpy(buffer, &local_buffer[start], n_copy);

				yaffs_release_temp_buffer(dev, local_buffer);
			}
		} else {
			/* A full chunk. Read directly into the buffer. */
			yaffs_rd_data_obj(in, chunk, buffer);
		}

		n -= n_copy;
		offset += n_copy;
		buffer += n_copy;
		n_done += n_copy;
	}
	return n_done;
}
int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, Y_LOFF_T offset,
		     int n_bytes, int write_through)
{
	int chunk;
	u32 start;
	int n_copy;
	int n = n_bytes;
	int n_done = 0;
	int n_writeback;
	Y_LOFF_T start_write = offset;
	int chunk_written = 0;
	u32 n_bytes_read;
	Y_LOFF_T chunk_start;
	struct yaffs_dev *dev;

	dev = in->my_dev;

	while (n > 0 && chunk_written >= 0) {
		yaffs_addr_to_chunk(dev, offset, &chunk, &start);

		if (((Y_LOFF_T)chunk) *
		    (s32)(dev->data_bytes_per_chunk + start) != offset ||
		    (s32)start >= dev->data_bytes_per_chunk) {
			yaffs_trace(YAFFS_TRACE_ERROR,
				"AddrToChunk of offset %ud gives chunk %d start %d",
				(unsigned int)offset, chunk, start);
		}
		chunk++;	/* File pos to chunk in file offset */

		/* OK now check for the curveball where the start and end are in
		 * the same chunk.
		 */
		if ((start + n) < (u32)dev->data_bytes_per_chunk) {
			n_copy = n;

			/* Now calculate how many bytes to write back....
			 * If we're overwriting and not writing to the end of
			 * file then we need to write back as much as was there
			 * before.
			 */
			chunk_start = (((Y_LOFF_T)(chunk - 1)) *
				       dev->data_bytes_per_chunk);

			if (chunk_start > in->variant.file_variant.file_size)
				n_bytes_read = 0;	/* Past end of file */
			else
				n_bytes_read =
					in->variant.file_variant.file_size -
					chunk_start;

			if ((int)n_bytes_read > dev->data_bytes_per_chunk)
				n_bytes_read = dev->data_bytes_per_chunk;

			n_writeback =
				(n_bytes_read >
				 (start + n)) ? n_bytes_read : (start + n);

			if (n_writeback < 0 ||
			    n_writeback > dev->data_bytes_per_chunk)
				chunk_written = -1;
		} else {
			n_copy = dev->data_bytes_per_chunk - start;
			n_writeback = dev->data_bytes_per_chunk;
		}

		if (n_copy != dev->data_bytes_per_chunk ||
		    !dev->param.cache_bypass_aligned ||
		    dev->param.inband_tags) {
			/* An incomplete start or end chunk (or maybe both
			 * start and end chunk), or we're using inband tags,
			 * or we're forcing writes through the cache,
			 * so we want to use the cache buffers.
			 */
			if (dev->param.n_caches > 0) {
				struct yaffs_cache *cache;

				/* If we can't find the data in the cache, then
				 * load the cache. */
				cache = yaffs_find_chunk_cache(in, chunk);

				if (!cache &&
				    yaffs_check_alloc_available(dev, 1)) {
					cache = yaffs_grab_chunk_cache(dev);
					cache->object = in;
					cache->chunk_id = chunk;
					yaffs_rd_data_obj(in, chunk,
							  cache->data);
				} else if (cache &&
					   !cache->dirty &&
					   !yaffs_check_alloc_available(dev,
									1)) {
					/* Drop the cache if it was a read cache
					 * item and no space check has been made
					 * for it.
					 */
					cache = NULL;
				}

				if (cache) {
					yaffs_use_cache(dev, cache, 1);

					memcpy(&cache->data[start], buffer,
					       n_copy);

					cache->n_bytes = n_writeback;

					if (write_through) {
						chunk_written =
							yaffs_wr_data_obj
							(cache->object,
							 cache->chunk_id,
							 cache->data,
							 cache->n_bytes, 1);
					}
				} else {
					chunk_written = -1;	/* fail write */
				}
			} else {
				/* An incomplete start or end chunk (or maybe
				 * both start and end chunk). Read into the
				 * local buffer then copy over and write back.
				 */
				u8 *local_buffer = yaffs_get_temp_buffer(dev);

				yaffs_rd_data_obj(in, chunk, local_buffer);
				memcpy(&local_buffer[start], buffer, n_copy);

				chunk_written =
					yaffs_wr_data_obj(in, chunk,
							  local_buffer,
							  n_writeback, 0);

				yaffs_release_temp_buffer(dev, local_buffer);
			}
		} else {
			/* A full chunk. Write directly from the buffer. */
			chunk_written =
				yaffs_wr_data_obj(in, chunk, buffer,
						  dev->data_bytes_per_chunk, 0);

			/* Since we've overwritten the cached data,
			 * we better invalidate it. */
			yaffs_invalidate_chunk_cache(in, chunk);
		}

		if (chunk_written >= 0) {
			n -= n_copy;
			offset += n_copy;
			buffer += n_copy;
			n_done += n_copy;
		}
	}

	/* Update file object */
	if ((start_write + n_done) > in->variant.file_variant.file_size)
		in->variant.file_variant.file_size = (start_write + n_done);

	return n_done;
}
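
/*
 * With caching enabled, write_through forces each cached chunk out to
 * flash immediately via yaffs_wr_data_obj(); otherwise the data sits
 * dirty in the chunk cache until the cache is flushed (see
 * yaffs_flush_file() below).
 */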
int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, Y_LOFF_T offset,
		  int n_bytes, int write_through)
{
	yaffs2_handle_hole(in, offset);
	return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
}
/* ---------------------- File resizing stuff ------------------ */

static void yaffs_prune_chunks(struct yaffs_obj *in, Y_LOFF_T new_size)
{
	struct yaffs_dev *dev = in->my_dev;
	Y_LOFF_T old_size = in->variant.file_variant.file_size;
	int i;
	int chunk_id;
	int last_del;
	int start_del;
	u32 dummy;

	yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy);

	yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1,
			    &start_del, &dummy);
	last_del++;
	start_del++;

	/* Delete backwards so that we don't end up with holes if
	 * power is lost part-way through the operation.
	 */
	for (i = last_del; i >= start_del; i--) {
		/* NB this could be optimised somewhat,
		 * eg. could retrieve the tags and write them without
		 * using yaffs_chunk_del
		 */
		chunk_id = yaffs_find_del_file_chunk(in, i, NULL);

		if (chunk_id < 1)
			continue;

		if (chunk_id <
		    (dev->internal_start_block * dev->param.chunks_per_block) ||
		    chunk_id >=
		    ((dev->internal_end_block + 1) *
		     dev->param.chunks_per_block)) {
			yaffs_trace(YAFFS_TRACE_ALWAYS,
				"Found daft chunk_id %d for %d",
				chunk_id, i);
		} else {
			in->n_data_chunks--;
			yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
		}
	}
}
void yaffs_resize_file_down(struct yaffs_obj *obj, Y_LOFF_T new_size)
{
	int new_full;
	u32 new_partial;
	struct yaffs_dev *dev = obj->my_dev;

	yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);

	yaffs_prune_chunks(obj, new_size);

	if (new_partial != 0) {
		int last_chunk = 1 + new_full;
		u8 *local_buffer = yaffs_get_temp_buffer(dev);

		/* Rewrite the last chunk with its new size and zero pad */
		yaffs_rd_data_obj(obj, last_chunk, local_buffer);
		memset(local_buffer + new_partial, 0,
		       dev->data_bytes_per_chunk - new_partial);

		yaffs_wr_data_obj(obj, last_chunk, local_buffer,
				  new_partial, 1);

		yaffs_release_temp_buffer(dev, local_buffer);
	}

	obj->variant.file_variant.file_size = new_size;

	yaffs_prune_tree(dev, &obj->variant.file_variant);
}
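
/*
 * Shrinking rewrites the final partial chunk zero-padded out to the chunk
 * boundary, so stale bytes beyond the new end of file are not left behind
 * in flash.
 */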
int yaffs_resize_file(struct yaffs_obj *in, Y_LOFF_T new_size)
{
	struct yaffs_dev *dev = in->my_dev;
	Y_LOFF_T old_size = in->variant.file_variant.file_size;

	yaffs_flush_file_cache(in, 1);
	yaffs_invalidate_whole_cache(in);

	yaffs_check_gc(dev, 0);

	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
		return YAFFS_FAIL;

	if (new_size == old_size)
		return YAFFS_OK;

	if (new_size > old_size) {
		yaffs2_handle_hole(in, new_size);
		in->variant.file_variant.file_size = new_size;
	} else {
		/* new_size < old_size */
		yaffs_resize_file_down(in, new_size);
	}

	/* Write a new object header to reflect the resize.
	 * show we've shrunk the file, if need be
	 * Do this only if the file is not in the deleted directories
	 * and is not shadowed.
	 */
	if (in->parent &&
	    in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
	    in->parent->obj_id != YAFFS_OBJECTID_DELETED)
		yaffs_update_oh(in, NULL, 0, 0, 0, NULL);

	return YAFFS_OK;
}
int yaffs_flush_file(struct yaffs_obj *in,
		     int update_time, int data_sync, int discard_cache)
{
	if (in->dirty) {
		yaffs_flush_file_cache(in, discard_cache);

		if (data_sync)
			return YAFFS_OK;

		if (update_time)
			yaffs_load_current_time(in, 0, 0);

		return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ?
			YAFFS_OK : YAFFS_FAIL;
	}
	return YAFFS_OK;
}
3835 /* yaffs_del_file deletes the whole file data
3836 * and the inode associated with the file.
3837 * It does not delete the links associated with the file.
3839 static int yaffs_unlink_file_if_needed(struct yaffs_obj
*in
)
3843 struct yaffs_dev
*dev
= in
->my_dev
;
3850 yaffs_change_obj_name(in
, in
->my_dev
->del_dir
,
3851 _Y("deleted"), 0, 0);
3852 yaffs_trace(YAFFS_TRACE_TRACING
,
3853 "yaffs: immediate deletion of file %d",
3856 in
->my_dev
->n_deleted_files
++;
3857 if (dev
->param
.disable_soft_del
|| dev
->param
.is_yaffs2
)
3858 yaffs_resize_file(in
, 0);
3859 yaffs_soft_del_file(in
);
3862 yaffs_change_obj_name(in
, in
->my_dev
->unlinked_dir
,
3863 _Y("unlinked"), 0, 0);
3868 static int yaffs_del_file(struct yaffs_obj
*in
)
3870 int ret_val
= YAFFS_OK
;
3871 int deleted
; /* Need to cache value on stack if in is freed */
3872 struct yaffs_dev
*dev
= in
->my_dev
;
3874 if (dev
->param
.disable_soft_del
|| dev
->param
.is_yaffs2
)
3875 yaffs_resize_file(in
, 0);
3877 if (in
->n_data_chunks
> 0) {
3878 /* Use soft deletion if there is data in the file.
3879 * That won't be the case if it has been resized to zero.
3882 ret_val
= yaffs_unlink_file_if_needed(in
);
3884 deleted
= in
->deleted
;
3886 if (ret_val
== YAFFS_OK
&& in
->unlinked
&& !in
->deleted
) {
3889 in
->my_dev
->n_deleted_files
++;
3890 yaffs_soft_del_file(in
);
3892 return deleted
? YAFFS_OK
: YAFFS_FAIL
;
3894 /* The file has no data chunks so we toss it immediately */
3895 yaffs_free_tnode(in
->my_dev
, in
->variant
.file_variant
.top
);
3896 in
->variant
.file_variant
.top
= NULL
;
3897 yaffs_generic_obj_del(in
);
3903 int yaffs_is_non_empty_dir(struct yaffs_obj
*obj
)
3906 obj
->variant_type
== YAFFS_OBJECT_TYPE_DIRECTORY
) &&
3907 !(list_empty(&obj
->variant
.dir_variant
.children
));
3910 static int yaffs_del_dir(struct yaffs_obj
*obj
)
3912 /* First check that the directory is empty. */
3913 if (yaffs_is_non_empty_dir(obj
))
3916 return yaffs_generic_obj_del(obj
);
3919 static int yaffs_del_symlink(struct yaffs_obj
*in
)
3921 kfree(in
->variant
.symlink_variant
.alias
);
3922 in
->variant
.symlink_variant
.alias
= NULL
;
3924 return yaffs_generic_obj_del(in
);
3927 static int yaffs_del_link(struct yaffs_obj
*in
)
3929 /* remove this hardlink from the list associated with the equivalent
3932 list_del_init(&in
->hard_links
);
3933 return yaffs_generic_obj_del(in
);
3936 int yaffs_del_obj(struct yaffs_obj
*obj
)
3940 switch (obj
->variant_type
) {
3941 case YAFFS_OBJECT_TYPE_FILE
:
3942 ret_val
= yaffs_del_file(obj
);
3944 case YAFFS_OBJECT_TYPE_DIRECTORY
:
3945 if (!list_empty(&obj
->variant
.dir_variant
.dirty
)) {
3946 yaffs_trace(YAFFS_TRACE_BACKGROUND
,
3947 "Remove object %d from dirty directories",
3949 list_del_init(&obj
->variant
.dir_variant
.dirty
);
3951 return yaffs_del_dir(obj
);
3953 case YAFFS_OBJECT_TYPE_SYMLINK
:
3954 ret_val
= yaffs_del_symlink(obj
);
3956 case YAFFS_OBJECT_TYPE_HARDLINK
:
3957 ret_val
= yaffs_del_link(obj
);
3959 case YAFFS_OBJECT_TYPE_SPECIAL
:
3960 ret_val
= yaffs_generic_obj_del(obj
);
3962 case YAFFS_OBJECT_TYPE_UNKNOWN
:
3964 break; /* should not happen. */
3970 static void yaffs_empty_dir_to_dir(struct yaffs_obj
*from_dir
,
3971 struct yaffs_obj
*to_dir
)
3973 struct yaffs_obj
*obj
;
3974 struct list_head
*lh
;
3975 struct list_head
*n
;
3977 list_for_each_safe(lh
, n
, &from_dir
->variant
.dir_variant
.children
) {
3978 obj
= list_entry(lh
, struct yaffs_obj
, siblings
);
3979 yaffs_add_obj_to_dir(to_dir
, obj
);
3983 struct yaffs_obj
*yaffs_retype_obj(struct yaffs_obj
*obj
,
3984 enum yaffs_obj_type type
)
3986 /* Tear down the old variant */
3987 switch (obj
->variant_type
) {
3988 case YAFFS_OBJECT_TYPE_FILE
:
3989 /* Nuke file data */
3990 yaffs_resize_file(obj
, 0);
3991 yaffs_free_tnode(obj
->my_dev
, obj
->variant
.file_variant
.top
);
3992 obj
->variant
.file_variant
.top
= NULL
;
3994 case YAFFS_OBJECT_TYPE_DIRECTORY
:
3995 /* Put the children in lost and found. */
3996 yaffs_empty_dir_to_dir(obj
, obj
->my_dev
->lost_n_found
);
3997 if (!list_empty(&obj
->variant
.dir_variant
.dirty
))
3998 list_del_init(&obj
->variant
.dir_variant
.dirty
);
4000 case YAFFS_OBJECT_TYPE_SYMLINK
:
4001 /* Nuke symplink data */
4002 kfree(obj
->variant
.symlink_variant
.alias
);
4003 obj
->variant
.symlink_variant
.alias
= NULL
;
4005 case YAFFS_OBJECT_TYPE_HARDLINK
:
4006 list_del_init(&obj
->hard_links
);
4012 memset(&obj
->variant
, 0, sizeof(obj
->variant
));
4014 /*Set up new variant if the memset is not enough. */
4016 case YAFFS_OBJECT_TYPE_DIRECTORY
:
4017 INIT_LIST_HEAD(&obj
->variant
.dir_variant
.children
);
4018 INIT_LIST_HEAD(&obj
->variant
.dir_variant
.dirty
);
4020 case YAFFS_OBJECT_TYPE_FILE
:
4021 case YAFFS_OBJECT_TYPE_SYMLINK
:
4022 case YAFFS_OBJECT_TYPE_HARDLINK
:
4027 obj
->variant_type
= type
;
4033 static int yaffs_unlink_worker(struct yaffs_obj
*obj
)
4043 yaffs_update_parent(obj
->parent
);
4045 if (obj
->variant_type
== YAFFS_OBJECT_TYPE_HARDLINK
) {
4046 return yaffs_del_link(obj
);
4047 } else if (!list_empty(&obj
->hard_links
)) {
4048 /* Curve ball: We're unlinking an object that has a hardlink.
4050 * This problem arises because we are not strictly following
4051 * The Linux link/inode model.
4053 * We can't really delete the object.
4054 * Instead, we do the following:
4055 * - Select a hardlink.
4056 * - Unhook it from the hard links
4057 * - Move it from its parent directory so that the rename works.
4058 * - Rename the object to the hardlink's name.
4059 * - Delete the hardlink
4062 struct yaffs_obj
*hl
;
4063 struct yaffs_obj
*parent
;
4065 YCHAR name
[YAFFS_MAX_NAME_LENGTH
+ 1];
4067 hl
= list_entry(obj
->hard_links
.next
, struct yaffs_obj
,
4070 yaffs_get_obj_name(hl
, name
, YAFFS_MAX_NAME_LENGTH
+ 1);
4071 parent
= hl
->parent
;
4073 list_del_init(&hl
->hard_links
);
4075 yaffs_add_obj_to_dir(obj
->my_dev
->unlinked_dir
, hl
);
4077 ret_val
= yaffs_change_obj_name(obj
, parent
, name
, 0, 0);
4079 if (ret_val
== YAFFS_OK
)
4080 ret_val
= yaffs_generic_obj_del(hl
);
4084 } else if (del_now
) {
4085 switch (obj
->variant_type
) {
4086 case YAFFS_OBJECT_TYPE_FILE
:
4087 return yaffs_del_file(obj
);
4089 case YAFFS_OBJECT_TYPE_DIRECTORY
:
4090 list_del_init(&obj
->variant
.dir_variant
.dirty
);
4091 return yaffs_del_dir(obj
);
4093 case YAFFS_OBJECT_TYPE_SYMLINK
:
4094 return yaffs_del_symlink(obj
);
4096 case YAFFS_OBJECT_TYPE_SPECIAL
:
4097 return yaffs_generic_obj_del(obj
);
4099 case YAFFS_OBJECT_TYPE_HARDLINK
:
4100 case YAFFS_OBJECT_TYPE_UNKNOWN
:
4104 } else if (yaffs_is_non_empty_dir(obj
)) {
4107 return yaffs_change_obj_name(obj
, obj
->my_dev
->unlinked_dir
,
4108 _Y("unlinked"), 0, 0);
4112 static int yaffs_unlink_obj(struct yaffs_obj
*obj
)
4114 if (obj
&& obj
->unlink_allowed
)
4115 return yaffs_unlink_worker(obj
);
4120 int yaffs_unlinker(struct yaffs_obj
*dir
, const YCHAR
*name
)
4122 struct yaffs_obj
*obj
;
4124 obj
= yaffs_find_by_name(dir
, name
);
4125 return yaffs_unlink_obj(obj
);
4129 * If old_name is NULL then we take old_dir as the object to be renamed.
4131 int yaffs_rename_obj(struct yaffs_obj
*old_dir
, const YCHAR
*old_name
,
4132 struct yaffs_obj
*new_dir
, const YCHAR
*new_name
)
4134 struct yaffs_obj
*obj
= NULL
;
4135 struct yaffs_obj
*existing_target
= NULL
;
4138 struct yaffs_dev
*dev
;
4140 if (!old_dir
|| old_dir
->variant_type
!= YAFFS_OBJECT_TYPE_DIRECTORY
) {
4144 if (!new_dir
|| new_dir
->variant_type
!= YAFFS_OBJECT_TYPE_DIRECTORY
) {
4149 dev
= old_dir
->my_dev
;
4151 #ifdef CONFIG_YAFFS_CASE_INSENSITIVE
4152 /* Special case for case insemsitive systems.
4153 * While look-up is case insensitive, the name isn't.
4154 * Therefore we might want to change x.txt to X.txt
4156 if (old_dir
== new_dir
&&
4157 old_name
&& new_name
&&
4158 yaffs_strcmp(old_name
, new_name
) == 0)
4162 if (yaffs_strnlen(new_name
, YAFFS_MAX_NAME_LENGTH
+ 1) >
4163 YAFFS_MAX_NAME_LENGTH
)
4168 obj
= yaffs_find_by_name(old_dir
, old_name
);
4171 old_dir
= obj
->parent
;
4174 if (obj
&& obj
->rename_allowed
) {
4175 /* Now handle an existing target, if there is one */
4176 existing_target
= yaffs_find_by_name(new_dir
, new_name
);
4177 if (yaffs_is_non_empty_dir(existing_target
)) {
4178 return YAFFS_FAIL
; /* ENOTEMPTY */
4179 } else if (existing_target
&& existing_target
!= obj
) {
4180 /* Nuke the target first, using shadowing,
4181 * but only if it isn't the same object.
4183 * Note we must disable gc here otherwise it can mess
4187 dev
->gc_disable
= 1;
4188 yaffs_change_obj_name(obj
, new_dir
, new_name
, force
,
4189 existing_target
->obj_id
);
4190 existing_target
->is_shadowed
= 1;
4191 yaffs_unlink_obj(existing_target
);
4192 dev
->gc_disable
= 0;
4195 result
= yaffs_change_obj_name(obj
, new_dir
, new_name
, 1, 0);
4197 yaffs_update_parent(old_dir
);
4198 if (new_dir
!= old_dir
)
4199 yaffs_update_parent(new_dir
);
4206 /*----------------------- Initialisation Scanning ---------------------- */
4208 void yaffs_handle_shadowed_obj(struct yaffs_dev
*dev
, int obj_id
,
4209 int backward_scanning
)
4211 struct yaffs_obj
*obj
;
4213 if (backward_scanning
) {
4214 /* Handle YAFFS2 case (backward scanning)
4215 * If the shadowed object exists then ignore.
4217 obj
= yaffs_find_by_number(dev
, obj_id
);
4222 /* Let's create it (if it does not exist) assuming it is a file so that
4223 * it can do shrinking etc.
4224 * We put it in unlinked dir to be cleaned up after the scanning
4227 yaffs_find_or_create_by_number(dev
, obj_id
, YAFFS_OBJECT_TYPE_FILE
);
4230 obj
->is_shadowed
= 1;
4231 yaffs_add_obj_to_dir(dev
->unlinked_dir
, obj
);
4232 obj
->variant
.file_variant
.shrink_size
= 0;
4233 obj
->valid
= 1; /* So that we don't read any other info. */
4236 void yaffs_link_fixup(struct yaffs_dev
*dev
, struct list_head
*hard_list
)
4238 struct list_head
*lh
;
4239 struct list_head
*save
;
4240 struct yaffs_obj
*hl
;
4241 struct yaffs_obj
*in
;
4243 list_for_each_safe(lh
, save
, hard_list
) {
4244 hl
= list_entry(lh
, struct yaffs_obj
, hard_links
);
4245 in
= yaffs_find_by_number(dev
,
4246 hl
->variant
.hardlink_variant
.equiv_id
);
4249 /* Add the hardlink pointers */
4250 hl
->variant
.hardlink_variant
.equiv_obj
= in
;
4251 list_add(&hl
->hard_links
, &in
->hard_links
);
4253 /* Todo Need to report/handle this better.
4254 * Got a problem... hardlink to a non-existant object
4256 hl
->variant
.hardlink_variant
.equiv_obj
= NULL
;
4257 INIT_LIST_HEAD(&hl
->hard_links
);
4262 static void yaffs_strip_deleted_objs(struct yaffs_dev
*dev
)
4265 * Sort out state of unlinked and deleted objects after scanning.
4267 struct list_head
*i
;
4268 struct list_head
*n
;
4269 struct yaffs_obj
*l
;
4274 /* Soft delete all the unlinked files */
4275 list_for_each_safe(i
, n
,
4276 &dev
->unlinked_dir
->variant
.dir_variant
.children
) {
4277 l
= list_entry(i
, struct yaffs_obj
, siblings
);
4281 list_for_each_safe(i
, n
, &dev
->del_dir
->variant
.dir_variant
.children
) {
4282 l
= list_entry(i
, struct yaffs_obj
, siblings
);
4288 * This code iterates through all the objects making sure that they are rooted.
4289 * Any unrooted objects are re-rooted in lost+found.
4290 * An object needs to be in one of:
4291 * - Directly under deleted, unlinked
4292 * - Directly or indirectly under root.
4295 * This code assumes that we don't ever change the current relationships
4296 * between directories:
4297 * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
4298 * lost-n-found->parent == root_dir
4300 * This fixes the problem where directories might have inadvertently been
4301 * deleted leaving the object "hanging" without being rooted in the
4305 static int yaffs_has_null_parent(struct yaffs_dev
*dev
, struct yaffs_obj
*obj
)
4307 return (obj
== dev
->del_dir
||
4308 obj
== dev
->unlinked_dir
|| obj
== dev
->root_dir
);
4311 static void yaffs_fix_hanging_objs(struct yaffs_dev
*dev
)
4313 struct yaffs_obj
*obj
;
4314 struct yaffs_obj
*parent
;
4316 struct list_head
*lh
;
4317 struct list_head
*n
;
4324 /* Iterate through the objects in each hash entry,
4325 * looking at each object.
4326 * Make sure it is rooted.
4329 for (i
= 0; i
< YAFFS_NOBJECT_BUCKETS
; i
++) {
4330 list_for_each_safe(lh
, n
, &dev
->obj_bucket
[i
].list
) {
4331 obj
= list_entry(lh
, struct yaffs_obj
, hash_link
);
4332 parent
= obj
->parent
;
4334 if (yaffs_has_null_parent(dev
, obj
)) {
4335 /* These directories are not hanging */
4337 } else if (!parent
||
4338 parent
->variant_type
!=
4339 YAFFS_OBJECT_TYPE_DIRECTORY
) {
4341 } else if (yaffs_has_null_parent(dev
, parent
)) {
4345 * Need to follow the parent chain to
4346 * see if it is hanging.
4351 while (parent
!= dev
->root_dir
&&
4353 parent
->parent
->variant_type
==
4354 YAFFS_OBJECT_TYPE_DIRECTORY
&&
4356 parent
= parent
->parent
;
4359 if (parent
!= dev
->root_dir
)
4363 yaffs_trace(YAFFS_TRACE_SCAN
,
4364 "Hanging object %d moved to lost and found",
4366 yaffs_add_obj_to_dir(dev
->lost_n_found
, obj
);
4373 * Delete directory contents for cleaning up lost and found.
4375 static void yaffs_del_dir_contents(struct yaffs_obj
*dir
)
4377 struct yaffs_obj
*obj
;
4378 struct list_head
*lh
;
4379 struct list_head
*n
;
4381 if (dir
->variant_type
!= YAFFS_OBJECT_TYPE_DIRECTORY
)
4384 list_for_each_safe(lh
, n
, &dir
->variant
.dir_variant
.children
) {
4385 obj
= list_entry(lh
, struct yaffs_obj
, siblings
);
4386 if (obj
->variant_type
== YAFFS_OBJECT_TYPE_DIRECTORY
)
4387 yaffs_del_dir_contents(obj
);
4388 yaffs_trace(YAFFS_TRACE_SCAN
,
4389 "Deleting lost_found object %d",
4391 yaffs_unlink_obj(obj
);
4395 static void yaffs_empty_l_n_f(struct yaffs_dev
*dev
)
4397 yaffs_del_dir_contents(dev
->lost_n_found
);
4401 struct yaffs_obj
*yaffs_find_by_name(struct yaffs_obj
*directory
,
4405 struct list_head
*i
;
4406 YCHAR buffer
[YAFFS_MAX_NAME_LENGTH
+ 1];
4407 struct yaffs_obj
*l
;
4413 yaffs_trace(YAFFS_TRACE_ALWAYS
,
4414 "tragedy: yaffs_find_by_name: null pointer directory"
4419 if (directory
->variant_type
!= YAFFS_OBJECT_TYPE_DIRECTORY
) {
4420 yaffs_trace(YAFFS_TRACE_ALWAYS
,
4421 "tragedy: yaffs_find_by_name: non-directory"
4426 sum
= yaffs_calc_name_sum(name
);
4428 list_for_each(i
, &directory
->variant
.dir_variant
.children
) {
4429 l
= list_entry(i
, struct yaffs_obj
, siblings
);
4431 if (l
->parent
!= directory
)
4434 yaffs_check_obj_details_loaded(l
);
4436 /* Special case for lost-n-found */
4437 if (l
->obj_id
== YAFFS_OBJECTID_LOSTNFOUND
) {
4438 if (!yaffs_strcmp(name
, YAFFS_LOSTNFOUND_NAME
))
4440 } else if (l
->sum
== sum
|| l
->hdr_chunk
<= 0) {
4441 /* LostnFound chunk called Objxxx
4444 yaffs_get_obj_name(l
, buffer
,
4445 YAFFS_MAX_NAME_LENGTH
+ 1);
4446 if (!yaffs_strncmp(name
, buffer
, YAFFS_MAX_NAME_LENGTH
))
4453 /* GetEquivalentObject dereferences any hard links to get to the
4457 struct yaffs_obj
*yaffs_get_equivalent_obj(struct yaffs_obj
*obj
)
4459 if (obj
&& obj
->variant_type
== YAFFS_OBJECT_TYPE_HARDLINK
) {
4460 obj
= obj
->variant
.hardlink_variant
.equiv_obj
;
4461 yaffs_check_obj_details_loaded(obj
);
4467 * A note or two on object names.
4468 * * If the object name is missing, we then make one up in the form objnnn
4470 * * ASCII names are stored in the object header's name field from byte zero
4471 * * Unicode names are historically stored starting from byte zero.
4473 * Then there are automatic Unicode names...
4474 * The purpose of these is to save names in a way that can be read as
4475 * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
4476 * system to share files.
4478 * These automatic unicode are stored slightly differently...
4479 * - If the name can fit in the ASCII character space then they are saved as
4480 * ascii names as per above.
4481 * - If the name needs Unicode then the name is saved in Unicode
4482 * starting at oh->name[1].
4485 static void yaffs_fix_null_name(struct yaffs_obj
*obj
, YCHAR
*name
,
4488 /* Create an object name if we could not find one. */
4489 if (yaffs_strnlen(name
, YAFFS_MAX_NAME_LENGTH
) == 0) {
4490 YCHAR local_name
[20];
4491 YCHAR num_string
[20];
4492 YCHAR
*x
= &num_string
[19];
4493 unsigned v
= obj
->obj_id
;
4497 *x
= '0' + (v
% 10);
4500 /* make up a name */
4501 yaffs_strcpy(local_name
, YAFFS_LOSTNFOUND_PREFIX
);
4502 yaffs_strcat(local_name
, x
);
4503 yaffs_strncpy(name
, local_name
, buffer_size
- 1);
4507 int yaffs_get_obj_name(struct yaffs_obj
*obj
, YCHAR
*name
, int buffer_size
)
4509 memset(name
, 0, buffer_size
* sizeof(YCHAR
));
4510 yaffs_check_obj_details_loaded(obj
);
4511 if (obj
->obj_id
== YAFFS_OBJECTID_LOSTNFOUND
) {
4512 yaffs_strncpy(name
, YAFFS_LOSTNFOUND_NAME
, buffer_size
- 1);
4513 } else if (obj
->short_name
[0]) {
4514 yaffs_strcpy(name
, obj
->short_name
);
4515 } else if (obj
->hdr_chunk
> 0) {
4517 u8
*buffer
= yaffs_get_temp_buffer(obj
->my_dev
);
4519 struct yaffs_obj_hdr
*oh
= (struct yaffs_obj_hdr
*)buffer
;
4521 memset(buffer
, 0, obj
->my_dev
->data_bytes_per_chunk
);
4523 if (obj
->hdr_chunk
> 0) {
4524 if (!yaffs_rd_chunk_tags_nand(obj
->my_dev
,
4528 yaffs_trace(YAFFS_TRACE_ERROR
,
4529 "yaffs_get_obj_name: unhandled error from rd_chunk_tags_nand");
4532 yaffs_load_name_from_oh(obj
->my_dev
, name
, oh
->name
,
4535 yaffs_release_temp_buffer(obj
->my_dev
, buffer
);
4538 yaffs_fix_null_name(obj
, name
, buffer_size
);
4540 return yaffs_strnlen(name
, YAFFS_MAX_NAME_LENGTH
);
4543 Y_LOFF_T
yaffs_get_obj_length(struct yaffs_obj
*obj
)
4545 /* Dereference any hard linking */
4546 obj
= yaffs_get_equivalent_obj(obj
);
4548 if (obj
->variant_type
== YAFFS_OBJECT_TYPE_FILE
)
4549 return obj
->variant
.file_variant
.file_size
;
4550 if (obj
->variant_type
== YAFFS_OBJECT_TYPE_SYMLINK
) {
4551 if (!obj
->variant
.symlink_variant
.alias
)
4553 return yaffs_strnlen(obj
->variant
.symlink_variant
.alias
,
4554 YAFFS_MAX_ALIAS_LENGTH
);
4556 /* Only a directory should drop through to here */
4557 return obj
->my_dev
->data_bytes_per_chunk
;
4561 int yaffs_get_obj_link_count(struct yaffs_obj
*obj
)
4564 struct list_head
*i
;
4567 count
++; /* the object itself */
4569 list_for_each(i
, &obj
->hard_links
)
4570 count
++; /* add the hard links; */
4575 int yaffs_get_obj_inode(struct yaffs_obj
*obj
)
4577 obj
= yaffs_get_equivalent_obj(obj
);
4582 unsigned yaffs_get_obj_type(struct yaffs_obj
*obj
)
4584 obj
= yaffs_get_equivalent_obj(obj
);
4586 switch (obj
->variant_type
) {
4587 case YAFFS_OBJECT_TYPE_FILE
:
4590 case YAFFS_OBJECT_TYPE_DIRECTORY
:
4593 case YAFFS_OBJECT_TYPE_SYMLINK
:
4596 case YAFFS_OBJECT_TYPE_HARDLINK
:
4599 case YAFFS_OBJECT_TYPE_SPECIAL
:
4600 if (S_ISFIFO(obj
->yst_mode
))
4602 if (S_ISCHR(obj
->yst_mode
))
4604 if (S_ISBLK(obj
->yst_mode
))
4606 if (S_ISSOCK(obj
->yst_mode
))
4616 YCHAR
*yaffs_get_symlink_alias(struct yaffs_obj
*obj
)
4618 obj
= yaffs_get_equivalent_obj(obj
);
4619 if (obj
->variant_type
== YAFFS_OBJECT_TYPE_SYMLINK
)
4620 return yaffs_clone_str(obj
->variant
.symlink_variant
.alias
);
4622 return yaffs_clone_str(_Y(""));
4625 /*--------------------------- Initialisation code -------------------------- */
4627 static int yaffs_check_dev_fns(struct yaffs_dev
*dev
)
4629 struct yaffs_driver
*drv
= &dev
->drv
;
4630 struct yaffs_tags_handler
*tagger
= &dev
->tagger
;
4632 /* Common functions, gotta have */
4633 if (!drv
->drv_read_chunk_fn
||
4634 !drv
->drv_write_chunk_fn
||
4638 if (dev
->param
.is_yaffs2
&&
4639 (!drv
->drv_mark_bad_fn
|| !drv
->drv_check_bad_fn
))
4642 /* Install the default tags marshalling functions if needed. */
4643 yaffs_tags_compat_install(dev
);
4644 yaffs_tags_marshall_install(dev
);
4646 /* Check we now have the marshalling functions required. */
4647 if (!tagger
->write_chunk_tags_fn
||
4648 !tagger
->read_chunk_tags_fn
||
4649 !tagger
->query_block_fn
||
4650 !tagger
->mark_bad_fn
)
4656 static int yaffs_create_initial_dir(struct yaffs_dev
*dev
)
4658 /* Initialise the unlinked, deleted, root and lost+found directories */
4659 dev
->lost_n_found
= dev
->root_dir
= NULL
;
4660 dev
->unlinked_dir
= dev
->del_dir
= NULL
;
4662 yaffs_create_fake_dir(dev
, YAFFS_OBJECTID_UNLINKED
, S_IFDIR
);
4664 yaffs_create_fake_dir(dev
, YAFFS_OBJECTID_DELETED
, S_IFDIR
);
4666 yaffs_create_fake_dir(dev
, YAFFS_OBJECTID_ROOT
,
4667 YAFFS_ROOT_MODE
| S_IFDIR
);
4669 yaffs_create_fake_dir(dev
, YAFFS_OBJECTID_LOSTNFOUND
,
4670 YAFFS_LOSTNFOUND_MODE
| S_IFDIR
);
4672 if (dev
->lost_n_found
&& dev
->root_dir
&& dev
->unlinked_dir
4674 yaffs_add_obj_to_dir(dev
->root_dir
, dev
->lost_n_found
);
4681 * Typically only used by yaffs_guts_initialise, but also used by the
4682 * Low level yaffs driver tests.
4685 int yaffs_guts_ll_init(struct yaffs_dev
*dev
)
4689 yaffs_trace(YAFFS_TRACE_TRACING
, "yaffs: yaffs_ll_init()");
4692 yaffs_trace(YAFFS_TRACE_ALWAYS
,
4693 "yaffs: Need a device"
4701 dev
->internal_start_block
= dev
->param
.start_block
;
4702 dev
->internal_end_block
= dev
->param
.end_block
;
4703 dev
->block_offset
= 0;
4704 dev
->chunk_offset
= 0;
4705 dev
->n_free_chunks
= 0;
4709 if (dev
->param
.start_block
== 0) {
4710 dev
->internal_start_block
= dev
->param
.start_block
+ 1;
4711 dev
->internal_end_block
= dev
->param
.end_block
+ 1;
4712 dev
->block_offset
= 1;
4713 dev
->chunk_offset
= dev
->param
.chunks_per_block
;
4716 /* Check geometry parameters. */
4718 if ((!dev
->param
.inband_tags
&& dev
->param
.is_yaffs2
&&
4719 dev
->param
.total_bytes_per_chunk
< 1024) ||
4720 (!dev
->param
.is_yaffs2
&&
4721 dev
->param
.total_bytes_per_chunk
< 512) ||
4722 (dev
->param
.inband_tags
&& !dev
->param
.is_yaffs2
) ||
4723 dev
->param
.chunks_per_block
< 2 ||
4724 dev
->param
.n_reserved_blocks
< 2 ||
4725 dev
->internal_start_block
<= 0 ||
4726 dev
->internal_end_block
<= 0 ||
4727 dev
->internal_end_block
<=
4728 (dev
->internal_start_block
+ dev
->param
.n_reserved_blocks
+ 2)
4730 /* otherwise it is too small */
4731 yaffs_trace(YAFFS_TRACE_ALWAYS
,
4732 "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
4733 dev
->param
.total_bytes_per_chunk
,
4734 dev
->param
.is_yaffs2
? "2" : "",
4735 dev
->param
.inband_tags
);
4739 /* Sort out space for inband tags, if required */
4740 if (dev
->param
.inband_tags
)
4741 dev
->data_bytes_per_chunk
=
4742 dev
->param
.total_bytes_per_chunk
-
4743 sizeof(struct yaffs_packed_tags2_tags_only
);
4745 dev
->data_bytes_per_chunk
= dev
->param
.total_bytes_per_chunk
;
4747 /* Got the right mix of functions? */
4748 if (!yaffs_check_dev_fns(dev
)) {
4749 /* Function missing */
4750 yaffs_trace(YAFFS_TRACE_ALWAYS
,
4751 "device function(s) missing or wrong");
4756 if (yaffs_init_nand(dev
) != YAFFS_OK
) {
4757 yaffs_trace(YAFFS_TRACE_ALWAYS
, "InitialiseNAND failed");
4765 int yaffs_guts_format_dev(struct yaffs_dev
*dev
)
4768 enum yaffs_block_state state
;
4771 if(yaffs_guts_ll_init(dev
) != YAFFS_OK
)
4777 for (i
= dev
->internal_start_block
; i
<= dev
->internal_end_block
; i
++) {
4778 yaffs_query_init_block_state(dev
, i
, &state
, &dummy
);
4779 if (state
!= YAFFS_BLOCK_STATE_DEAD
)
4780 yaffs_erase_block(dev
, i
);
4787 int yaffs_guts_initialise(struct yaffs_dev
*dev
)
4789 int init_failed
= 0;
4793 if(yaffs_guts_ll_init(dev
) != YAFFS_OK
)
4796 if (dev
->is_mounted
) {
4797 yaffs_trace(YAFFS_TRACE_ALWAYS
, "device already mounted");
4801 dev
->is_mounted
= 1;
4803 /* OK now calculate a few things for the device */
4806 * Calculate all the chunk size manipulation numbers:
4808 x
= dev
->data_bytes_per_chunk
;
4809 /* We always use dev->chunk_shift and dev->chunk_div */
4810 dev
->chunk_shift
= calc_shifts(x
);
4811 x
>>= dev
->chunk_shift
;
4813 /* We only use chunk mask if chunk_div is 1 */
4814 dev
->chunk_mask
= (1 << dev
->chunk_shift
) - 1;
4817 * Calculate chunk_grp_bits.
4818 * We need to find the next power of 2 > than internal_end_block
4821 x
= dev
->param
.chunks_per_block
* (dev
->internal_end_block
+ 1);
4823 bits
= calc_shifts_ceiling(x
);
4825 /* Set up tnode width if wide tnodes are enabled. */
4826 if (!dev
->param
.wide_tnodes_disabled
) {
4827 /* bits must be even so that we end up with 32-bit words */
4831 dev
->tnode_width
= 16;
4833 dev
->tnode_width
= bits
;
4835 dev
->tnode_width
= 16;
4838 dev
->tnode_mask
= (1 << dev
->tnode_width
) - 1;
4840 /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
4841 * so if the bitwidth of the
4842 * chunk range we're using is greater than 16 we need
4843 * to figure out chunk shift and chunk_grp_size
4846 if (bits
<= (int)dev
->tnode_width
)
4847 dev
->chunk_grp_bits
= 0;
4849 dev
->chunk_grp_bits
= bits
- dev
->tnode_width
;
4851 dev
->tnode_size
= (dev
->tnode_width
* YAFFS_NTNODES_LEVEL0
) / 8;
4852 if (dev
->tnode_size
< sizeof(struct yaffs_tnode
))
4853 dev
->tnode_size
= sizeof(struct yaffs_tnode
);
4855 dev
->chunk_grp_size
= 1 << dev
->chunk_grp_bits
;
4857 if (dev
->param
.chunks_per_block
< dev
->chunk_grp_size
) {
4858 /* We have a problem because the soft delete won't work if
4859 * the chunk group size > chunks per block.
4860 * This can be remedied by using larger "virtual blocks".
4862 yaffs_trace(YAFFS_TRACE_ALWAYS
, "chunk group too large");
4867 /* Finished verifying the device, continue with initialisation */
4869 /* More device initialisation */
4871 dev
->passive_gc_count
= 0;
4872 dev
->oldest_dirty_gc_count
= 0;
4874 dev
->gc_block_finder
= 0;
4875 dev
->buffered_block
= -1;
4876 dev
->doing_buffered_block_rewrite
= 0;
4877 dev
->n_deleted_files
= 0;
4878 dev
->n_bg_deletions
= 0;
4879 dev
->n_unlinked_files
= 0;
4880 dev
->n_ecc_fixed
= 0;
4881 dev
->n_ecc_unfixed
= 0;
4882 dev
->n_tags_ecc_fixed
= 0;
4883 dev
->n_tags_ecc_unfixed
= 0;
4884 dev
->n_erase_failures
= 0;
4885 dev
->n_erased_blocks
= 0;
4886 dev
->gc_disable
= 0;
4887 dev
->has_pending_prioritised_gc
= 1;
4888 /* Assume the worst for now, will get fixed on first GC */
4889 INIT_LIST_HEAD(&dev
->dirty_dirs
);
4890 dev
->oldest_dirty_seq
= 0;
4891 dev
->oldest_dirty_block
= 0;
4893 /* Initialise temporary buffers and caches. */
4894 if (!yaffs_init_tmp_buffers(dev
))
4898 dev
->gc_cleanup_list
= NULL
;
4900 if (!init_failed
&& dev
->param
.n_caches
> 0) {
4904 dev
->param
.n_caches
* sizeof(struct yaffs_cache
);
4906 if (dev
->param
.n_caches
> YAFFS_MAX_SHORT_OP_CACHES
)
4907 dev
->param
.n_caches
= YAFFS_MAX_SHORT_OP_CACHES
;
4909 dev
->cache
= kmalloc(cache_bytes
, GFP_NOFS
);
4911 buf
= (u8
*) dev
->cache
;
4914 memset(dev
->cache
, 0, cache_bytes
);
4916 for (i
= 0; i
< dev
->param
.n_caches
&& buf
; i
++) {
4917 dev
->cache
[i
].object
= NULL
;
4918 dev
->cache
[i
].last_use
= 0;
4919 dev
->cache
[i
].dirty
= 0;
4920 dev
->cache
[i
].data
= buf
=
4921 kmalloc(dev
->param
.total_bytes_per_chunk
, GFP_NOFS
);
4926 dev
->cache_last_use
= 0;
4929 dev
->cache_hits
= 0;
4932 dev
->gc_cleanup_list
=
4933 kmalloc(dev
->param
.chunks_per_block
* sizeof(u32
),
4935 if (!dev
->gc_cleanup_list
)
4939 if (dev
->param
.is_yaffs2
)
4940 dev
->param
.use_header_file_size
= 1;
4942 if (!init_failed
&& !yaffs_init_blocks(dev
))
4945 yaffs_init_tnodes_and_objs(dev
);
4947 if (!init_failed
&& !yaffs_create_initial_dir(dev
))
4950 if (!init_failed
&& dev
->param
.is_yaffs2
&&
4951 !dev
->param
.disable_summary
&&
4952 !yaffs_summary_init(dev
))
4956 /* Now scan the flash. */
4957 if (dev
->param
.is_yaffs2
) {
4958 if (yaffs2_checkpt_restore(dev
)) {
4959 yaffs_check_obj_details_loaded(dev
->root_dir
);
4960 yaffs_trace(YAFFS_TRACE_CHECKPOINT
|
4962 "yaffs: restored from checkpoint"
4966 /* Clean up the mess caused by an aborted
4967 * checkpoint load then scan backwards.
4969 yaffs_deinit_blocks(dev
);
4971 yaffs_deinit_tnodes_and_objs(dev
);
4973 dev
->n_erased_blocks
= 0;
4974 dev
->n_free_chunks
= 0;
4975 dev
->alloc_block
= -1;
4976 dev
->alloc_page
= -1;
4977 dev
->n_deleted_files
= 0;
4978 dev
->n_unlinked_files
= 0;
4979 dev
->n_bg_deletions
= 0;
4981 if (!init_failed
&& !yaffs_init_blocks(dev
))
4984 yaffs_init_tnodes_and_objs(dev
);
4987 && !yaffs_create_initial_dir(dev
))
4990 if (!init_failed
&& !yaffs2_scan_backwards(dev
))
4993 } else if (!yaffs1_scan(dev
)) {
4997 yaffs_strip_deleted_objs(dev
);
4998 yaffs_fix_hanging_objs(dev
);
4999 if (dev
->param
.empty_lost_n_found
)
5000 yaffs_empty_l_n_f(dev
);
5004 /* Clean up the mess */
5005 yaffs_trace(YAFFS_TRACE_TRACING
,
5006 "yaffs: yaffs_guts_initialise() aborted.");
5008 yaffs_deinitialise(dev
);
5012 /* Zero out stats */
5013 dev
->n_page_reads
= 0;
5014 dev
->n_page_writes
= 0;
5015 dev
->n_erasures
= 0;
5016 dev
->n_gc_copies
= 0;
5017 dev
->n_retried_writes
= 0;
5019 dev
->n_retired_blocks
= 0;
5021 yaffs_verify_free_chunks(dev
);
5022 yaffs_verify_blocks(dev
);
5024 /* Clean up any aborted checkpoint data */
5025 if (!dev
->is_checkpointed
&& dev
->blocks_in_checkpt
> 0)
5026 yaffs2_checkpt_invalidate(dev
);
5028 yaffs_trace(YAFFS_TRACE_TRACING
,
5029 "yaffs: yaffs_guts_initialise() done.");
5033 void yaffs_deinitialise(struct yaffs_dev
*dev
)
5035 if (dev
->is_mounted
) {
5038 yaffs_deinit_blocks(dev
);
5039 yaffs_deinit_tnodes_and_objs(dev
);
5040 yaffs_summary_deinit(dev
);
5042 if (dev
->param
.n_caches
> 0 && dev
->cache
) {
5044 for (i
= 0; i
< dev
->param
.n_caches
; i
++) {
5045 kfree(dev
->cache
[i
].data
);
5046 dev
->cache
[i
].data
= NULL
;
5053 kfree(dev
->gc_cleanup_list
);
5055 for (i
= 0; i
< YAFFS_N_TEMP_BUFFERS
; i
++)
5056 kfree(dev
->temp_buffer
[i
].buffer
);
5058 dev
->is_mounted
= 0;
5060 yaffs_deinit_nand(dev
);
5064 int yaffs_count_free_chunks(struct yaffs_dev
*dev
)
5068 struct yaffs_block_info
*blk
;
5070 blk
= dev
->block_info
;
5071 for (b
= dev
->internal_start_block
; b
<= dev
->internal_end_block
; b
++) {
5072 switch (blk
->block_state
) {
5073 case YAFFS_BLOCK_STATE_EMPTY
:
5074 case YAFFS_BLOCK_STATE_ALLOCATING
:
5075 case YAFFS_BLOCK_STATE_COLLECTING
:
5076 case YAFFS_BLOCK_STATE_FULL
:
5078 (dev
->param
.chunks_per_block
- blk
->pages_in_use
+
5079 blk
->soft_del_pages
);
5089 int yaffs_get_n_free_chunks(struct yaffs_dev
*dev
)
5091 /* This is what we report to the outside world */
5094 int blocks_for_checkpt
;
5097 n_free
= dev
->n_free_chunks
;
5098 n_free
+= dev
->n_deleted_files
;
5100 /* Now count and subtract the number of dirty chunks in the cache. */
5102 for (n_dirty_caches
= 0, i
= 0; i
< dev
->param
.n_caches
; i
++) {
5103 if (dev
->cache
[i
].dirty
)
5107 n_free
-= n_dirty_caches
;
5110 ((dev
->param
.n_reserved_blocks
+ 1) * dev
->param
.chunks_per_block
);
5112 /* Now figure checkpoint space and report that... */
5113 blocks_for_checkpt
= yaffs_calc_checkpt_blocks_required(dev
);
5115 n_free
-= (blocks_for_checkpt
* dev
->param
.chunks_per_block
);
5126 * Marshalling functions to get Y_LOFF_T file sizes into and out of
5129 void yaffs_oh_size_load(struct yaffs_obj_hdr
*oh
, Y_LOFF_T fsize
)
5131 #ifdef CONFIG_YAFFS_OP
5132 oh
->file_size_low
= (fsize
& 0xFFFFFFFF);
5133 oh
->file_size_high
= 0; //((fsize >> 32) & 0xFFFFFFFF);
5135 oh
->file_size_low
= (fsize
& 0xFFFFFFFF);
5136 oh
->file_size_high
= ((fsize
>> 32) & 0xFFFFFFFF);
5140 Y_LOFF_T
yaffs_oh_to_size(struct yaffs_obj_hdr
*oh
)
5144 if (sizeof(Y_LOFF_T
) >= 8 && ~(oh
->file_size_high
))
5145 #ifdef CONFIG_YAFFS_OP
5146 retval
= (((Y_LOFF_T
) oh
->file_size_low
) & 0xFFFFFFFF);
5148 retval
= (((Y_LOFF_T
) oh
->file_size_high
) << 32) |
5149 (((Y_LOFF_T
) oh
->file_size_low
) & 0xFFFFFFFF);
5152 retval
= (Y_LOFF_T
) oh
->file_size_low
;
5158 void yaffs_count_blocks_by_state(struct yaffs_dev
*dev
, int bs
[10])
5161 struct yaffs_block_info
*bi
;
5164 for(i
= 0; i
< 10; i
++)
5167 for(i
= dev
->internal_start_block
; i
<= dev
->internal_end_block
; i
++) {
5168 bi
= yaffs_get_block_info(dev
, i
);
5169 s
= bi
->block_state
;
5170 if(s
> YAFFS_BLOCK_STATE_DEAD
|| s
< YAFFS_BLOCK_STATE_UNKNOWN
)