/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */
#include "git-compat-util.h"
#include "bulk-checkin.h"
#include "cache-tree.h"
#include "object-file.h"
#include "object-store-ll.h"
#include "oid-array.h"
#include "environment.h"
#include "name-hash.h"
#include "object-name.h"
#include "preload-index.h"
#include "read-cache.h"
#include "resolve-undo.h"
#include "split-index.h"
#include "fsmonitor.h"
#include "thread-utils.h"
#include "sparse-index.h"
#include "csum-file.h"
#include "promisor-remote.h"

/* Mask for the name length in ce_flags in the on-disk index */
#define CE_NAMEMASK  (0x0fff)

/*
 * The first letter should be 'A'..'Z' for extensions that are not
 * necessary for a correct operation (i.e. optimization data).
 * When new extensions are added that _need_ to be understood in
 * order to correctly interpret the index file, pick a character that
 * is outside the range, to cause the reader to abort.
 */
#define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) )
#define CACHE_EXT_TREE 0x54524545	/* "TREE" */
#define CACHE_EXT_RESOLVE_UNDO 0x52455543 /* "REUC" */
#define CACHE_EXT_LINK 0x6c696e6b	  /* "link" */
#define CACHE_EXT_UNTRACKED 0x554E5452	  /* "UNTR" */
#define CACHE_EXT_FSMONITOR 0x46534D4E	  /* "FSMN" */
#define CACHE_EXT_ENDOFINDEXENTRIES 0x454F4945	/* "EOIE" */
#define CACHE_EXT_INDEXENTRYOFFSETTABLE 0x49454F54 /* "IEOT" */
#define CACHE_EXT_SPARSE_DIRECTORIES 0x73646972 /* "sdir" */
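
/*
 * For illustration, CACHE_EXT() packs the four ASCII bytes of an
 * extension name into one big-endian word, so a signature read from
 * disk can be compared against the constants above:
 *
 *	CACHE_EXT("TREE") == ('T'<<24 | 'R'<<16 | 'E'<<8 | 'E') == 0x54524545
 */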
/* changes that can be kept in $GIT_DIR/index (basically all extensions) */
#define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \
		 CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
		 SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED)

/*
 * This is an estimate of the pathname length in the index.  We use
 * this for V4 index files to guess the un-deltafied size of the index
 * in memory because of pathname deltafication.  This is not required
 * for V2/V3 index formats because their pathnames are not compressed.
 * If the initial amount of memory set aside is not sufficient, the
 * mem pool will allocate extra memory.
 */
#define CACHE_ENTRY_PATH_LENGTH 80
enum index_search_mode {
	NO_EXPAND_SPARSE = 0,
	EXPAND_SPARSE = 1
};

static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len)
{
	struct cache_entry *ce;
	ce = mem_pool_alloc(mem_pool, cache_entry_size(len));
	ce->mem_pool_allocated = 1;
	return ce;
}

static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len)
{
	struct cache_entry *ce;
	ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len));
	ce->mem_pool_allocated = 1;
	return ce;
}

static struct mem_pool *find_mem_pool(struct index_state *istate)
{
	struct mem_pool **pool_ptr;

	if (istate->split_index && istate->split_index->base)
		pool_ptr = &istate->split_index->base->ce_mem_pool;
	else
		pool_ptr = &istate->ce_mem_pool;

	if (!*pool_ptr) {
		*pool_ptr = xmalloc(sizeof(**pool_ptr));
		mem_pool_init(*pool_ptr, 0);
	}

	return *pool_ptr;
}

static const char *alternate_index_output;

static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	if (S_ISSPARSEDIR(ce->ce_mode))
		istate->sparse_index = INDEX_COLLAPSED;

	istate->cache[nr] = ce;
	add_name_hash(istate, ce);
}
static void replace_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	struct cache_entry *old = istate->cache[nr];

	replace_index_entry_in_base(istate, old, ce);
	remove_name_hash(istate, old);
	discard_cache_entry(old);
	ce->ce_flags &= ~CE_HASHED;
	set_index_entry(istate, nr, ce);
	ce->ce_flags |= CE_UPDATE_IN_BASE;
	mark_fsmonitor_invalid(istate, ce);
	istate->cache_changed |= CE_ENTRY_CHANGED;
}

void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
{
	struct cache_entry *old_entry = istate->cache[nr], *new_entry, *refreshed;
	int namelen = strlen(new_name);

	new_entry = make_empty_cache_entry(istate, namelen);
	copy_cache_entry(new_entry, old_entry);
	new_entry->ce_flags &= ~CE_HASHED;
	new_entry->ce_namelen = namelen;
	new_entry->index = 0;
	memcpy(new_entry->name, new_name, namelen + 1);

	cache_tree_invalidate_path(istate, old_entry->name);
	untracked_cache_remove_from_index(istate, old_entry->name);
	remove_index_entry_at(istate, nr);

	/*
	 * Refresh the new index entry. Using 'refresh_cache_entry' ensures
	 * we only update stat info if the entry is otherwise up-to-date (i.e.,
	 * the contents/mode haven't changed). This ensures that we reflect the
	 * 'ctime' of the rename in the index without (incorrectly) updating
	 * the cached stat info to reflect unstaged changes on disk.
	 */
	refreshed = refresh_cache_entry(istate, new_entry, CE_MATCH_REFRESH);
	if (refreshed && refreshed != new_entry) {
		add_index_entry(istate, refreshed, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
		discard_cache_entry(new_entry);
	} else
		add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}

/*
 * This only updates the "non-critical" parts of the directory
 * cache, ie the parts that aren't tracked by GIT, and only used
 * to validate the cache.
 */
void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, struct stat *st)
{
	fill_stat_data(&ce->ce_stat_data, st);

	if (assume_unchanged)
		ce->ce_flags |= CE_VALID;

	if (S_ISREG(st->st_mode)) {
		ce_mark_uptodate(ce);
		mark_fsmonitor_valid(istate, ce);
	}
}
static unsigned int st_mode_from_ce(const struct cache_entry *ce)
{
	extern int trust_executable_bit, has_symlinks;

	switch (ce->ce_mode & S_IFMT) {
	case S_IFLNK:
		return has_symlinks ? S_IFLNK : (S_IFREG | 0644);
	case S_IFREG:
		return (ce->ce_mode & (trust_executable_bit ? 0755 : 0644)) | S_IFREG;
	case S_IFGITLINK:
		return S_IFDIR | 0755;
	case S_IFDIR:
		return ce->ce_mode;
	default:
		BUG("unsupported ce_mode: %o", ce->ce_mode);
	}
}

int fake_lstat(const struct cache_entry *ce, struct stat *st)
{
	fake_lstat_data(&ce->ce_stat_data, st);
	st->st_mode = st_mode_from_ce(ce);

	/* always succeed as lstat() replacement */
	return 0;
}

static int ce_compare_data(struct index_state *istate,
			   const struct cache_entry *ce,
			   struct stat *st)
{
	int match = -1;
	int fd = git_open_cloexec(ce->name, O_RDONLY);

	if (fd >= 0) {
		struct object_id oid;
		if (!index_fd(istate, &oid, fd, st, OBJ_BLOB, ce->name, 0))
			match = !oideq(&oid, &ce->oid);
		/* index_fd() closed the file descriptor already */
	}
	return match;
}

static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
{
	int match = -1;
	void *buffer;
	unsigned long size;
	enum object_type type;
	struct strbuf sb = STRBUF_INIT;

	if (strbuf_readlink(&sb, ce->name, expected_size))
		return -1;

	buffer = repo_read_object_file(the_repository, &ce->oid, &type, &size);
	if (buffer) {
		if (size == sb.len)
			match = memcmp(buffer, sb.buf, size);
		free(buffer);
	}
	strbuf_release(&sb);
	return match;
}

static int ce_compare_gitlink(const struct cache_entry *ce)
{
	struct object_id oid;

	/*
	 * We don't actually require that the .git directory
	 * under GITLINK directory be a valid git directory. It
	 * might even be missing (in case nobody populated that
	 * sub-project).
	 *
	 * If so, we consider it always to match.
	 */
	if (repo_resolve_gitlink_ref(the_repository, ce->name,
				     "HEAD", &oid) < 0)
		return 0;
	return !oideq(&oid, &ce->oid);
}

static int ce_modified_check_fs(struct index_state *istate,
				const struct cache_entry *ce,
				struct stat *st)
{
	switch (st->st_mode & S_IFMT) {
	case S_IFREG:
		if (ce_compare_data(istate, ce, st))
			return DATA_CHANGED;
		break;
	case S_IFLNK:
		if (ce_compare_link(ce, xsize_t(st->st_size)))
			return DATA_CHANGED;
		break;
	case S_IFDIR:
		if (S_ISGITLINK(ce->ce_mode))
			return ce_compare_gitlink(ce) ? DATA_CHANGED : 0;
		/* else fallthrough */
	default:
		return TYPE_CHANGED;
	}
	return 0;
}

static int ce_match_stat_basic(const struct cache_entry *ce, struct stat *st)
{
	unsigned int changed = 0;

	if (ce->ce_flags & CE_REMOVE)
		return MODE_CHANGED | DATA_CHANGED | TYPE_CHANGED;

	switch (ce->ce_mode & S_IFMT) {
	case S_IFREG:
		changed |= !S_ISREG(st->st_mode) ? TYPE_CHANGED : 0;
		/* We consider only the owner x bit to be relevant for
		 * "mode changes"
		 */
		if (trust_executable_bit &&
		    (0100 & (ce->ce_mode ^ st->st_mode)))
			changed |= MODE_CHANGED;
		break;
	case S_IFLNK:
		if (!S_ISLNK(st->st_mode) &&
		    (has_symlinks || !S_ISREG(st->st_mode)))
			changed |= TYPE_CHANGED;
		break;
	case S_IFGITLINK:
		/* We ignore most of the st_xxx fields for gitlinks */
		if (!S_ISDIR(st->st_mode))
			changed |= TYPE_CHANGED;
		else if (ce_compare_gitlink(ce))
			changed |= DATA_CHANGED;
		return changed;
	default:
		BUG("unsupported ce_mode: %o", ce->ce_mode);
	}

	changed |= match_stat_data(&ce->ce_stat_data, st);

	/* Racily smudged entry? */
	if (!ce->ce_stat_data.sd_size) {
		if (!is_empty_blob_oid(&ce->oid, the_repository->hash_algo))
			changed |= DATA_CHANGED;
	}

	return changed;
}

static int is_racy_stat(const struct index_state *istate,
			const struct stat_data *sd)
{
	return (istate->timestamp.sec &&
#ifdef USE_NSEC
		 /* nanosecond timestamped files can also be racy! */
		(istate->timestamp.sec < sd->sd_mtime.sec ||
		 (istate->timestamp.sec == sd->sd_mtime.sec &&
		  istate->timestamp.nsec <= sd->sd_mtime.nsec))
#else
		istate->timestamp.sec <= sd->sd_mtime.sec
#endif
		);
}
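
/*
 * A worked example of the check above (without USE_NSEC): if the index
 * file was written at second 1000 and an entry's cached mtime is also
 * 1000, a same-second modification made after the index write would
 * leave the stat data looking unchanged. Because timestamp.sec <=
 * sd_mtime.sec holds, the entry is treated as racily clean and its
 * content is re-verified instead of being trusted.
 */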
int is_racy_timestamp(const struct index_state *istate,
		      const struct cache_entry *ce)
{
	return (!S_ISGITLINK(ce->ce_mode) &&
		is_racy_stat(istate, &ce->ce_stat_data));
}

int match_stat_data_racy(const struct index_state *istate,
			 const struct stat_data *sd, struct stat *st)
{
	if (is_racy_stat(istate, sd))
		return MTIME_CHANGED;
	return match_stat_data(sd, st);
}
int ie_match_stat(struct index_state *istate,
		  const struct cache_entry *ce, struct stat *st,
		  unsigned int options)
{
	unsigned int changed;
	int ignore_valid = options & CE_MATCH_IGNORE_VALID;
	int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
	int assume_racy_is_modified = options & CE_MATCH_RACY_IS_DIRTY;
	int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;

	if (!ignore_fsmonitor)
		refresh_fsmonitor(istate);
	/*
	 * If it's marked as always valid in the index, it's
	 * valid whatever the checked-out copy says.
	 *
	 * skip-worktree has the same effect with higher precedence
	 */
	if (!ignore_skip_worktree && ce_skip_worktree(ce))
		return 0;
	if (!ignore_valid && (ce->ce_flags & CE_VALID))
		return 0;
	if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID))
		return 0;

	/*
	 * Intent-to-add entries have not been added, so the index entry
	 * by definition never matches what is in the work tree until it
	 * actually gets added.
	 */
	if (ce_intent_to_add(ce))
		return DATA_CHANGED | TYPE_CHANGED | MODE_CHANGED;

	changed = ce_match_stat_basic(ce, st);

	/*
	 * Within 1 second of this sequence:
	 * 	echo xyzzy >file && git-update-index --add file
	 * running this command:
	 * 	echo frotz >file
	 * would give a falsely clean cache entry.  The mtime and
	 * length match the cache, and other stat fields do not change.
	 *
	 * We could detect this at update-index time (the cache entry
	 * being registered/updated records the same time as "now")
	 * and delay the return from git-update-index, but that would
	 * effectively mean we can make at most one commit per second,
	 * which is not acceptable.  Instead, we check cache entries
	 * whose mtime are the same as the index file timestamp more
	 * carefully than others.
	 */
	if (!changed && is_racy_timestamp(istate, ce)) {
		if (assume_racy_is_modified)
			changed |= DATA_CHANGED;
		else
			changed |= ce_modified_check_fs(istate, ce, st);
	}

	return changed;
}

int ie_modified(struct index_state *istate,
		const struct cache_entry *ce,
		struct stat *st, unsigned int options)
{
	int changed, changed_fs;

	changed = ie_match_stat(istate, ce, st, options);
	if (!changed)
		return 0;
	/*
	 * If the mode or type has changed, there's no point in trying
	 * to refresh the entry - it's not going to match
	 */
	if (changed & (MODE_CHANGED | TYPE_CHANGED))
		return changed;

	/*
	 * Immediately after read-tree or update-index --cacheinfo,
	 * the length field is zero, as we have never even read the
	 * lstat(2) information once, and we cannot trust DATA_CHANGED
	 * returned by ie_match_stat() which in turn was returned by
	 * ce_match_stat_basic() to signal that the filesize of the
	 * blob changed.  We have to actually go to the filesystem to
	 * see if the contents match, and if so, should answer "unchanged".
	 *
	 * The logic does not apply to gitlinks, as ce_match_stat_basic()
	 * already has checked the actual HEAD from the filesystem in the
	 * subproject.  If ie_match_stat() already said it is different,
	 * then we know it is.
	 */
	if ((changed & DATA_CHANGED) &&
	    (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0))
		return changed;

	changed_fs = ce_modified_check_fs(istate, ce, st);
	if (changed_fs)
		return changed | changed_fs;
	return 0;
}

static int cache_name_stage_compare(const char *name1, int len1, int stage1,
				    const char *name2, int len2, int stage2)
{
	int cmp;

	cmp = name_compare(name1, len1, name2, len2);
	if (cmp)
		return cmp;

	if (stage1 < stage2)
		return -1;
	if (stage1 > stage2)
		return 1;
	return 0;
}

int cmp_cache_name_compare(const void *a_, const void *b_)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = *((const struct cache_entry **)a_);
	ce2 = *((const struct cache_entry **)b_);
	return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
					ce2->name, ce2->ce_namelen, ce_stage(ce2));
}

static int index_name_stage_pos(struct index_state *istate,
				const char *name, int namelen,
				int stage,
				enum index_search_mode search_mode)
{
	int first, last;

	first = 0;
	last = istate->cache_nr;
	while (last > first) {
		int next = first + ((last - first) >> 1);
		struct cache_entry *ce = istate->cache[next];
		int cmp = cache_name_stage_compare(name, namelen, stage, ce->name, ce_namelen(ce), ce_stage(ce));
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next+1;
	}

	if (search_mode == EXPAND_SPARSE && istate->sparse_index &&
	    first > 0) {
		/* Note: first <= istate->cache_nr */
		struct cache_entry *ce = istate->cache[first - 1];

		/*
		 * If we are in a sparse-index _and_ the entry before the
		 * insertion position is a sparse-directory entry that is
		 * an ancestor of 'name', then we need to expand the index
		 * and search again. This will only trigger once, because
		 * thereafter the index is fully expanded.
		 */
		if (S_ISSPARSEDIR(ce->ce_mode) &&
		    ce_namelen(ce) < namelen &&
		    !strncmp(name, ce->name, ce_namelen(ce))) {
			ensure_full_index(istate);
			return index_name_stage_pos(istate, name, namelen, stage, search_mode);
		}
	}

	return -first-1;
}
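
/*
 * An illustrative note on the return convention used above: a
 * non-negative result is the position of an exact match, while a miss
 * returns -insert_pos-1. For example, -1 means "not found, would be
 * inserted at position 0", so callers recover the insertion point
 * with pos = -pos-1.
 */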
int index_name_pos(struct index_state *istate, const char *name, int namelen)
{
	return index_name_stage_pos(istate, name, namelen, 0, EXPAND_SPARSE);
}

int index_name_pos_sparse(struct index_state *istate, const char *name, int namelen)
{
	return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE);
}

int index_entry_exists(struct index_state *istate, const char *name, int namelen)
{
	return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE) >= 0;
}
int remove_index_entry_at(struct index_state *istate, int pos)
{
	struct cache_entry *ce = istate->cache[pos];

	record_resolve_undo(istate, ce);
	remove_name_hash(istate, ce);
	save_or_free_index_entry(istate, ce);
	istate->cache_changed |= CE_ENTRY_REMOVED;
	istate->cache_nr--;
	if (pos >= istate->cache_nr)
		return 0;
	MOVE_ARRAY(istate->cache + pos, istate->cache + pos + 1,
		   istate->cache_nr - pos);
	return 1;
}

/*
 * Remove all cache entries marked for removal, that is where
 * CE_REMOVE is set in ce_flags.  This is much more effective than
 * calling remove_index_entry_at() for each entry to be removed.
 */
void remove_marked_cache_entries(struct index_state *istate, int invalidate)
{
	struct cache_entry **ce_array = istate->cache;
	unsigned int i, j;

	for (i = j = 0; i < istate->cache_nr; i++) {
		if (ce_array[i]->ce_flags & CE_REMOVE) {
			if (invalidate) {
				cache_tree_invalidate_path(istate,
							   ce_array[i]->name);
				untracked_cache_remove_from_index(istate,
								  ce_array[i]->name);
			}
			remove_name_hash(istate, ce_array[i]);
			save_or_free_index_entry(istate, ce_array[i]);
		}
		else
			ce_array[j++] = ce_array[i];
	}
	if (j == istate->cache_nr)
		return;
	istate->cache_changed |= CE_ENTRY_REMOVED;
	istate->cache_nr = j;
}

int remove_file_from_index(struct index_state *istate, const char *path)
{
	int pos = index_name_pos(istate, path, strlen(path));
	if (pos < 0)
		pos = -pos-1;
	cache_tree_invalidate_path(istate, path);
	untracked_cache_remove_from_index(istate, path);
	while (pos < istate->cache_nr && !strcmp(istate->cache[pos]->name, path))
		remove_index_entry_at(istate, pos);
	return 0;
}

static int compare_name(struct cache_entry *ce, const char *path, int namelen)
{
	return namelen != ce_namelen(ce) || memcmp(path, ce->name, namelen);
}

static int index_name_pos_also_unmerged(struct index_state *istate,
	const char *path, int namelen)
{
	int pos = index_name_pos(istate, path, namelen);
	struct cache_entry *ce;

	if (pos >= 0)
		return pos;

	/* maybe unmerged? */
	pos = -1 - pos;
	if (pos >= istate->cache_nr ||
			compare_name((ce = istate->cache[pos]), path, namelen))
		return -1;

	/* order of preference: stage 2, 1, 3 */
	if (ce_stage(ce) == 1 && pos + 1 < istate->cache_nr &&
			ce_stage((ce = istate->cache[pos + 1])) == 2 &&
			!compare_name(ce, path, namelen))
		pos++;
	return pos;
}

static int different_name(struct cache_entry *ce, struct cache_entry *alias)
{
	int len = ce_namelen(ce);
	return ce_namelen(alias) != len || memcmp(ce->name, alias->name, len);
}

/*
 * If we add a filename that aliases in the cache, we will use the
 * name that we already have - but we don't want to update the same
 * alias twice, because that implies that there were actually two
 * different files with aliasing names!
 *
 * So we use the CE_ADDED flag to verify that the alias was an old
 * one before we accept it as a replacement.
 */
static struct cache_entry *create_alias_ce(struct index_state *istate,
					   struct cache_entry *ce,
					   struct cache_entry *alias)
{
	int len;
	struct cache_entry *new_entry;

	if (alias->ce_flags & CE_ADDED)
		die(_("will not add file alias '%s' ('%s' already exists in index)"),
		    ce->name, alias->name);

	/* Ok, create the new entry using the name of the existing alias */
	len = ce_namelen(alias);
	new_entry = make_empty_cache_entry(istate, len);
	memcpy(new_entry->name, alias->name, len);
	copy_cache_entry(new_entry, ce);
	save_or_free_index_entry(istate, ce);
	return new_entry;
}

void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
{
	struct object_id oid;
	if (write_object_file("", 0, OBJ_BLOB, &oid))
		die(_("cannot create an empty blob in the object database"));
	oidcpy(&ce->oid, &oid);
}
int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
{
	int namelen, was_same;
	mode_t st_mode = st->st_mode;
	struct cache_entry *ce, *alias = NULL;
	unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY;
	int verbose = flags & (ADD_CACHE_VERBOSE | ADD_CACHE_PRETEND);
	int pretend = flags & ADD_CACHE_PRETEND;
	int intent_only = flags & ADD_CACHE_INTENT;
	int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|
			  (intent_only ? ADD_CACHE_NEW_ONLY : 0));
	unsigned hash_flags = pretend ? 0 : HASH_WRITE_OBJECT;
	struct object_id oid;

	if (flags & ADD_CACHE_RENORMALIZE)
		hash_flags |= HASH_RENORMALIZE;

	if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode))
		return error(_("%s: can only add regular files, symbolic links or git-directories"), path);

	namelen = strlen(path);
	if (S_ISDIR(st_mode)) {
		if (repo_resolve_gitlink_ref(the_repository, path, "HEAD", &oid) < 0)
			return error(_("'%s' does not have a commit checked out"), path);
		while (namelen && path[namelen-1] == '/')
			namelen--;
	}
	ce = make_empty_cache_entry(istate, namelen);
	memcpy(ce->name, path, namelen);
	ce->ce_namelen = namelen;
	if (!intent_only)
		fill_stat_cache_info(istate, ce, st);
	else
		ce->ce_flags |= CE_INTENT_TO_ADD;

	if (trust_executable_bit && has_symlinks) {
		ce->ce_mode = create_ce_mode(st_mode);
	} else {
		/* If there is an existing entry, pick the mode bits and type
		 * from it, otherwise assume unexecutable regular file.
		 */
		struct cache_entry *ent;
		int pos = index_name_pos_also_unmerged(istate, path, namelen);

		ent = (0 <= pos) ? istate->cache[pos] : NULL;
		ce->ce_mode = ce_mode_from_stat(ent, st_mode);
	}

	/* When core.ignorecase=true, determine if a directory of the same name but differing
	 * case already exists within the Git repository.  If it does, ensure the directory
	 * case of the file being added to the repository matches (is folded into) the existing
	 * entry's directory case.
	 */
	if (ignore_case) {
		adjust_dirname_case(istate, ce->name);
	}
	if (!(flags & ADD_CACHE_RENORMALIZE)) {
		alias = index_file_exists(istate, ce->name,
					  ce_namelen(ce), ignore_case);
		if (alias &&
		    !ce_stage(alias) &&
		    !ie_match_stat(istate, alias, st, ce_option)) {
			/* Nothing changed, really */
			if (!S_ISGITLINK(alias->ce_mode))
				ce_mark_uptodate(alias);
			alias->ce_flags |= CE_ADDED;

			discard_cache_entry(ce);
			return 0;
		}
	}
	if (!intent_only) {
		if (index_path(istate, &ce->oid, path, st, hash_flags)) {
			discard_cache_entry(ce);
			return error(_("unable to index file '%s'"), path);
		}
	} else
		set_object_name_for_intent_to_add_entry(ce);

	if (ignore_case && alias && different_name(ce, alias))
		ce = create_alias_ce(istate, ce, alias);
	ce->ce_flags |= CE_ADDED;

	/* It was suspected to be racily clean, but it turns out to be Ok */
	was_same = (alias &&
		    !ce_stage(alias) &&
		    oideq(&alias->oid, &ce->oid) &&
		    ce->ce_mode == alias->ce_mode);

	if (pretend)
		discard_cache_entry(ce);
	else if (add_index_entry(istate, ce, add_option)) {
		discard_cache_entry(ce);
		return error(_("unable to add '%s' to index"), path);
	}
	if (verbose && !was_same)
		printf("add '%s'\n", path);
	return 0;
}

int add_file_to_index(struct index_state *istate, const char *path, int flags)
{
	struct stat st;
	if (lstat(path, &st))
		die_errno(_("unable to stat '%s'"), path);
	return add_to_index(istate, path, &st, flags);
}
struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len)
{
	return mem_pool__ce_calloc(find_mem_pool(istate), len);
}

struct cache_entry *make_empty_transient_cache_entry(size_t len,
						     struct mem_pool *ce_mem_pool)
{
	if (ce_mem_pool)
		return mem_pool__ce_calloc(ce_mem_pool, len);
	return xcalloc(1, cache_entry_size(len));
}

enum verify_path_result {
	PATH_OK,
	PATH_INVALID,
	PATH_DIR_WITH_SEP,
};

static enum verify_path_result verify_path_internal(const char *, unsigned);

int verify_path(const char *path, unsigned mode)
{
	return verify_path_internal(path, mode) == PATH_OK;
}

struct cache_entry *make_cache_entry(struct index_state *istate,
				     unsigned int mode,
				     const struct object_id *oid,
				     const char *path,
				     int stage,
				     unsigned int refresh_options)
{
	struct cache_entry *ce, *ret;
	int len;

	if (verify_path_internal(path, mode) == PATH_INVALID) {
		error(_("invalid path '%s'"), path);
		return NULL;
	}

	len = strlen(path);
	ce = make_empty_cache_entry(istate, len);

	oidcpy(&ce->oid, oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);

	ret = refresh_cache_entry(istate, ce, refresh_options);
	if (ret != ce)
		discard_cache_entry(ce);
	return ret;
}

struct cache_entry *make_transient_cache_entry(unsigned int mode,
					       const struct object_id *oid,
					       const char *path,
					       int stage,
					       struct mem_pool *ce_mem_pool)
{
	struct cache_entry *ce;
	int len;

	if (!verify_path(path, mode)) {
		error(_("invalid path '%s'"), path);
		return NULL;
	}

	len = strlen(path);
	ce = make_empty_transient_cache_entry(len, ce_mem_pool);

	oidcpy(&ce->oid, oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);

	return ce;
}

/*
 * Chmod an index entry with either +x or -x.
 *
 * Returns -1 if the chmod for the particular cache entry failed (if it's
 * not a regular file), -2 if an invalid flip argument is passed in, 0
 * otherwise.
 */
int chmod_index_entry(struct index_state *istate, struct cache_entry *ce,
		      char flip)
{
	if (!S_ISREG(ce->ce_mode))
		return -1;
	switch (flip) {
	case '+':
		ce->ce_mode |= 0111;
		break;
	case '-':
		ce->ce_mode &= ~0111;
		break;
	default:
		return -2;
	}
	cache_tree_invalidate_path(istate, ce->name);
	ce->ce_flags |= CE_UPDATE_IN_BASE;
	mark_fsmonitor_invalid(istate, ce);
	istate->cache_changed |= CE_ENTRY_CHANGED;

	return 0;
}

int ce_same_name(const struct cache_entry *a, const struct cache_entry *b)
{
	int len = ce_namelen(a);
	return ce_namelen(b) == len && !memcmp(a->name, b->name, len);
}

/*
 * We fundamentally don't like some paths: we don't want
 * dot or dot-dot anywhere, and for obvious reasons don't
 * want to recurse into ".git" either.
 *
 * Also, we don't want double slashes or slashes at the
 * end that can make pathnames ambiguous.
 */
static int verify_dotfile(const char *rest, unsigned mode)
{
	/*
	 * The first character was '.', but that
	 * has already been discarded, we now test
	 * the rest.
	 */

	/* "." is not allowed */
	if (*rest == '\0' || is_dir_sep(*rest))
		return 0;

	switch (*rest) {
	/*
	 * ".git" followed by NUL or slash is bad. Note that we match
	 * case-insensitively here, even if ignore_case is not set.
	 * This outlaws ".GIT" everywhere out of an abundance of caution,
	 * since there's really no good reason to allow it.
	 *
	 * Once we've seen ".git", we can also find ".gitmodules", etc (also
	 * case-insensitively).
	 */
	case 'g':
	case 'G':
		if (rest[1] != 'i' && rest[1] != 'I')
			break;
		if (rest[2] != 't' && rest[2] != 'T')
			break;
		if (rest[3] == '\0' || is_dir_sep(rest[3]))
			return 0;
		if (S_ISLNK(mode)) {
			rest += 3;
			if (skip_iprefix(rest, "modules", &rest) &&
			    (*rest == '\0' || is_dir_sep(*rest)))
				return 0;
		}
		break;
	case '.':
		if (rest[1] == '\0' || is_dir_sep(rest[1]))
			return 0;
	}
	return 1;
}
static enum verify_path_result verify_path_internal(const char *path,
						    unsigned mode)
{
	char c = 0;

	if (has_dos_drive_prefix(path))
		return PATH_INVALID;

	if (!is_valid_path(path))
		return PATH_INVALID;

	goto inside;
	for (;;) {
		if (!c)
			return PATH_OK;
		if (is_dir_sep(c)) {
inside:
			if (protect_hfs) {

				if (is_hfs_dotgit(path))
					return PATH_INVALID;
				if (S_ISLNK(mode)) {
					if (is_hfs_dotgitmodules(path))
						return PATH_INVALID;
				}
			}
			if (protect_ntfs) {
#if defined GIT_WINDOWS_NATIVE || defined __CYGWIN__
				if (c == '\\')
					return PATH_INVALID;
#endif
				if (is_ntfs_dotgit(path))
					return PATH_INVALID;
				if (S_ISLNK(mode)) {
					if (is_ntfs_dotgitmodules(path))
						return PATH_INVALID;
				}
			}

			c = *path++;
			if ((c == '.' && !verify_dotfile(path, mode)) ||
			    is_dir_sep(c))
				return PATH_INVALID;
			/*
			 * allow terminating directory separators for
			 * sparse directory entries.
			 */
			if (c == '\0')
				return S_ISDIR(mode) ? PATH_DIR_WITH_SEP :
						       PATH_INVALID;
		} else if (c == '\\' && protect_ntfs) {
			if (is_ntfs_dotgit(path))
				return PATH_INVALID;
			if (S_ISLNK(mode)) {
				if (is_ntfs_dotgitmodules(path))
					return PATH_INVALID;
			}
		}

		c = *path++;
	}
}

/*
 * Do we have another file that has the beginning components being a
 * proper superset of the name we're trying to add?
 */
static int has_file_name(struct index_state *istate,
			 const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int len = ce_namelen(ce);
	int stage = ce_stage(ce);
	const char *name = ce->name;

	while (pos < istate->cache_nr) {
		struct cache_entry *p = istate->cache[pos++];

		if (len >= ce_namelen(p))
			break;
		if (memcmp(name, p->name, len))
			break;
		if (ce_stage(p) != stage)
			continue;
		if (p->name[len] != '/')
			continue;
		if (p->ce_flags & CE_REMOVE)
			continue;
		retval = -1;
		if (!ok_to_replace)
			break;
		remove_index_entry_at(istate, --pos);
	}
	return retval;
}

/*
 * Like strcmp(), but also return the offset of the first change.
 * If strings are equal, return the length.
 */
int strcmp_offset(const char *s1, const char *s2, size_t *first_change)
{
	size_t k;

	if (!first_change)
		return strcmp(s1, s2);

	for (k = 0; s1[k] == s2[k]; k++)
		if (s1[k] == '\0')
			break;

	*first_change = k;
	return (unsigned char)s1[k] - (unsigned char)s2[k];
}
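
/*
 * Worked example for strcmp_offset(): comparing "dir/afile" with
 * "dir/bfile" returns a negative value and sets *first_change to 4,
 * the offset of the first differing byte; comparing equal strings
 * returns 0 with *first_change left at the offset of the NUL.
 */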
/*
 * Do we have another file with a pathname that is a proper
 * subset of the name we're trying to add?
 *
 * That is, is there another file in the index with a path
 * that matches a sub-directory in the given entry?
 */
static int has_dir_name(struct index_state *istate,
			const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int stage = ce_stage(ce);
	const char *name = ce->name;
	const char *slash = name + ce_namelen(ce);
	size_t len_eq_last;
	int cmp_last = 0;

	/*
	 * We are frequently called during an iteration on a sorted
	 * list of pathnames and while building a new index.  Therefore,
	 * there is a high probability that this entry will eventually
	 * be appended to the index, rather than inserted in the middle.
	 * If we can confirm that, we can avoid binary searches on the
	 * components of the pathname.
	 *
	 * Compare the entry's full path with the last path in the index.
	 */
	if (istate->cache_nr > 0) {
		cmp_last = strcmp_offset(name,
			istate->cache[istate->cache_nr - 1]->name,
			&len_eq_last);
		if (cmp_last > 0) {
			if (name[len_eq_last] != '/') {
				/*
				 * The entry sorts AFTER the last one in the
				 * index.
				 *
				 * If there were a conflict with "file", then our
				 * name would start with "file/" and the last index
				 * entry would start with "file" but not "file/".
				 *
				 * The next character after common prefix is
				 * not '/', so there can be no conflict.
				 */
				return retval;
			} else {
				/*
				 * The entry sorts AFTER the last one in the
				 * index, and the next character after common
				 * prefix is '/'.
				 *
				 * Either the last index entry is a file in
				 * conflict with this entry, or it has a name
				 * which sorts between this entry and the
				 * potential conflicting file.
				 *
				 * In both cases, we fall through to the loop
				 * below and let the regular search code handle it.
				 */
			}
		} else if (cmp_last == 0) {
			/*
			 * The entry exactly matches the last one in the
			 * index, but because of multiple stage and CE_REMOVE
			 * items, we fall through and let the regular search
			 * code handle it.
			 */
		}
	}

	for (;;) {
		size_t len;

		for (;;) {
			if (*--slash == '/')
				break;
			if (slash <= ce->name)
				return retval;
		}
		len = slash - name;

		pos = index_name_stage_pos(istate, name, len, stage, EXPAND_SPARSE);
		if (pos >= 0) {
			/*
			 * Found one, but not so fast.  This could
			 * be a marker that says "I was here, but
			 * I am being removed".  Such an entry is
			 * not a part of the resulting tree, and
			 * it is Ok to have a directory at the same
			 * path.
			 */
			if (!(istate->cache[pos]->ce_flags & CE_REMOVE)) {
				retval = -1;
				if (!ok_to_replace)
					break;
				remove_index_entry_at(istate, pos);
				continue;
			}
		}
		else
			pos = -pos-1;

		/*
		 * Trivial optimization: if we find an entry that
		 * already matches the sub-directory, then we know
		 * we're ok, and we can exit.
		 */
		while (pos < istate->cache_nr) {
			struct cache_entry *p = istate->cache[pos];
			if ((ce_namelen(p) <= len) ||
			    (p->name[len] != '/') ||
			    memcmp(p->name, name, len))
				break; /* not our subdirectory */
			if (ce_stage(p) == stage && !(p->ce_flags & CE_REMOVE))
				/*
				 * p is at the same stage as our entry, and
				 * is a subdirectory of what we are looking
				 * at, so we cannot have conflicts at our
				 * level or anything shorter.
				 */
				return retval;
			pos++;
		}
	}
	return retval;
}

/* We may be in a situation where we already have path/file and path
 * is being added, or we already have path and path/file is being
 * added.  Either one would result in a nonsense tree that has path
 * twice when git-write-tree tries to write it out.  Prevent it.
 *
 * If ok-to-replace is specified, we remove the conflicting entries
 * from the cache so the caller should recompute the insert position.
 * When this happens, we return non-zero.
 */
static int check_file_directory_conflict(struct index_state *istate,
					 const struct cache_entry *ce,
					 int pos, int ok_to_replace)
{
	int retval;

	/*
	 * When ce is an "I am going away" entry, we allow it to be added
	 */
	if (ce->ce_flags & CE_REMOVE)
		return 0;

	/*
	 * We check if the path is a sub-path of a subsequent pathname
	 * first, since removing those will not change the position
	 * we need to insert at.
	 */
	retval = has_file_name(istate, ce, pos, ok_to_replace);

	/*
	 * Then check if the path might have a clashing sub-directory
	 * before it.
	 */
	return retval + has_dir_name(istate, ce, pos, ok_to_replace);
}
static int add_index_entry_with_check(struct index_state *istate, struct cache_entry *ce, int option)
{
	int pos;
	int ok_to_add = option & ADD_CACHE_OK_TO_ADD;
	int ok_to_replace = option & ADD_CACHE_OK_TO_REPLACE;
	int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK;
	int new_only = option & ADD_CACHE_NEW_ONLY;

	/*
	 * If this entry's path sorts after the last entry in the index,
	 * we can avoid searching for it.
	 */
	if (istate->cache_nr > 0 &&
		strcmp(ce->name, istate->cache[istate->cache_nr - 1]->name) > 0)
		pos = index_pos_to_insert_pos(istate->cache_nr);
	else
		pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce), EXPAND_SPARSE);

	/*
	 * Cache tree path should be invalidated only after index_name_stage_pos,
	 * in case it expands a sparse index.
	 */
	if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
		cache_tree_invalidate_path(istate, ce->name);

	/* existing match? Just replace it. */
	if (pos >= 0) {
		if (!new_only)
			replace_index_entry(istate, pos, ce);
		return 0;
	}
	pos = -pos-1;

	if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
		untracked_cache_add_to_index(istate, ce->name);

	/*
	 * Inserting a merged entry ("stage 0") into the index
	 * will always replace all non-merged entries..
	 */
	if (pos < istate->cache_nr && ce_stage(ce) == 0) {
		while (ce_same_name(istate->cache[pos], ce)) {
			ok_to_add = 1;
			if (!remove_index_entry_at(istate, pos))
				break;
		}
	}

	if (!ok_to_add)
		return -1;
	if (verify_path_internal(ce->name, ce->ce_mode) == PATH_INVALID)
		return error(_("invalid path '%s'"), ce->name);

	if (!skip_df_check &&
	    check_file_directory_conflict(istate, ce, pos, ok_to_replace)) {
		if (!ok_to_replace)
			return error(_("'%s' appears as both a file and as a directory"),
				     ce->name);
		pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce), EXPAND_SPARSE);
		pos = -pos-1;
	}
	return pos + 1;
}

int add_index_entry(struct index_state *istate, struct cache_entry *ce, int option)
{
	int pos;

	if (option & ADD_CACHE_JUST_APPEND)
		pos = istate->cache_nr;
	else {
		int ret;
		ret = add_index_entry_with_check(istate, ce, option);
		if (ret <= 0)
			return ret;
		pos = ret - 1;
	}

	/* Make sure the array is big enough .. */
	ALLOC_GROW(istate->cache, istate->cache_nr + 1, istate->cache_alloc);

	/* Add it in.. */
	istate->cache_nr++;
	if (istate->cache_nr > pos + 1)
		MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
			   istate->cache_nr - pos - 1);
	set_index_entry(istate, pos, ce);
	istate->cache_changed |= CE_ENTRY_ADDED;
	return 0;
}
1342 * "refresh" does not calculate a new sha1 file or bring the
1343 * cache up-to-date for mode/content changes. But what it
1344 * _does_ do is to "re-match" the stat information of a file
1345 * with the cache, so that you can refresh the cache for a
1346 * file that hasn't been changed but where the stat entry is
1349 * For example, you'd want to do this after doing a "git-read-tree",
1350 * to link up the stat cache details with the proper files.
1352 static struct cache_entry
*refresh_cache_ent(struct index_state
*istate
,
1353 struct cache_entry
*ce
,
1354 unsigned int options
, int *err
,
1360 struct cache_entry
*updated
;
1362 int refresh
= options
& CE_MATCH_REFRESH
;
1363 int ignore_valid
= options
& CE_MATCH_IGNORE_VALID
;
1364 int ignore_skip_worktree
= options
& CE_MATCH_IGNORE_SKIP_WORKTREE
;
1365 int ignore_missing
= options
& CE_MATCH_IGNORE_MISSING
;
1366 int ignore_fsmonitor
= options
& CE_MATCH_IGNORE_FSMONITOR
;
1368 if (!refresh
|| ce_uptodate(ce
))
1371 if (!ignore_fsmonitor
)
1372 refresh_fsmonitor(istate
);
1374 * CE_VALID or CE_SKIP_WORKTREE means the user promised us
1375 * that the change to the work tree does not matter and told
1378 if (!ignore_skip_worktree
&& ce_skip_worktree(ce
)) {
1379 ce_mark_uptodate(ce
);
1382 if (!ignore_valid
&& (ce
->ce_flags
& CE_VALID
)) {
1383 ce_mark_uptodate(ce
);
1386 if (!ignore_fsmonitor
&& (ce
->ce_flags
& CE_FSMONITOR_VALID
)) {
1387 ce_mark_uptodate(ce
);
1391 if (has_symlink_leading_path(ce
->name
, ce_namelen(ce
))) {
1401 if (lstat(ce
->name
, &st
) < 0) {
1402 if (ignore_missing
&& errno
== ENOENT
)
1409 changed
= ie_match_stat(istate
, ce
, &st
, options
);
1411 *changed_ret
= changed
;
1414 * The path is unchanged. If we were told to ignore
1415 * valid bit, then we did the actual stat check and
1416 * found that the entry is unmodified. If the entry
1417 * is not marked VALID, this is the place to mark it
1418 * valid again, under "assume unchanged" mode.
1420 if (ignore_valid
&& assume_unchanged
&&
1421 !(ce
->ce_flags
& CE_VALID
))
1422 ; /* mark this one VALID again */
1425 * We do not mark the index itself "modified"
1426 * because CE_UPTODATE flag is in-core only;
1427 * we are not going to write this change out.
1429 if (!S_ISGITLINK(ce
->ce_mode
)) {
1430 ce_mark_uptodate(ce
);
1431 mark_fsmonitor_valid(istate
, ce
);
1439 if (ie_modified(istate
, ce
, &st
, options
)) {
1445 updated
= make_empty_cache_entry(istate
, ce_namelen(ce
));
1446 copy_cache_entry(updated
, ce
);
1447 memcpy(updated
->name
, ce
->name
, ce
->ce_namelen
+ 1);
1448 fill_stat_cache_info(istate
, updated
, &st
);
1450 * If ignore_valid is not set, we should leave CE_VALID bit
1451 * alone. Otherwise, paths marked with --no-assume-unchanged
1452 * (i.e. things to be edited) will reacquire CE_VALID bit
1453 * automatically, which is not really what we want.
1455 if (!ignore_valid
&& assume_unchanged
&&
1456 !(ce
->ce_flags
& CE_VALID
))
1457 updated
->ce_flags
&= ~CE_VALID
;
1459 /* istate->cache_changed is updated in the caller */
static void show_file(const char * fmt, const char * name, int in_porcelain,
		      int * first, const char *header_msg)
{
	if (in_porcelain && *first && header_msg) {
		printf("%s\n", header_msg);
		*first = 0;
	}
	printf(fmt, name);
}

int repo_refresh_and_write_index(struct repository *repo,
				 unsigned int refresh_flags,
				 unsigned int write_flags,
				 int gentle,
				 const struct pathspec *pathspec,
				 char *seen, const char *header_msg)
{
	struct lock_file lock_file = LOCK_INIT;
	int fd, ret = 0;

	fd = repo_hold_locked_index(repo, &lock_file, 0);
	if (!gentle && fd < 0)
		return -1;
	if (refresh_index(repo->index, refresh_flags, pathspec, seen, header_msg))
		ret = 1;
	if (0 <= fd && write_locked_index(repo->index, &lock_file, COMMIT_LOCK | write_flags))
		ret = -1;
	return ret;
}
int refresh_index(struct index_state *istate, unsigned int flags,
		  const struct pathspec *pathspec,
		  char *seen, const char *header_msg)
{
	int i;
	int has_errors = 0;
	int really = (flags & REFRESH_REALLY) != 0;
	int allow_unmerged = (flags & REFRESH_UNMERGED) != 0;
	int quiet = (flags & REFRESH_QUIET) != 0;
	int not_new = (flags & REFRESH_IGNORE_MISSING) != 0;
	int ignore_submodules = (flags & REFRESH_IGNORE_SUBMODULES) != 0;
	int ignore_skip_worktree = (flags & REFRESH_IGNORE_SKIP_WORKTREE) != 0;
	int first = 1;
	int in_porcelain = (flags & REFRESH_IN_PORCELAIN);
	unsigned int options = (CE_MATCH_REFRESH |
				(really ? CE_MATCH_IGNORE_VALID : 0) |
				(not_new ? CE_MATCH_IGNORE_MISSING : 0));
	const char *modified_fmt;
	const char *deleted_fmt;
	const char *typechange_fmt;
	const char *added_fmt;
	const char *unmerged_fmt;
	struct progress *progress = NULL;
	int t2_sum_lstat = 0;
	int t2_sum_scan = 0;

	if (flags & REFRESH_PROGRESS && isatty(2))
		progress = start_delayed_progress(_("Refresh index"),
						  istate->cache_nr);

	trace_performance_enter();
	modified_fmt   = in_porcelain ? "M\t%s\n" : "%s: needs update\n";
	deleted_fmt    = in_porcelain ? "D\t%s\n" : "%s: needs update\n";
	typechange_fmt = in_porcelain ? "T\t%s\n" : "%s: needs update\n";
	added_fmt      = in_porcelain ? "A\t%s\n" : "%s: needs update\n";
	unmerged_fmt   = in_porcelain ? "U\t%s\n" : "%s: needs merge\n";
	/*
	 * Use the multi-threaded preload_index() to refresh most of the
	 * cache entries quickly then in the single threaded loop below,
	 * we only have to do the special cases that are left.
	 */
	preload_index(istate, pathspec, 0);
	trace2_region_enter("index", "refresh", NULL);

	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce, *new_entry;
		int cache_errno = 0;
		int changed = 0;
		int filtered = 0;
		int t2_did_lstat = 0;
		int t2_did_scan = 0;

		ce = istate->cache[i];
		if (ignore_submodules && S_ISGITLINK(ce->ce_mode))
			continue;
		if (ignore_skip_worktree && ce_skip_worktree(ce))
			continue;

		/*
		 * If this entry is a sparse directory, then there isn't
		 * any stat() information to update. Ignore the entry.
		 */
		if (S_ISSPARSEDIR(ce->ce_mode))
			continue;

		if (pathspec && !ce_path_match(istate, ce, pathspec, seen))
			filtered = 1;

		if (ce_stage(ce)) {
			while ((i < istate->cache_nr) &&
			       ! strcmp(istate->cache[i]->name, ce->name))
				i++;
			i--;
			if (allow_unmerged)
				continue;
			if (!filtered)
				show_file(unmerged_fmt, ce->name, in_porcelain,
					  &first, header_msg);
			has_errors = 1;
			continue;
		}

		if (filtered)
			continue;

		new_entry = refresh_cache_ent(istate, ce, options,
					      &cache_errno, &changed,
					      &t2_did_lstat, &t2_did_scan);
		t2_sum_lstat += t2_did_lstat;
		t2_sum_scan += t2_did_scan;
		if (new_entry == ce)
			continue;
		display_progress(progress, i);
		if (!new_entry) {
			const char *fmt;

			if (really && cache_errno == EINVAL) {
				/* If we are doing --really-refresh that
				 * means the index is not valid anymore.
				 */
				ce->ce_flags &= ~CE_VALID;
				ce->ce_flags |= CE_UPDATE_IN_BASE;
				mark_fsmonitor_invalid(istate, ce);
				istate->cache_changed |= CE_ENTRY_CHANGED;
			}
			if (quiet)
				continue;

			if (cache_errno == ENOENT)
				fmt = deleted_fmt;
			else if (ce_intent_to_add(ce))
				fmt = added_fmt; /* must be before other checks */
			else if (changed & TYPE_CHANGED)
				fmt = typechange_fmt;
			else
				fmt = modified_fmt;
			show_file(fmt,
				  ce->name, in_porcelain, &first, header_msg);
			has_errors = 1;
			continue;
		}

		replace_index_entry(istate, i, new_entry);
	}
	trace2_data_intmax("index", NULL, "refresh/sum_lstat", t2_sum_lstat);
	trace2_data_intmax("index", NULL, "refresh/sum_scan", t2_sum_scan);
	trace2_region_leave("index", "refresh", NULL);
	display_progress(progress, istate->cache_nr);
	stop_progress(&progress);
	trace_performance_leave("refresh index");
	return has_errors;
}
struct cache_entry *refresh_cache_entry(struct index_state *istate,
					struct cache_entry *ce,
					unsigned int options)
{
	return refresh_cache_ent(istate, ce, options, NULL, NULL, NULL, NULL);
}

/*****************************************************************
 * Index File I/O
 *****************************************************************/

#define INDEX_FORMAT_DEFAULT 3
static unsigned int get_index_format_default(struct repository *r)
{
	char *envversion = getenv("GIT_INDEX_VERSION");
	char *endp;
	unsigned int version = INDEX_FORMAT_DEFAULT;

	if (!envversion) {
		prepare_repo_settings(r);

		if (r->settings.index_version >= 0)
			version = r->settings.index_version;
		if (version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
			warning(_("index.version set, but the value is invalid.\n"
				  "Using version %i"), INDEX_FORMAT_DEFAULT);
			return INDEX_FORMAT_DEFAULT;
		}
		return version;
	}

	version = strtoul(envversion, &endp, 10);
	if (*endp ||
	    version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
		warning(_("GIT_INDEX_VERSION set, but the value is invalid.\n"
			  "Using version %i"), INDEX_FORMAT_DEFAULT);
		version = INDEX_FORMAT_DEFAULT;
	}
	return version;
}
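
/*
 * For illustration: with neither GIT_INDEX_VERSION in the environment
 * nor index.version configured, this resolves to INDEX_FORMAT_DEFAULT
 * (3); a set GIT_INDEX_VERSION wins over index.version, and an
 * out-of-range value from either source falls back to the default
 * with a warning.
 */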
/*
 * dev/ino/uid/gid/size are also just tracked to the low 32 bits
 * Again - this is just a (very strong in practice) heuristic that
 * the inode hasn't changed.
 *
 * We save the fields in big-endian order to allow using the
 * index file over NFS transparently.
 */
struct ondisk_cache_entry {
	struct cache_time ctime;
	struct cache_time mtime;
	uint32_t dev;
	uint32_t ino;
	uint32_t mode;
	uint32_t uid;
	uint32_t gid;
	uint32_t size;
	/*
	 * unsigned char hash[hashsz];
	 * uint16_t flags;
	 * if (flags & CE_EXTENDED)
	 *	uint16_t flags2;
	 */
	unsigned char data[GIT_MAX_RAWSZ + 2 * sizeof(uint16_t)];
	char name[FLEX_ARRAY];
};

/* These are only used for v3 or lower */
#define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len)
#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,data) + (len) + 8) & ~7)
#define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len)
#define ondisk_data_size(flags, len) (the_hash_algo->rawsz + \
				     ((flags & CE_EXTENDED) ? 2 : 1) * sizeof(uint16_t) + len)
#define ondisk_data_size_max(len) (ondisk_data_size(CE_EXTENDED, len))
#define ondisk_ce_size(ce) (ondisk_cache_entry_size(ondisk_data_size((ce)->ce_flags, ce_namelen(ce))))
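
/*
 * Worked example of the sizing macros above, assuming SHA-1 (rawsz ==
 * 20) and a non-extended entry named "file" (len == 4):
 *
 *	ondisk_data_size = 20 + 1 * sizeof(uint16_t) + 4 = 26
 *	ondisk_ce_size   = (offsetof(..., data) + 26 + 8) & ~7
 *	                 = (40 + 26 + 8) & ~7 = 72 bytes
 *
 * i.e. entries are padded with 1..8 NUL bytes so that the next entry
 * starts on an 8-byte boundary.
 */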
/* Allow fsck to force verification of the index checksum. */
int verify_index_checksum;

/* Allow fsck to force verification of the cache entry order. */
int verify_ce_order;

static int verify_hdr(const struct cache_header *hdr, unsigned long size)
{
	git_hash_ctx c;
	unsigned char hash[GIT_MAX_RAWSZ];
	int hdr_version;
	unsigned char *start, *end;
	struct object_id oid;

	if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
		return error(_("bad signature 0x%08x"), hdr->hdr_signature);
	hdr_version = ntohl(hdr->hdr_version);
	if (hdr_version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < hdr_version)
		return error(_("bad index version %d"), hdr_version);

	if (!verify_index_checksum)
		return 0;

	end = (unsigned char *)hdr + size;
	start = end - the_hash_algo->rawsz;
	oidread(&oid, start, the_repository->hash_algo);
	if (oideq(&oid, null_oid()))
		return 0;

	the_hash_algo->init_fn(&c);
	the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
	the_hash_algo->final_fn(hash, &c);
	if (!hasheq(hash, start, the_repository->hash_algo))
		return error(_("bad index file sha1 signature"));
	return 0;
}

static int read_index_extension(struct index_state *istate,
				const char *ext, const char *data, unsigned long sz)
{
	switch (CACHE_EXT(ext)) {
	case CACHE_EXT_TREE:
		istate->cache_tree = cache_tree_read(data, sz);
		break;
	case CACHE_EXT_RESOLVE_UNDO:
		istate->resolve_undo = resolve_undo_read(data, sz);
		break;
	case CACHE_EXT_LINK:
		if (read_link_extension(istate, data, sz))
			return -1;
		break;
	case CACHE_EXT_UNTRACKED:
		istate->untracked = read_untracked_extension(data, sz);
		break;
	case CACHE_EXT_FSMONITOR:
		read_fsmonitor_extension(istate, data, sz);
		break;
	case CACHE_EXT_ENDOFINDEXENTRIES:
	case CACHE_EXT_INDEXENTRYOFFSETTABLE:
		/* already handled in do_read_index() */
		break;
	case CACHE_EXT_SPARSE_DIRECTORIES:
		/* no content, only an indicator */
		istate->sparse_index = INDEX_COLLAPSED;
		break;
	default:
		if (*ext < 'A' || 'Z' < *ext)
			return error(_("index uses %.4s extension, which we do not understand"),
				     ext);
		fprintf_ln(stderr, _("ignoring %.4s extension"), ext);
		break;
	}
	return 0;
}
/*
 * Parses the contents of the cache entry contained within the 'ondisk' buffer
 * into a new incore 'cache_entry'.
 *
 * Note that 'char *ondisk' may not be aligned to a 4-byte address interval in
 * index v4, so we cannot cast it to 'struct ondisk_cache_entry *' and access
 * its members. Instead, we use the byte offsets of members within the struct to
 * identify where 'get_be16()', 'get_be32()', and 'oidread()' (which can all
 * read from an unaligned memory buffer) should read from the 'ondisk' buffer
 * into the corresponding incore 'cache_entry' members.
 */
static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
					    unsigned int version,
					    const char *ondisk,
					    unsigned long *ent_size,
					    const struct cache_entry *previous_ce)
{
	struct cache_entry *ce;
	size_t len;
	const char *name;
	const unsigned hashsz = the_hash_algo->rawsz;
	const char *flagsp = ondisk + offsetof(struct ondisk_cache_entry, data) + hashsz;
	unsigned int flags;
	size_t copy_len = 0;
	/*
	 * Adjacent cache entries tend to share the leading paths, so it makes
	 * sense to only store the differences in later entries.  In the v4
	 * on-disk format of the index, each on-disk cache entry stores the
	 * number of bytes to be stripped from the end of the previous name,
	 * and the bytes to append to the result, to come up with its name.
	 */
	int expand_name_field = version == 4;

	/* On-disk flags are just 16 bits */
	flags = get_be16(flagsp);
	len = flags & CE_NAMEMASK;

	if (flags & CE_EXTENDED) {
		int extended_flags;
		extended_flags = get_be16(flagsp + sizeof(uint16_t)) << 16;
		/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
		if (extended_flags & ~CE_EXTENDED_FLAGS)
			die(_("unknown index entry format 0x%08x"), extended_flags);
		flags |= extended_flags;
		name = (const char *)(flagsp + 2 * sizeof(uint16_t));
	}
	else
		name = (const char *)(flagsp + sizeof(uint16_t));

	if (expand_name_field) {
		const unsigned char *cp = (const unsigned char *)name;
		size_t strip_len, previous_len;

		/* If we're at the beginning of a block, ignore the previous name */
		strip_len = decode_varint(&cp);
		if (previous_ce) {
			previous_len = previous_ce->ce_namelen;
			if (previous_len < strip_len)
				die(_("malformed name field in the index, near path '%s'"),
				    previous_ce->name);
			copy_len = previous_len - strip_len;
		}
		name = (const char *)cp;
	}

	if (len == CE_NAMEMASK) {
		len = strlen(name);
		if (expand_name_field)
			len += copy_len;
	}

	ce = mem_pool__ce_alloc(ce_mem_pool, len);

	/*
	 * NEEDSWORK: using 'offsetof()' is cumbersome and should be replaced
	 * with something more akin to 'load_bitmap_entries_v1()'s use of
	 * 'read_be16'/'read_be32'. For consistency with the corresponding
	 * ondisk entry write function ('copy_cache_entry_to_ondisk()'), this
	 * should be done at the same time as removing references to
	 * 'ondisk_cache_entry' there.
	 */
	ce->ce_stat_data.sd_ctime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime)
						 + offsetof(struct cache_time, sec));
	ce->ce_stat_data.sd_mtime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime)
						 + offsetof(struct cache_time, sec));
	ce->ce_stat_data.sd_ctime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime)
						  + offsetof(struct cache_time, nsec));
	ce->ce_stat_data.sd_mtime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime)
						  + offsetof(struct cache_time, nsec));
	ce->ce_stat_data.sd_dev   = get_be32(ondisk + offsetof(struct ondisk_cache_entry, dev));
	ce->ce_stat_data.sd_ino   = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ino));
	ce->ce_mode  = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mode));
	ce->ce_stat_data.sd_uid   = get_be32(ondisk + offsetof(struct ondisk_cache_entry, uid));
	ce->ce_stat_data.sd_gid   = get_be32(ondisk + offsetof(struct ondisk_cache_entry, gid));
	ce->ce_stat_data.sd_size  = get_be32(ondisk + offsetof(struct ondisk_cache_entry, size));
	ce->ce_flags = flags & ~CE_NAMEMASK;
	ce->ce_namelen = len;
	ce->index = 0;
	oidread(&ce->oid, (const unsigned char *)ondisk + offsetof(struct ondisk_cache_entry, data),
		the_repository->hash_algo);

	if (expand_name_field) {
		if (copy_len)
			memcpy(ce->name, previous_ce->name, copy_len);
		memcpy(ce->name + copy_len, name, len + 1 - copy_len);
		*ent_size = (name - ((char *)ondisk)) + len + 1 - copy_len;
	} else {
		memcpy(ce->name, name, len + 1);
		*ent_size = ondisk_ce_size(ce);
	}
	return ce;
}
*istate
)
1898 if (!verify_ce_order
)
1901 for (i
= 1; i
< istate
->cache_nr
; i
++) {
1902 struct cache_entry
*ce
= istate
->cache
[i
- 1];
1903 struct cache_entry
*next_ce
= istate
->cache
[i
];
1904 int name_compare
= strcmp(ce
->name
, next_ce
->name
);
1906 if (0 < name_compare
)
1907 die(_("unordered stage entries in index"));
1908 if (!name_compare
) {
1910 die(_("multiple stage entries for merged file '%s'"),
1912 if (ce_stage(ce
) > ce_stage(next_ce
))
1913 die(_("unordered stage entries for '%s'"),
1919 static void tweak_untracked_cache(struct index_state
*istate
)
1921 struct repository
*r
= the_repository
;
1923 prepare_repo_settings(r
);
1925 switch (r
->settings
.core_untracked_cache
) {
1926 case UNTRACKED_CACHE_REMOVE
:
1927 remove_untracked_cache(istate
);
1929 case UNTRACKED_CACHE_WRITE
:
1930 add_untracked_cache(istate
);
1932 case UNTRACKED_CACHE_KEEP
:
1934 * Either an explicit "core.untrackedCache=keep", the
1935 * default if "core.untrackedCache" isn't configured,
1936 * or a fallback on an unknown "core.untrackedCache"
1943 static void tweak_split_index(struct index_state
*istate
)
1945 switch (git_config_get_split_index()) {
1946 case -1: /* unset: do nothing */
1949 remove_split_index(istate
);
1952 add_split_index(istate
);
1954 default: /* unknown value: do nothing */
1959 static void post_read_index_from(struct index_state
*istate
)
1961 check_ce_order(istate
);
1962 tweak_untracked_cache(istate
);
1963 tweak_split_index(istate
);
1964 tweak_fsmonitor(istate
);
1967 static size_t estimate_cache_size_from_compressed(unsigned int entries
)
1969 return entries
* (sizeof(struct cache_entry
) + CACHE_ENTRY_PATH_LENGTH
);
1972 static size_t estimate_cache_size(size_t ondisk_size
, unsigned int entries
)
1974 long per_entry
= sizeof(struct cache_entry
) - sizeof(struct ondisk_cache_entry
);
1977 * Account for potential alignment differences.
1979 per_entry
+= align_padding_size(per_entry
, 0);
1980 return ondisk_size
+ entries
* per_entry
;
1983 struct index_entry_offset
1985 /* starting byte offset into index file, count of index entries in this block */
1989 struct index_entry_offset_table
1992 struct index_entry_offset entries
[FLEX_ARRAY
];
1995 static struct index_entry_offset_table
*read_ieot_extension(const char *mmap
, size_t mmap_size
, size_t offset
);
1996 static void write_ieot_extension(struct strbuf
*sb
, struct index_entry_offset_table
*ieot
);
1998 static size_t read_eoie_extension(const char *mmap
, size_t mmap_size
);
1999 static void write_eoie_extension(struct strbuf
*sb
, git_hash_ctx
*eoie_context
, size_t offset
);
2001 struct load_index_extensions
2004 struct index_state
*istate
;
2007 unsigned long src_offset
;
static void *load_index_extensions(void *_data)
{
	struct load_index_extensions *p = _data;
	unsigned long src_offset = p->src_offset;

	while (src_offset <= p->mmap_size - the_hash_algo->rawsz - 8) {
		/* After an array of active_nr index entries,
		 * there can be arbitrary number of extended
		 * sections, each of which is prefixed with
		 * extension name (4-byte) and section length
		 * in 4-byte network byte order.
		 */
		uint32_t extsize = get_be32(p->mmap + src_offset + 4);
		if (read_index_extension(p->istate,
					 p->mmap + src_offset,
					 p->mmap + src_offset + 8,
					 extsize) < 0) {
			munmap((void *)p->mmap, p->mmap_size);
			die(_("index file corrupt"));
		}
		src_offset += 8;
		src_offset += extsize;
	}

	return NULL;
}
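
/*
 * For illustration, each extension record consumed by the loop above
 * is laid out as:
 *
 *	4-byte signature (e.g. "TREE")
 *	4-byte payload length in network byte order
 *	payload bytes
 *
 * so the loop advances by 8 + extsize until only the trailing hash
 * checksum remains in the mapped file.
 */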
/*
 * A helper function that will load the specified range of cache entries
 * from the memory mapped file and add them to the given index.
 */
static unsigned long load_cache_entry_block(struct index_state *istate,
			struct mem_pool *ce_mem_pool, int offset, int nr, const char *mmap,
			unsigned long start_offset, const struct cache_entry *previous_ce)
{
	int i;
	unsigned long src_offset = start_offset;

	for (i = offset; i < offset + nr; i++) {
		struct cache_entry *ce;
		unsigned long consumed;

		ce = create_from_disk(ce_mem_pool, istate->version,
				      mmap + src_offset,
				      &consumed, previous_ce);
		set_index_entry(istate, i, ce);

		src_offset += consumed;
		previous_ce = ce;
	}
	return src_offset - start_offset;
}

static unsigned long load_all_cache_entries(struct index_state *istate,
			const char *mmap, size_t mmap_size, unsigned long src_offset)
{
	unsigned long consumed;

	istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
	if (istate->version == 4) {
		mem_pool_init(istate->ce_mem_pool,
				estimate_cache_size_from_compressed(istate->cache_nr));
	} else {
		mem_pool_init(istate->ce_mem_pool,
				estimate_cache_size(mmap_size, istate->cache_nr));
	}

	consumed = load_cache_entry_block(istate, istate->ce_mem_pool,
					0, istate->cache_nr, mmap, src_offset, NULL);
	return consumed;
}

/*
 * Mostly randomly chosen maximum thread counts: we
 * cap the parallelism to online_cpus() threads, and we want
 * to have at least 10000 cache entries per thread for it to
 * be worth starting a thread.
 */

#define THREAD_COST	(10000)
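
/*
 * For illustration: an index with 250,000 entries on an 8-core machine
 * would allow min(8, 250000 / THREAD_COST) = 8 reader threads, while a
 * 15,000 entry index would only justify one, keeping thread start-up
 * cost proportionate to the work available.
 */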
2091 struct load_cache_entries_thread_data
2094 struct index_state
*istate
;
2095 struct mem_pool
*ce_mem_pool
;
2098 struct index_entry_offset_table
*ieot
;
2099 int ieot_start
; /* starting index into the ieot array */
2100 int ieot_blocks
; /* count of ieot entries to process */
2101 unsigned long consumed
; /* return # of bytes in index file processed */
2105 * A thread proc to run the load_cache_entries() computation
2106 * across multiple background threads.
2108 static void *load_cache_entries_thread(void *_data
)
2110 struct load_cache_entries_thread_data
*p
= _data
;
2113 /* iterate across all ieot blocks assigned to this thread */
2114 for (i
= p
->ieot_start
; i
< p
->ieot_start
+ p
->ieot_blocks
; i
++) {
2115 p
->consumed
+= load_cache_entry_block(p
->istate
, p
->ce_mem_pool
,
2116 p
->offset
, p
->ieot
->entries
[i
].nr
, p
->mmap
, p
->ieot
->entries
[i
].offset
, NULL
);
2117 p
->offset
+= p
->ieot
->entries
[i
].nr
;
static unsigned long load_cache_entries_threaded(struct index_state *istate, const char *mmap, size_t mmap_size,
						 int nr_threads, struct index_entry_offset_table *ieot)
{
	int i, offset, ieot_blocks, ieot_start, err;
	struct load_cache_entries_thread_data *data;
	unsigned long consumed = 0;

	/* a little sanity checking */
	if (istate->name_hash_initialized)
		BUG("the name hash isn't thread safe");

	istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
	mem_pool_init(istate->ce_mem_pool, 0);

	/* ensure we have no more threads than we have blocks to process */
	if (nr_threads > ieot->nr)
		nr_threads = ieot->nr;
	CALLOC_ARRAY(data, nr_threads);

	offset = ieot_start = 0;
	ieot_blocks = DIV_ROUND_UP(ieot->nr, nr_threads);
	for (i = 0; i < nr_threads; i++) {
		struct load_cache_entries_thread_data *p = &data[i];
		int nr, j;

		if (ieot_start + ieot_blocks > ieot->nr)
			ieot_blocks = ieot->nr - ieot_start;

		p->istate = istate;
		p->offset = offset;
		p->mmap = mmap;
		p->ieot = ieot;
		p->ieot_start = ieot_start;
		p->ieot_blocks = ieot_blocks;

		/* create a mem_pool for each thread */
		nr = 0;
		for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)
			nr += p->ieot->entries[j].nr;
		p->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
		if (istate->version == 4) {
			mem_pool_init(p->ce_mem_pool,
				estimate_cache_size_from_compressed(nr));
		} else {
			mem_pool_init(p->ce_mem_pool,
				estimate_cache_size(mmap_size, nr));
		}

		err = pthread_create(&p->pthread, NULL, load_cache_entries_thread, p);
		if (err)
			die(_("unable to create load_cache_entries thread: %s"), strerror(err));

		/* increment by the number of cache entries in the ieot block being processed */
		for (j = 0; j < ieot_blocks; j++)
			offset += ieot->entries[ieot_start + j].nr;
		ieot_start += ieot_blocks;
	}

	for (i = 0; i < nr_threads; i++) {
		struct load_cache_entries_thread_data *p = &data[i];

		err = pthread_join(p->pthread, NULL);
		if (err)
			die(_("unable to join load_cache_entries thread: %s"), strerror(err));
		mem_pool_combine(istate->ce_mem_pool, p->ce_mem_pool);
		consumed += p->consumed;
	}

	free(data);

	return consumed;
}
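/*
 * Example of the block distribution: with ieot->nr == 10 blocks and
 * nr_threads == 4, DIV_ROUND_UP(10, 4) assigns 3 blocks to each of the
 * first three threads; the `ieot_start + ieot_blocks > ieot->nr` clamp
 * leaves the single remaining block for the last thread.
 */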
static void set_new_index_sparsity(struct index_state *istate)
{
	/*
	 * If the index's repo exists, mark it sparse according to
	 * repo settings.
	 */
	prepare_repo_settings(istate->repo);
	if (!istate->repo->settings.command_requires_full_index &&
	    is_sparse_index_allowed(istate, 0))
		istate->sparse_index = 1;
}
/* remember to discard_cache() before reading a different cache! */
int do_read_index(struct index_state *istate, const char *path, int must_exist)
{
	int fd;
	struct stat st;
	unsigned long src_offset;
	const struct cache_header *hdr;
	const char *mmap;
	size_t mmap_size;
	struct load_index_extensions p;
	size_t extension_offset = 0;
	int nr_threads, cpus;
	struct index_entry_offset_table *ieot = NULL;

	if (istate->initialized)
		return istate->cache_nr;

	istate->timestamp.sec = 0;
	istate->timestamp.nsec = 0;
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		if (!must_exist && errno == ENOENT) {
			set_new_index_sparsity(istate);
			istate->initialized = 1;
			return 0;
		}
		die_errno(_("%s: index file open failed"), path);
	}

	if (fstat(fd, &st))
		die_errno(_("%s: cannot stat the open index"), path);

	mmap_size = xsize_t(st.st_size);
	if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
		die(_("%s: index file smaller than expected"), path);

	mmap = xmmap_gently(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (mmap == MAP_FAILED)
		die_errno(_("%s: unable to map index file%s"), path,
			  mmap_os_err());
	close(fd);

	hdr = (const struct cache_header *)mmap;
	if (verify_hdr(hdr, mmap_size) < 0)
		goto unmap;

	oidread(&istate->oid, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz,
		the_repository->hash_algo);
	istate->version = ntohl(hdr->hdr_version);
	istate->cache_nr = ntohl(hdr->hdr_entries);
	istate->cache_alloc = alloc_nr(istate->cache_nr);
	CALLOC_ARRAY(istate->cache, istate->cache_alloc);
	istate->initialized = 1;

	p.istate = istate;
	p.mmap = mmap;
	p.mmap_size = mmap_size;

	src_offset = sizeof(*hdr);

	if (git_config_get_index_threads(&nr_threads))
		nr_threads = 1;

	/* TODO: does creating more threads than cores help? */
	if (!nr_threads) {
		nr_threads = istate->cache_nr / THREAD_COST;
		cpus = online_cpus();
		if (nr_threads > cpus)
			nr_threads = cpus;
	}

	if (!HAVE_THREADS)
		nr_threads = 1;

	if (nr_threads > 1) {
		extension_offset = read_eoie_extension(mmap, mmap_size);
		if (extension_offset) {
			int err;

			p.src_offset = extension_offset;
			err = pthread_create(&p.pthread, NULL, load_index_extensions, &p);
			if (err)
				die(_("unable to create load_index_extensions thread: %s"), strerror(err));

			nr_threads--;
		}
	}

	/*
	 * Locate and read the index entry offset table so that we can use it
	 * to multi-thread the reading of the cache entries.
	 */
	if (extension_offset && nr_threads > 1)
		ieot = read_ieot_extension(mmap, mmap_size, extension_offset);

	if (ieot) {
		src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, nr_threads, ieot);
		free(ieot);
	} else {
		src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
	}

	istate->timestamp.sec = st.st_mtime;
	istate->timestamp.nsec = ST_MTIME_NSEC(st);

	/* if we created a thread, join it otherwise load the extensions on the primary thread */
	if (extension_offset) {
		int ret = pthread_join(p.pthread, NULL);
		if (ret)
			die(_("unable to join load_index_extensions thread: %s"), strerror(ret));
	} else {
		p.src_offset = src_offset;
		load_index_extensions(&p);
	}
	munmap((void *)mmap, mmap_size);

	/*
	 * TODO trace2: replace "the_repository" with the actual repo instance
	 * that is associated with the given "istate".
	 */
	trace2_data_intmax("index", the_repository, "read/version",
			   istate->version);
	trace2_data_intmax("index", the_repository, "read/cache_nr",
			   istate->cache_nr);

	/*
	 * If the command explicitly requires a full index, force it
	 * to be full. Otherwise, correct the sparsity based on repository
	 * settings and other properties of the index (if necessary).
	 */
	prepare_repo_settings(istate->repo);
	if (istate->repo->settings.command_requires_full_index)
		ensure_full_index(istate);
	else
		ensure_correct_sparsity(istate);

	return istate->cache_nr;

unmap:
	munmap((void *)mmap, mmap_size);
	die(_("index file corrupt"));
}
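/*
 * Overall on-disk shape consumed by do_read_index(), for orientation:
 *
 *   | 12-byte header: "DIRC" + 4-byte version + 4-byte entry count |
 *   | cache entries (sorted by pathname)                           |
 *   | optional extensions: <4-byte name><4-byte size><payload>...  |
 *   | trailing hash over everything above                          |
 *
 * which is why the reader starts at src_offset = sizeof(*hdr), loads
 * hdr_entries entries, and hands everything between the entries and
 * the trailing hash to load_index_extensions().
 */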
/*
 * Signal that the shared index is used by updating its mtime.
 *
 * This way, shared index can be removed if they have not been used
 * for some time.
 */
static void freshen_shared_index(const char *shared_index, int warn)
{
	if (!check_and_freshen_file(shared_index, 1) && warn)
		warning(_("could not freshen shared index '%s'"), shared_index);
}
int read_index_from(struct index_state *istate, const char *path,
		    const char *gitdir)
{
	struct split_index *split_index;
	int ret;
	const char *base_oid_hex;
	char *base_path;

	/* istate->initialized covers both .git/index and .git/sharedindex.xxx */
	if (istate->initialized)
		return istate->cache_nr;

	/*
	 * TODO trace2: replace "the_repository" with the actual repo instance
	 * that is associated with the given "istate".
	 */
	trace2_region_enter_printf("index", "do_read_index", the_repository,
				   "%s", path);
	trace_performance_enter();
	ret = do_read_index(istate, path, 0);
	trace_performance_leave("read cache %s", path);
	trace2_region_leave_printf("index", "do_read_index", the_repository,
				   "%s", path);

	split_index = istate->split_index;
	if (!split_index || is_null_oid(&split_index->base_oid)) {
		post_read_index_from(istate);
		return ret;
	}

	trace_performance_enter();
	if (split_index->base)
		release_index(split_index->base);
	else
		ALLOC_ARRAY(split_index->base, 1);
	index_state_init(split_index->base, istate->repo);

	base_oid_hex = oid_to_hex(&split_index->base_oid);
	base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
	if (file_exists(base_path)) {
		trace2_region_enter_printf("index", "shared/do_read_index",
					   the_repository, "%s", base_path);

		ret = do_read_index(split_index->base, base_path, 0);
		trace2_region_leave_printf("index", "shared/do_read_index",
					   the_repository, "%s", base_path);
	} else {
		char *path_copy = xstrdup(path);
		char *base_path2 = xstrfmt("%s/sharedindex.%s",
					   dirname(path_copy), base_oid_hex);
		free(path_copy);
		trace2_region_enter_printf("index", "shared/do_read_index",
					   the_repository, "%s", base_path2);
		ret = do_read_index(split_index->base, base_path2, 1);
		trace2_region_leave_printf("index", "shared/do_read_index",
					   the_repository, "%s", base_path2);
		free(base_path2);
	}
	if (!oideq(&split_index->base_oid, &split_index->base->oid))
		die(_("broken index, expect %s in %s, got %s"),
		    base_oid_hex, base_path,
		    oid_to_hex(&split_index->base->oid));

	freshen_shared_index(base_path, 0);
	merge_base_index(istate);
	post_read_index_from(istate);
	trace_performance_leave("read cache %s", base_path);
	free(base_path);
	return ret;
}
int is_index_unborn(struct index_state *istate)
{
	return (!istate->cache_nr && !istate->timestamp.sec);
}

void index_state_init(struct index_state *istate, struct repository *r)
{
	struct index_state blank = INDEX_STATE_INIT(r);
	memcpy(istate, &blank, sizeof(*istate));
}
void release_index(struct index_state *istate)
{
	/*
	 * Cache entries in istate->cache[] should have been allocated
	 * from the memory pool associated with this index, or from an
	 * associated split_index. There is no need to free individual
	 * cache entries. validate_cache_entries can detect when this
	 * assertion does not hold.
	 */
	validate_cache_entries(istate);

	resolve_undo_clear_index(istate);
	free_name_hash(istate);
	cache_tree_free(&(istate->cache_tree));
	free(istate->fsmonitor_last_update);
	free(istate->cache);
	discard_split_index(istate);
	free_untracked_cache(istate->untracked);

	if (istate->sparse_checkout_patterns) {
		clear_pattern_list(istate->sparse_checkout_patterns);
		FREE_AND_NULL(istate->sparse_checkout_patterns);
	}

	if (istate->ce_mem_pool) {
		mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
		FREE_AND_NULL(istate->ce_mem_pool);
	}
}

void discard_index(struct index_state *istate)
{
	release_index(istate);
	index_state_init(istate, istate->repo);
}
/*
 * Validate the cache entries of this index.
 * All cache entries associated with this index
 * should have been allocated by the memory pool
 * associated with this index, or by a referenced
 * split index.
 */
void validate_cache_entries(const struct index_state *istate)
{
	int i;

	if (!should_validate_cache_entries() || !istate || !istate->initialized)
		return;

	for (i = 0; i < istate->cache_nr; i++) {
		if (!istate) {
			BUG("cache entry is not allocated from expected memory pool");
		} else if (!istate->ce_mem_pool ||
			!mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
			if (!istate->split_index ||
				!istate->split_index->base ||
				!istate->split_index->base->ce_mem_pool ||
				!mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
				BUG("cache entry is not allocated from expected memory pool");
			}
		}
	}

	if (istate->split_index)
		validate_cache_entries(istate->split_index->base);
}
int unmerged_index(const struct index_state *istate)
{
	int i;
	for (i = 0; i < istate->cache_nr; i++) {
		if (ce_stage(istate->cache[i]))
			return 1;
	}
	return 0;
}
int repo_index_has_changes(struct repository *repo,
			   struct tree *tree,
			   struct strbuf *sb)
{
	struct index_state *istate = repo->index;
	struct object_id cmp;
	int i;

	if (tree)
		cmp = tree->object.oid;
	if (tree || !repo_get_oid_tree(repo, "HEAD", &cmp)) {
		struct diff_options opt;

		repo_diff_setup(repo, &opt);
		opt.flags.exit_with_status = 1;
		if (!sb)
			opt.flags.quick = 1;
		diff_setup_done(&opt);
		do_diff_cache(&cmp, &opt);
		diffcore_std(&opt);
		for (i = 0; sb && i < diff_queued_diff.nr; i++) {
			if (i)
				strbuf_addch(sb, ' ');
			strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);
		}
		diff_flush(&opt);
		return opt.flags.has_changes != 0;
	} else {
		/* TODO: audit for interaction with sparse-index. */
		ensure_full_index(istate);
		for (i = 0; sb && i < istate->cache_nr; i++) {
			if (i)
				strbuf_addch(sb, ' ');
			strbuf_addstr(sb, istate->cache[i]->name);
		}
		return !!istate->cache_nr;
	}
}
static int write_index_ext_header(struct hashfile *f,
				  git_hash_ctx *eoie_f,
				  unsigned int ext,
				  unsigned int sz)
{
	hashwrite_be32(f, ext);
	hashwrite_be32(f, sz);

	if (eoie_f) {
		ext = htonl(ext);
		sz = htonl(sz);

		the_hash_algo->update_fn(eoie_f, &ext, sizeof(ext));
		the_hash_algo->update_fn(eoie_f, &sz, sizeof(sz));
	}
	return 0;
}
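/*
 * Concretely, write_index_ext_header(f, eoie_c, CACHE_EXT_TREE, 42)
 * emits the 8 bytes "TREE" 0x00 0x00 0x00 0x2a into the index, and
 * feeds those same 8 bytes (name and size, but never the payload) to
 * the running EOIE hash when one is being recorded.
 */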
static void ce_smudge_racily_clean_entry(struct index_state *istate,
					 struct cache_entry *ce)
{
	/*
	 * The only thing we care about in this function is to smudge the
	 * falsely clean entry due to touch-update-touch race, so we leave
	 * everything else as they are.  We are called for entries whose
	 * ce_stat_data.sd_mtime match the index file mtime.
	 *
	 * Note that this actually does not do much for gitlinks, for
	 * which ce_match_stat_basic() always goes to the actual
	 * contents.  The caller checks with is_racy_timestamp() which
	 * always says "no" for gitlinks, so we are not called for them ;-)
	 */
	struct stat st;

	if (lstat(ce->name, &st) < 0)
		return;
	if (ce_match_stat_basic(ce, &st))
		return;
	if (ce_modified_check_fs(istate, ce, &st)) {
		/* This is "racily clean"; smudge it.  Note that this
		 * is a tricky code.  At first glance, it may appear
		 * that it can break with this sequence:
		 *
		 * $ echo xyzzy >frotz
		 * $ git-update-index --add frotz
		 * $ : >frotz
		 * $ sleep 3
		 * $ echo filfre >nitfol
		 * $ git-update-index --add nitfol
		 *
		 * but it does not.  When the second update-index runs,
		 * it notices that the entry "frotz" has the same timestamp
		 * as index, and if we were to smudge it by resetting its
		 * size to zero here, then the object name recorded
		 * in index is the 6-byte file but the cached stat information
		 * becomes zero --- which would then match what we would
		 * obtain from the filesystem next time we stat("frotz").
		 *
		 * However, the second update-index, before calling
		 * this function, notices that the cached size is 6
		 * bytes and what is on the filesystem is an empty
		 * file, and never calls us, so the cached size information
		 * for "frotz" stays 6 which does not match the filesystem.
		 */
		ce->ce_stat_data.sd_size = 0;
	}
}
/* Copy miscellaneous fields but not the name */
static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
				       struct cache_entry *ce)
{
	short flags;
	const unsigned hashsz = the_hash_algo->rawsz;
	uint16_t *flagsp = (uint16_t *)(ondisk->data + hashsz);

	ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
	ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
	ondisk->ctime.nsec = htonl(ce->ce_stat_data.sd_ctime.nsec);
	ondisk->mtime.nsec = htonl(ce->ce_stat_data.sd_mtime.nsec);
	ondisk->dev  = htonl(ce->ce_stat_data.sd_dev);
	ondisk->ino  = htonl(ce->ce_stat_data.sd_ino);
	ondisk->mode = htonl(ce->ce_mode);
	ondisk->uid  = htonl(ce->ce_stat_data.sd_uid);
	ondisk->gid  = htonl(ce->ce_stat_data.sd_gid);
	ondisk->size = htonl(ce->ce_stat_data.sd_size);
	hashcpy(ondisk->data, ce->oid.hash, the_repository->hash_algo);

	flags = ce->ce_flags & ~CE_NAMEMASK;
	flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
	flagsp[0] = htons(flags);
	if (ce->ce_flags & CE_EXTENDED) {
		flagsp[1] = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
	}
}
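/*
 * Note on the flags word (documenting the code above): the lower 12
 * bits carry the name length, so any path of CE_NAMEMASK (0xfff) bytes
 * or longer stores 0xfff and the real length is recovered from the
 * NUL-terminated name at read time.  The second 16-bit word, holding
 * the CE_EXTENDED_FLAGS bits, is written only when CE_EXTENDED is set.
 */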
static int ce_write_entry(struct hashfile *f, struct cache_entry *ce,
			  struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
{
	int size;
	unsigned int saved_namelen;
	int stripped_name = 0;
	static unsigned char padding[8] = { 0x00 };

	if (ce->ce_flags & CE_STRIP_NAME) {
		saved_namelen = ce_namelen(ce);
		ce->ce_namelen = 0;
		stripped_name = 1;
	}

	size = offsetof(struct ondisk_cache_entry,data) + ondisk_data_size(ce->ce_flags, 0);

	if (!previous_name) {
		int len = ce_namelen(ce);
		copy_cache_entry_to_ondisk(ondisk, ce);
		hashwrite(f, ondisk, size);
		hashwrite(f, ce->name, len);
		hashwrite(f, padding, align_padding_size(size, len));
	} else {
		int common, to_remove, prefix_size;
		unsigned char to_remove_vi[16];
		for (common = 0;
		     (ce->name[common] &&
		      common < previous_name->len &&
		      ce->name[common] == previous_name->buf[common]);
		     common++)
			; /* still matching */
		to_remove = previous_name->len - common;
		prefix_size = encode_varint(to_remove, to_remove_vi);

		copy_cache_entry_to_ondisk(ondisk, ce);
		hashwrite(f, ondisk, size);
		hashwrite(f, to_remove_vi, prefix_size);
		hashwrite(f, ce->name + common, ce_namelen(ce) - common);
		hashwrite(f, padding, 1);

		strbuf_splice(previous_name, common, to_remove,
			      ce->name + common, ce_namelen(ce) - common);
	}
	if (stripped_name) {
		ce->ce_namelen = saved_namelen;
		ce->ce_flags &= ~CE_STRIP_NAME;
	}

	return 0;
}
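/*
 * Worked example of the version 4 prefix compression implemented in
 * ce_write_entry(): with previous_name == "dir/file1" and the current
 * entry named "dir/file2", the common prefix is the 8 bytes
 * "dir/file", so to_remove is 1.  The entry stores the varint 0x01
 * (strip one byte from the previous path) followed by "2" and a single
 * NUL padding byte; the reader reverses this in create_from_disk().
 */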
/*
 * This function verifies if index_state has the correct sha1 of the
 * index file.  Don't die if we have any other failure, just return 0.
 */
static int verify_index_from(const struct index_state *istate, const char *path)
{
	int fd;
	ssize_t n;
	struct stat st;
	unsigned char hash[GIT_MAX_RAWSZ];

	if (!istate->initialized)
		return 0;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 0;

	if (fstat(fd, &st))
		goto out;

	if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
		goto out;

	n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
	if (n != the_hash_algo->rawsz)
		goto out;

	if (!hasheq(istate->oid.hash, hash, the_repository->hash_algo))
		goto out;

	close(fd);
	return 1;

out:
	close(fd);
	return 0;
}

static int repo_verify_index(struct repository *repo)
{
	return verify_index_from(repo->index, repo->index_file);
}
int has_racy_timestamp(struct index_state *istate)
{
	int entries = istate->cache_nr;
	int i;

	for (i = 0; i < entries; i++) {
		struct cache_entry *ce = istate->cache[i];
		if (is_racy_timestamp(istate, ce))
			return 1;
	}
	return 0;
}

void repo_update_index_if_able(struct repository *repo,
			       struct lock_file *lockfile)
{
	if ((repo->index->cache_changed ||
	     has_racy_timestamp(repo->index)) &&
	    repo_verify_index(repo))
		write_locked_index(repo->index, lockfile, COMMIT_LOCK);
	else
		rollback_lock_file(lockfile);
}
static int record_eoie(void)
{
	int val;

	if (!git_config_get_bool("index.recordendofindexentries", &val))
		return val;

	/*
	 * As a convenience, the end of index entries extension
	 * used for threading is written by default if the user
	 * explicitly requested threaded index reads.
	 */
	return !git_config_get_index_threads(&val) && val != 1;
}

static int record_ieot(void)
{
	int val;

	if (!git_config_get_bool("index.recordoffsettable", &val))
		return val;

	/*
	 * As a convenience, the offset table used for threading is
	 * written by default if the user explicitly requested
	 * threaded index reads.
	 */
	return !git_config_get_index_threads(&val) && val != 1;
}
enum write_extensions {
	WRITE_NO_EXTENSION =              0,
	WRITE_SPLIT_INDEX_EXTENSION =     1<<0,
	WRITE_CACHE_TREE_EXTENSION =      1<<1,
	WRITE_RESOLVE_UNDO_EXTENSION =    1<<2,
	WRITE_UNTRACKED_CACHE_EXTENSION = 1<<3,
	WRITE_FSMONITOR_EXTENSION =       1<<4,
};
#define WRITE_ALL_EXTENSIONS ((enum write_extensions)-1)
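/*
 * Callers combine these as bit masks; e.g. write_locked_index() passes
 * ~WRITE_SPLIT_INDEX_EXTENSION to write every extension except the
 * "link" extension, and write_shared_index() passes WRITE_NO_EXTENSION
 * so the shared index file carries cache entries only.
 */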
/*
 * On success, `tempfile` is closed. If it is the temporary file
 * of a `struct lock_file`, we will therefore effectively perform
 * a 'close_lock_file_gently()`. Since that is an implementation
 * detail of lockfiles, callers of `do_write_index()` should not
 * rely on it.
 */
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
			  enum write_extensions write_extensions, unsigned flags)
{
	uint64_t start = getnanotime();
	struct hashfile *f;
	git_hash_ctx *eoie_c = NULL;
	struct cache_header hdr;
	int i, err = 0, removed, extended, hdr_version;
	struct cache_entry **cache = istate->cache;
	int entries = istate->cache_nr;
	struct stat st;
	struct ondisk_cache_entry ondisk;
	struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
	int drop_cache_tree = istate->drop_cache_tree;
	off_t offset;
	int csum_fsync_flag;
	int ieot_entries = 1;
	struct index_entry_offset_table *ieot = NULL;
	int nr, nr_threads;
	struct repository *r = istate->repo;

	f = hashfd(tempfile->fd, tempfile->filename.buf);

	prepare_repo_settings(r);
	f->skip_hash = r->settings.index_skip_hash;

	for (i = removed = extended = 0; i < entries; i++) {
		if (cache[i]->ce_flags & CE_REMOVE)
			removed++;

		/* reduce extended entries if possible */
		cache[i]->ce_flags &= ~CE_EXTENDED;
		if (cache[i]->ce_flags & CE_EXTENDED_FLAGS) {
			extended++;
			cache[i]->ce_flags |= CE_EXTENDED;
		}
	}

	if (!istate->version)
		istate->version = get_index_format_default(r);

	/* demote version 3 to version 2 when the latter suffices */
	if (istate->version == 3 || istate->version == 2)
		istate->version = extended ? 3 : 2;

	hdr_version = istate->version;

	hdr.hdr_signature = htonl(CACHE_SIGNATURE);
	hdr.hdr_version = htonl(hdr_version);
	hdr.hdr_entries = htonl(entries - removed);

	hashwrite(f, &hdr, sizeof(hdr));

	if (!HAVE_THREADS || git_config_get_index_threads(&nr_threads))
		nr_threads = 1;

	if (nr_threads != 1 && record_ieot()) {
		int ieot_blocks, cpus;

		/*
		 * ensure default number of ieot blocks maps evenly to the
		 * default number of threads that will process them leaving
		 * room for the thread to load the index extensions.
		 */
		if (!nr_threads) {
			ieot_blocks = istate->cache_nr / THREAD_COST;
			cpus = online_cpus();
			if (ieot_blocks > cpus - 1)
				ieot_blocks = cpus - 1;
		} else {
			ieot_blocks = nr_threads;
			if (ieot_blocks > istate->cache_nr)
				ieot_blocks = istate->cache_nr;
		}

		/*
		 * no reason to write out the IEOT extension if we don't
		 * have enough blocks to utilize multi-threading
		 */
		if (ieot_blocks > 1) {
			ieot = xcalloc(1, sizeof(struct index_entry_offset_table)
				+ (ieot_blocks * sizeof(struct index_entry_offset)));
			ieot_entries = DIV_ROUND_UP(entries, ieot_blocks);
		}
	}

	offset = hashfile_total(f);

	nr = 0;
	previous_name = (hdr_version == 4) ? &previous_name_buf : NULL;

	for (i = 0; i < entries; i++) {
		struct cache_entry *ce = cache[i];
		if (ce->ce_flags & CE_REMOVE)
			continue;
		if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce))
			ce_smudge_racily_clean_entry(istate, ce);
		if (is_null_oid(&ce->oid)) {
			static const char msg[] = "cache entry has null sha1: %s";
			static int allow = -1;

			if (allow < 0)
				allow = git_env_bool("GIT_ALLOW_NULL_SHA1", 0);
			if (allow)
				warning(msg, ce->name);
			else
				err = error(msg, ce->name);

			drop_cache_tree = 1;
		}
		if (ieot && i && (i % ieot_entries == 0)) {
			ieot->entries[ieot->nr].nr = nr;
			ieot->entries[ieot->nr].offset = offset;
			ieot->nr++;
			/*
			 * If we have a V4 index, set the first byte to an invalid
			 * character to ensure there is nothing common with the previous
			 * entry
			 */
			if (previous_name)
				previous_name->buf[0] = 0;
			nr = 0;

			offset = hashfile_total(f);
		}
		if (ce_write_entry(f, ce, previous_name, (struct ondisk_cache_entry *)&ondisk) < 0)
			err = -1;

		if (err)
			break;
		nr++;
	}
	if (ieot && nr) {
		ieot->entries[ieot->nr].nr = nr;
		ieot->entries[ieot->nr].offset = offset;
		ieot->nr++;
	}
	strbuf_release(&previous_name_buf);

	if (err) {
		free(ieot);
		return err;
	}

	offset = hashfile_total(f);

	/*
	 * The extension headers must be hashed on their own for the
	 * EOIE extension. Create a hashfile here to compute that hash.
	 */
	if (offset && record_eoie()) {
		CALLOC_ARRAY(eoie_c, 1);
		the_hash_algo->init_fn(eoie_c);
	}

	/*
	 * Lets write out CACHE_EXT_INDEXENTRYOFFSETTABLE first so that we
	 * can minimize the number of extensions we have to scan through to
	 * find it during load. Write it out regardless of the
	 * strip_extensions parameter as we need it when loading the shared
	 * index.
	 */
	if (ieot) {
		struct strbuf sb = STRBUF_INIT;

		write_ieot_extension(&sb, ieot);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_INDEXENTRYOFFSETTABLE, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		free(ieot);
		if (err)
			return -1;
	}

	if (write_extensions & WRITE_SPLIT_INDEX_EXTENSION &&
	    istate->split_index) {
		struct strbuf sb = STRBUF_INIT;

		if (istate->sparse_index)
			die(_("cannot write split index for a sparse index"));

		err = write_link_extension(&sb, istate) < 0 ||
			write_index_ext_header(f, eoie_c, CACHE_EXT_LINK,
					       sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (write_extensions & WRITE_CACHE_TREE_EXTENSION &&
	    !drop_cache_tree && istate->cache_tree) {
		struct strbuf sb = STRBUF_INIT;

		cache_tree_write(&sb, istate->cache_tree);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_TREE, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (write_extensions & WRITE_RESOLVE_UNDO_EXTENSION &&
	    istate->resolve_undo) {
		struct strbuf sb = STRBUF_INIT;

		resolve_undo_write(&sb, istate->resolve_undo);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_RESOLVE_UNDO,
					     sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (write_extensions & WRITE_UNTRACKED_CACHE_EXTENSION &&
	    istate->untracked) {
		struct strbuf sb = STRBUF_INIT;

		write_untracked_extension(&sb, istate->untracked);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_UNTRACKED,
					     sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (write_extensions & WRITE_FSMONITOR_EXTENSION &&
	    istate->fsmonitor_last_update) {
		struct strbuf sb = STRBUF_INIT;

		write_fsmonitor_extension(&sb, istate);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_FSMONITOR, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (istate->sparse_index) {
		if (write_index_ext_header(f, eoie_c, CACHE_EXT_SPARSE_DIRECTORIES, 0) < 0)
			return -1;
	}

	/*
	 * CACHE_EXT_ENDOFINDEXENTRIES must be written as the last entry before the SHA1
	 * so that it can be found and processed before all the index entries are
	 * read. Write it out regardless of the strip_extensions parameter as we need it
	 * when loading the shared index.
	 */
	if (eoie_c) {
		struct strbuf sb = STRBUF_INIT;

		write_eoie_extension(&sb, eoie_c, offset);
		err = write_index_ext_header(f, NULL, CACHE_EXT_ENDOFINDEXENTRIES, sb.len) < 0;
		free(eoie_c);
		hashwrite(f, sb.buf, sb.len);
		strbuf_release(&sb);
		if (err)
			return -1;
	}

	csum_fsync_flag = 0;
	if (!alternate_index_output && (flags & COMMIT_LOCK))
		csum_fsync_flag = CSUM_FSYNC;

	finalize_hashfile(f, istate->oid.hash, FSYNC_COMPONENT_INDEX,
			  CSUM_HASH_IN_STREAM | csum_fsync_flag);

	if (close_tempfile_gently(tempfile)) {
		error(_("could not close '%s'"), get_tempfile_path(tempfile));
		return -1;
	}
	if (stat(get_tempfile_path(tempfile), &st))
		return -1;
	istate->timestamp.sec = (unsigned int)st.st_mtime;
	istate->timestamp.nsec = ST_MTIME_NSEC(st);
	trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);

	/*
	 * TODO trace2: replace "the_repository" with the actual repo instance
	 * that is associated with the given "istate".
	 */
	trace2_data_intmax("index", the_repository, "write/version",
			   istate->version);
	trace2_data_intmax("index", the_repository, "write/cache_nr",
			   istate->cache_nr);

	return 0;
}
void set_alternate_index_output(const char *name)
{
	alternate_index_output = name;
}

static int commit_locked_index(struct lock_file *lk)
{
	if (alternate_index_output)
		return commit_lock_file_to(lk, alternate_index_output);
	else
		return commit_lock_file(lk);
}
static int do_write_locked_index(struct index_state *istate,
				 struct lock_file *lock,
				 unsigned flags,
				 enum write_extensions write_extensions)
{
	int ret;
	int was_full = istate->sparse_index == INDEX_EXPANDED;

	ret = convert_to_sparse(istate, 0);
	if (ret) {
		warning(_("failed to convert to a sparse-index"));
		return ret;
	}

	/*
	 * TODO trace2: replace "the_repository" with the actual repo instance
	 * that is associated with the given "istate".
	 */
	trace2_region_enter_printf("index", "do_write_index", the_repository,
				   "%s", get_lock_file_path(lock));
	ret = do_write_index(istate, lock->tempfile, write_extensions, flags);
	trace2_region_leave_printf("index", "do_write_index", the_repository,
				   "%s", get_lock_file_path(lock));

	if (was_full)
		ensure_full_index(istate);

	if (ret)
		return ret;
	if (flags & COMMIT_LOCK)
		ret = commit_locked_index(lock);
	else
		ret = close_lock_file_gently(lock);

	run_hooks_l("post-index-change",
		    istate->updated_workdir ? "1" : "0",
		    istate->updated_skipworktree ? "1" : "0", NULL);
	istate->updated_workdir = 0;
	istate->updated_skipworktree = 0;

	return ret;
}
static int write_split_index(struct index_state *istate,
			     struct lock_file *lock,
			     unsigned flags)
{
	int ret;

	prepare_to_write_split_index(istate);
	ret = do_write_locked_index(istate, lock, flags, WRITE_ALL_EXTENSIONS);
	finish_writing_split_index(istate);
	return ret;
}
static const char *shared_index_expire = "2.weeks.ago";

static unsigned long get_shared_index_expire_date(void)
{
	static unsigned long shared_index_expire_date;
	static int shared_index_expire_date_prepared;

	if (!shared_index_expire_date_prepared) {
		git_config_get_expiry("splitindex.sharedindexexpire",
				      &shared_index_expire);
		shared_index_expire_date = approxidate(shared_index_expire);
		shared_index_expire_date_prepared = 1;
	}

	return shared_index_expire_date;
}
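/*
 * For example (a configuration sketch, not taken from this file): with
 * `git config splitIndex.sharedIndexExpire 1.month.ago`, a
 * sharedindex.* file whose mtime is older than one month becomes a
 * deletion candidate for should_delete_shared_index() below; the
 * built-in default is "2.weeks.ago".
 */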
static int should_delete_shared_index(const char *shared_index_path)
{
	struct stat st;
	unsigned long expiration;

	/* Check timestamp */
	expiration = get_shared_index_expire_date();
	if (!expiration)
		return 0;
	if (stat(shared_index_path, &st))
		return error_errno(_("could not stat '%s'"), shared_index_path);
	if (st.st_mtime > expiration)
		return 0;

	return 1;
}

static int clean_shared_index_files(const char *current_hex)
{
	struct dirent *de;
	DIR *dir = opendir(get_git_dir());

	if (!dir)
		return error_errno(_("unable to open git dir: %s"), get_git_dir());

	while ((de = readdir(dir)) != NULL) {
		const char *sha1_hex;
		const char *shared_index_path;
		if (!skip_prefix(de->d_name, "sharedindex.", &sha1_hex))
			continue;
		if (!strcmp(sha1_hex, current_hex))
			continue;
		shared_index_path = git_path("%s", de->d_name);
		if (should_delete_shared_index(shared_index_path) > 0 &&
		    unlink(shared_index_path))
			warning_errno(_("unable to unlink: %s"), shared_index_path);
	}
	closedir(dir);

	return 0;
}
static int write_shared_index(struct index_state *istate,
			      struct tempfile **temp, unsigned flags)
{
	struct split_index *si = istate->split_index;
	int ret, was_full = !istate->sparse_index;

	move_cache_to_base_index(istate);
	convert_to_sparse(istate, 0);

	trace2_region_enter_printf("index", "shared/do_write_index",
				   the_repository, "%s", get_tempfile_path(*temp));
	ret = do_write_index(si->base, *temp, WRITE_NO_EXTENSION, flags);
	trace2_region_leave_printf("index", "shared/do_write_index",
				   the_repository, "%s", get_tempfile_path(*temp));

	if (was_full)
		ensure_full_index(istate);

	if (ret)
		return ret;
	ret = adjust_shared_perm(get_tempfile_path(*temp));
	if (ret) {
		error(_("cannot fix permission bits on '%s'"), get_tempfile_path(*temp));
		return ret;
	}
	ret = rename_tempfile(temp,
			      git_path("sharedindex.%s", oid_to_hex(&si->base->oid)));
	if (!ret) {
		oidcpy(&si->base_oid, &si->base->oid);
		clean_shared_index_files(oid_to_hex(&si->base->oid));
	}

	return ret;
}
static const int default_max_percent_split_change = 20;

static int too_many_not_shared_entries(struct index_state *istate)
{
	int i, not_shared = 0;
	int max_split = git_config_get_max_percent_split_change();

	switch (max_split) {
	case -1:
		/* not or badly configured: use the default value */
		max_split = default_max_percent_split_change;
		break;
	case 0:
		return 1; /* 0% means always write a new shared index */
	case 100:
		return 0; /* 100% means never write a new shared index */
	default:
		break; /* just use the configured value */
	}

	/* Count not shared entries */
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		if (!ce->index)
			not_shared++;
	}

	return (int64_t)istate->cache_nr * max_split < (int64_t)not_shared * 100;
}
int write_locked_index(struct index_state *istate, struct lock_file *lock,
		       unsigned flags)
{
	int new_shared_index, ret, test_split_index_env;
	struct split_index *si = istate->split_index;

	if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
		cache_tree_verify(the_repository, istate);

	if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
		if (flags & COMMIT_LOCK)
			rollback_lock_file(lock);
		return 0;
	}

	if (istate->fsmonitor_last_update)
		fill_fsmonitor_bitmap(istate);

	test_split_index_env = git_env_bool("GIT_TEST_SPLIT_INDEX", 0);

	if ((!si && !test_split_index_env) ||
	    alternate_index_output ||
	    (istate->cache_changed & ~EXTMASK)) {
		ret = do_write_locked_index(istate, lock, flags,
					    ~WRITE_SPLIT_INDEX_EXTENSION);
		goto out;
	}

	if (test_split_index_env) {
		if (!si) {
			si = init_split_index(istate);
			istate->cache_changed |= SPLIT_INDEX_ORDERED;
		} else {
			int v = si->base_oid.hash[0];
			if ((v & 15) < 6)
				istate->cache_changed |= SPLIT_INDEX_ORDERED;
		}
	}
	if (too_many_not_shared_entries(istate))
		istate->cache_changed |= SPLIT_INDEX_ORDERED;

	new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;

	if (new_shared_index) {
		struct tempfile *temp;
		int saved_errno;

		/* Same initial permissions as the main .git/index file */
		temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666);
		if (!temp) {
			ret = do_write_locked_index(istate, lock, flags,
						    ~WRITE_SPLIT_INDEX_EXTENSION);
			goto out;
		}
		ret = write_shared_index(istate, &temp, flags);

		saved_errno = errno;
		if (is_tempfile_active(temp))
			delete_tempfile(&temp);
		errno = saved_errno;

		if (ret)
			goto out;
	}

	ret = write_split_index(istate, lock, flags);

	/* Freshen the shared index only if the split-index was written */
	if (!ret && !new_shared_index && !is_null_oid(&si->base_oid)) {
		const char *shared_index = git_path("sharedindex.%s",
						    oid_to_hex(&si->base_oid));
		freshen_shared_index(shared_index, 1);
	}

out:
	if (flags & COMMIT_LOCK)
		rollback_lock_file(lock);
	return ret;
}
/*
 * Read the index file that is potentially unmerged into given
 * index_state, dropping any unmerged entries to stage #0 (potentially
 * resulting in a path appearing as both a file and a directory in the
 * index; the caller is responsible to clear out the extra entries
 * before writing the index to a tree).  Returns true if the index is
 * unmerged.  Callers who want to refuse to work from an unmerged
 * state can call this and check its return value, instead of calling
 * read_cache().
 */
int repo_read_index_unmerged(struct repository *repo)
{
	struct index_state *istate;
	int i;
	int unmerged = 0;

	repo_read_index(repo);
	istate = repo->index;
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		struct cache_entry *new_ce;
		int len;

		if (!ce_stage(ce))
			continue;
		unmerged = 1;
		len = ce_namelen(ce);
		new_ce = make_empty_cache_entry(istate, len);
		memcpy(new_ce->name, ce->name, len);
		new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;
		new_ce->ce_namelen = len;
		new_ce->ce_mode = ce->ce_mode;
		if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))
			return error(_("%s: cannot drop to stage #0"),
				     new_ce->name);
	}
	return unmerged;
}
/*
 * Returns 1 if the path is an "other" path with respect to
 * the index; that is, the path is not mentioned in the index at all,
 * either as a file, a directory with some files in the index,
 * or as an unmerged entry.
 *
 * We helpfully remove a trailing "/" from directories so that
 * the output of read_directory can be used as-is.
 */
int index_name_is_other(struct index_state *istate, const char *name,
			int namelen)
{
	int pos;
	if (namelen && name[namelen - 1] == '/')
		namelen--;
	pos = index_name_pos(istate, name, namelen);
	if (0 <= pos)
		return 0;	/* exact match */
	pos = -pos-1;
	if (pos < istate->cache_nr) {
		struct cache_entry *ce = istate->cache[pos];
		if (ce_namelen(ce) == namelen &&
		    !memcmp(ce->name, name, namelen))
			return 0; /* Yup, this one exists unmerged */
	}
	return 1;
}
void *read_blob_data_from_index(struct index_state *istate,
				const char *path, unsigned long *size)
{
	int pos, len;
	unsigned long sz;
	enum object_type type;
	void *data;

	len = strlen(path);
	pos = index_name_pos(istate, path, len);
	if (pos < 0) {
		/*
		 * We might be in the middle of a merge, in which
		 * case we would read stage #2 (ours).
		 */
		int i;
		for (i = -pos - 1;
		     (pos < 0 && i < istate->cache_nr &&
		      !strcmp(istate->cache[i]->name, path));
		     i++)
			if (ce_stage(istate->cache[i]) == 2)
				pos = i;
	}
	if (pos < 0)
		return NULL;
	data = repo_read_object_file(the_repository, &istate->cache[pos]->oid,
				     &type, &sz);
	if (!data || type != OBJ_BLOB) {
		free(data);
		return NULL;
	}
	if (size)
		*size = sz;
	return data;
}
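/*
 * Hypothetical caller (illustration only; "Makefile" and `sz` are made
 * up for the example):
 *
 *	unsigned long sz;
 *	void *buf = read_blob_data_from_index(istate, "Makefile", &sz);
 *	if (buf) {
 *		... use sz bytes at buf ...
 *		free(buf);
 *	}
 *
 * The returned buffer is heap-allocated and owned by the caller.
 */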
void move_index_extensions(struct index_state *dst, struct index_state *src)
{
	dst->untracked = src->untracked;
	src->untracked = NULL;
	dst->cache_tree = src->cache_tree;
	src->cache_tree = NULL;
}
struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
				    struct index_state *istate)
{
	unsigned int size = ce_size(ce);
	int mem_pool_allocated;
	struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));
	mem_pool_allocated = new_entry->mem_pool_allocated;

	memcpy(new_entry, ce, size);
	new_entry->mem_pool_allocated = mem_pool_allocated;
	return new_entry;
}

void discard_cache_entry(struct cache_entry *ce)
{
	if (ce && should_validate_cache_entries())
		memset(ce, 0xCD, cache_entry_size(ce->ce_namelen));

	if (ce && ce->mem_pool_allocated)
		return;

	free(ce);
}
int should_validate_cache_entries(void)
{
	static int validate_index_cache_entries = -1;

	if (validate_index_cache_entries < 0) {
		if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES"))
			validate_index_cache_entries = 1;
		else
			validate_index_cache_entries = 0;
	}

	return validate_index_cache_entries;
}
#define EOIE_SIZE (4 + GIT_SHA1_RAWSZ) /* <4-byte offset> + <20-byte hash> */
#define EOIE_SIZE_WITH_HEADER (4 + 4 + EOIE_SIZE) /* <4-byte signature> + <4-byte length> + EOIE_SIZE */

static size_t read_eoie_extension(const char *mmap, size_t mmap_size)
{
	/*
	 * The end of index entries (EOIE) extension is guaranteed to be last
	 * so that it can be found by scanning backwards from the EOF.
	 *
	 * "EOIE"
	 * <4-byte length>
	 * <4-byte offset>
	 * <20-byte hash>
	 */
	const char *index, *eoie;
	uint32_t extsize;
	size_t offset, src_offset;
	unsigned char hash[GIT_MAX_RAWSZ];
	git_hash_ctx c;

	/* ensure we have an index big enough to contain an EOIE extension */
	if (mmap_size < sizeof(struct cache_header) + EOIE_SIZE_WITH_HEADER + the_hash_algo->rawsz)
		return 0;

	/* validate the extension signature */
	index = eoie = mmap + mmap_size - EOIE_SIZE_WITH_HEADER - the_hash_algo->rawsz;
	if (CACHE_EXT(index) != CACHE_EXT_ENDOFINDEXENTRIES)
		return 0;
	index += sizeof(uint32_t);

	/* validate the extension size */
	extsize = get_be32(index);
	if (extsize != EOIE_SIZE)
		return 0;
	index += sizeof(uint32_t);

	/*
	 * Validate the offset we're going to look for the first extension
	 * signature is after the index header and before the eoie extension.
	 */
	offset = get_be32(index);
	if (mmap + offset < mmap + sizeof(struct cache_header))
		return 0;
	if (mmap + offset >= eoie)
		return 0;
	index += sizeof(uint32_t);

	/*
	 * The hash is computed over extension types and their sizes (but not
	 * their contents).  E.g. if we have "TREE" extension that is N-bytes
	 * long, "REUC" extension that is M-bytes long, followed by "EOIE",
	 * then the hash would be:
	 *
	 * SHA-1("TREE" + <binary representation of N> +
	 *	 "REUC" + <binary representation of M>)
	 */
	src_offset = offset;
	the_hash_algo->init_fn(&c);
	while (src_offset < mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER) {
		/* After an array of active_nr index entries,
		 * there can be arbitrary number of extended
		 * sections, each of which is prefixed with
		 * extension name (4-byte) and section length
		 * in 4-byte network byte order.
		 */
		uint32_t extsize;
		memcpy(&extsize, mmap + src_offset + 4, 4);
		extsize = ntohl(extsize);

		/* verify the extension size isn't so large it will wrap around */
		if (src_offset + 8 + extsize < src_offset)
			return 0;

		the_hash_algo->update_fn(&c, mmap + src_offset, 8);

		src_offset += 8;
		src_offset += extsize;
	}
	the_hash_algo->final_fn(hash, &c);
	if (!hasheq(hash, (const unsigned char *)index, the_repository->hash_algo))
		return 0;

	/* Validate that the extension offsets returned us back to the eoie extension. */
	if (src_offset != mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER)
		return 0;

	return offset;
}
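/*
 * Size arithmetic for the SHA-1 case (follows directly from the macros
 * above): EOIE_SIZE is 4 + 20 = 24 bytes and EOIE_SIZE_WITH_HEADER is
 * 32, so the extension is expected to start exactly at
 * mmap_size - 20 - 32, i.e. 52 bytes before the end of the file.
 */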
static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset)
{
	uint32_t buffer;
	unsigned char hash[GIT_MAX_RAWSZ];

	/* offset */
	put_be32(&buffer, offset);
	strbuf_add(sb, &buffer, sizeof(uint32_t));

	/* hash */
	the_hash_algo->final_fn(hash, eoie_context);
	strbuf_add(sb, hash, the_hash_algo->rawsz);
}
#define IEOT_VERSION	(1)

static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
{
	const char *index = NULL;
	uint32_t extsize, ext_version;
	struct index_entry_offset_table *ieot;
	int i, nr;

	/* find the IEOT extension */
	if (!offset)
		return NULL;
	while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
		extsize = get_be32(mmap + offset + 4);
		if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
			index = mmap + offset + 4 + 4;
			break;
		}
		offset += 8;
		offset += extsize;
	}
	if (!index)
		return NULL;

	/* validate the version is IEOT_VERSION */
	ext_version = get_be32(index);
	if (ext_version != IEOT_VERSION) {
		error("invalid IEOT version %d", ext_version);
		return NULL;
	}
	index += sizeof(uint32_t);

	/* extension size - version bytes / bytes per entry */
	nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
	if (!nr) {
		error("invalid number of IEOT entries %d", nr);
		return NULL;
	}
	ieot = xmalloc(sizeof(struct index_entry_offset_table)
		       + (nr * sizeof(struct index_entry_offset)));
	ieot->nr = nr;
	for (i = 0; i < nr; i++) {
		ieot->entries[i].offset = get_be32(index);
		index += sizeof(uint32_t);
		ieot->entries[i].nr = get_be32(index);
		index += sizeof(uint32_t);
	}

	return ieot;
}
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot)
{
	uint32_t buffer;
	int i;

	/* version */
	put_be32(&buffer, IEOT_VERSION);
	strbuf_add(sb, &buffer, sizeof(uint32_t));

	/* ieot */
	for (i = 0; i < ieot->nr; i++) {

		/* offset */
		put_be32(&buffer, ieot->entries[i].offset);
		strbuf_add(sb, &buffer, sizeof(uint32_t));

		/* count */
		put_be32(&buffer, ieot->entries[i].nr);
		strbuf_add(sb, &buffer, sizeof(uint32_t));
	}
}
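/*
 * On-disk IEOT layout produced above: a 4-byte version (currently 1)
 * followed by one pair of 32-bit network-byte-order values per block,
 * <offset into the index file> and <number of cache entries in the
 * block>, matching what read_ieot_extension() expects.
 */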
void prefetch_cache_entries(const struct index_state *istate,
			    must_prefetch_predicate must_prefetch)
{
	int i;
	struct oid_array to_fetch = OID_ARRAY_INIT;

	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (S_ISGITLINK(ce->ce_mode) || !must_prefetch(ce))
			continue;
		if (!oid_object_info_extended(the_repository, &ce->oid,
					      NULL,
					      OBJECT_INFO_FOR_PREFETCH))
			continue;
		oid_array_append(&to_fetch, &ce->oid);
	}
	promisor_remote_get_direct(the_repository,
				   to_fetch.oid, to_fetch.nr);
	oid_array_clear(&to_fetch);
}
static int read_one_entry_opt(struct index_state *istate,
			      const struct object_id *oid,
			      struct strbuf *base,
			      const char *pathname,
			      unsigned mode, int opt)
{
	int len;
	struct cache_entry *ce;

	if (S_ISDIR(mode))
		return READ_TREE_RECURSIVE;

	len = strlen(pathname);
	ce = make_empty_cache_entry(istate, base->len + len);

	ce->ce_mode = create_ce_mode(mode);
	ce->ce_flags = create_ce_flags(1);
	ce->ce_namelen = base->len + len;
	memcpy(ce->name, base->buf, base->len);
	memcpy(ce->name + base->len, pathname, len+1);
	oidcpy(&ce->oid, oid);
	return add_index_entry(istate, ce, opt);
}

static int read_one_entry(const struct object_id *oid, struct strbuf *base,
			  const char *pathname, unsigned mode,
			  void *context)
{
	struct index_state *istate = context;
	return read_one_entry_opt(istate, oid, base, pathname,
				  mode,
				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}

/*
 * This is used when the caller knows there is no existing entries at
 * the stage that will conflict with the entry being added.
 */
static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
				const char *pathname, unsigned mode,
				void *context)
{
	struct index_state *istate = context;
	return read_one_entry_opt(istate, oid, base, pathname,
				  mode, ADD_CACHE_JUST_APPEND);
}
/*
 * Read the tree specified with --with-tree option
 * (typically, HEAD) into stage #1 and then
 * squash them down to stage #0.  This is used for
 * --error-unmatch to list and check the path patterns
 * that were given from the command line.  We are not
 * going to write this index out.
 */
void overlay_tree_on_index(struct index_state *istate,
			   const char *tree_name, const char *prefix)
{
	struct tree *tree;
	struct object_id oid;
	struct pathspec pathspec;
	struct cache_entry *last_stage0 = NULL;
	int i;
	read_tree_fn_t fn = NULL;
	int err;

	if (repo_get_oid(the_repository, tree_name, &oid))
		die("tree-ish %s not found.", tree_name);
	tree = parse_tree_indirect(&oid);
	if (!tree)
		die("bad tree-ish %s", tree_name);

	/* Hoist the unmerged entries up to stage #3 to make room */
	/* TODO: audit for interaction with sparse-index. */
	ensure_full_index(istate);
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		if (!ce_stage(ce))
			continue;
		ce->ce_flags |= CE_STAGEMASK;
	}

	if (prefix) {
		static const char *(matchbuf[1]);
		matchbuf[0] = NULL;
		parse_pathspec(&pathspec, PATHSPEC_ALL_MAGIC,
			       PATHSPEC_PREFER_CWD, prefix, matchbuf);
	} else
		memset(&pathspec, 0, sizeof(pathspec));

	/*
	 * See if we have cache entry at the stage. If so,
	 * do it the original slow way, otherwise, append and then
	 * sort at the end.
	 */
	for (i = 0; !fn && i < istate->cache_nr; i++) {
		const struct cache_entry *ce = istate->cache[i];
		if (ce_stage(ce) == 1)
			fn = read_one_entry;
	}

	if (!fn)
		fn = read_one_entry_quick;
	err = read_tree(the_repository, tree, &pathspec, fn, istate);
	clear_pathspec(&pathspec);
	if (err)
		die("unable to read tree entries %s", tree_name);

	/*
	 * Sort the cache entry -- we need to nuke the cache tree, though.
	 */
	if (fn == read_one_entry_quick) {
		cache_tree_free(&istate->cache_tree);
		QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
	}

	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		switch (ce_stage(ce)) {
		case 0:
			last_stage0 = ce;
			/* fallthru */
		default:
			continue;
		case 1:
			/*
			 * If there is stage #0 entry for this, we do not
			 * need to show it.  We use CE_UPDATE bit to mark
			 * such an entry.
			 */
			if (last_stage0 &&
			    !strcmp(last_stage0->name, ce->name))
				ce->ce_flags |= CE_UPDATE;
		}
	}
}
struct update_callback_data {
	struct index_state *index;
	int include_sparse;
	int flags;
	int add_errors;
};

static int fix_unmerged_status(struct diff_filepair *p,
			       struct update_callback_data *data)
{
	if (p->status != DIFF_STATUS_UNMERGED)
		return p->status;
	if (!(data->flags & ADD_CACHE_IGNORE_REMOVAL) && !p->two->mode)
		/*
		 * This is not an explicit add request, and the
		 * path is missing from the working tree (deleted)
		 */
		return DIFF_STATUS_DELETED;
	else
		/*
		 * Either an explicit add request, or path exists
		 * in the working tree.  An attempt to explicitly
		 * add a path that does not exist in the working tree
		 * will be caught as an error by the caller immediately.
		 */
		return DIFF_STATUS_MODIFIED;
}
static void update_callback(struct diff_queue_struct *q,
			    struct diff_options *opt UNUSED, void *cbdata)
{
	int i;
	struct update_callback_data *data = cbdata;

	for (i = 0; i < q->nr; i++) {
		struct diff_filepair *p = q->queue[i];
		const char *path = p->one->path;

		if (!data->include_sparse &&
		    !path_in_sparse_checkout(path, data->index))
			continue;

		switch (fix_unmerged_status(p, data)) {
		default:
			die(_("unexpected diff status %c"), p->status);
		case DIFF_STATUS_MODIFIED:
		case DIFF_STATUS_TYPE_CHANGED:
			if (add_file_to_index(data->index, path, data->flags)) {
				if (!(data->flags & ADD_CACHE_IGNORE_ERRORS))
					die(_("updating files failed"));
				data->add_errors++;
			}
			break;
		case DIFF_STATUS_DELETED:
			if (data->flags & ADD_CACHE_IGNORE_REMOVAL)
				break;
			if (!(data->flags & ADD_CACHE_PRETEND))
				remove_file_from_index(data->index, path);
			if (data->flags & (ADD_CACHE_PRETEND|ADD_CACHE_VERBOSE))
				printf(_("remove '%s'\n"), path);
			break;
		}
	}
}
int add_files_to_cache(struct repository *repo, const char *prefix,
		       const struct pathspec *pathspec, char *ps_matched,
		       int include_sparse, int flags)
{
	struct update_callback_data data;
	struct rev_info rev;

	memset(&data, 0, sizeof(data));
	data.index = repo->index;
	data.include_sparse = include_sparse;
	data.flags = flags;

	repo_init_revisions(repo, &rev, prefix);
	setup_revisions(0, NULL, &rev, NULL);
	if (pathspec) {
		copy_pathspec(&rev.prune_data, pathspec);
		rev.ps_matched = ps_matched;
	}
	rev.diffopt.output_format = DIFF_FORMAT_CALLBACK;
	rev.diffopt.format_callback = update_callback;
	rev.diffopt.format_callback_data = &data;
	rev.diffopt.flags.override_submodule_config = 1;
	rev.max_count = 0; /* do not compare unmerged paths with stage #2 */

	/*
	 * Use an ODB transaction to optimize adding multiple objects.
	 * This function is invoked from commands other than 'add', which
	 * may not have their own transaction active.
	 */
	begin_odb_transaction();
	run_diff_files(&rev, DIFF_RACY_IS_MODIFIED);
	end_odb_transaction();

	release_revisions(&rev);
	return !!data.add_errors;
}