/*
 * name-hash.c
 *
 * Hashing names in the index state
 *
 * Copyright (C) 2008 Linus Torvalds
 */

#define USE_THE_REPOSITORY_VARIABLE

#include "git-compat-util.h"
#include "environment.h"
#include "gettext.h"
#include "name-hash.h"
#include "object.h"
#include "read-cache-ll.h"
#include "thread-utils.h"
#include "trace.h"
#include "trace2.h"
#include "sparse-index.h"
struct dir_entry {
	struct hashmap_entry ent;
	struct dir_entry *parent;
	int nr;			/* reference count (see add_dir_entry() / remove_dir_entry()) */
	unsigned int namelen;
	char name[FLEX_ARRAY];	/* directory name, without the closing slash */
};
/*
 * Hashmap comparison function: returns 0 iff the two entries name the
 * same directory, compared case-insensitively.
 */
static int dir_entry_cmp(const void *cmp_data UNUSED,
			 const struct hashmap_entry *eptr,
			 const struct hashmap_entry *entry_or_key,
			 const void *keydata)
{
	const struct dir_entry *e1, *e2;
	const char *name = keydata;

	e1 = container_of(eptr, const struct dir_entry, ent);
	e2 = container_of(entry_or_key, const struct dir_entry, ent);

	return e1->namelen != e2->namelen || strncasecmp(e1->name,
			name ? name : e2->name, e1->namelen);
}
static struct dir_entry *find_dir_entry__hash(struct index_state *istate,
		const char *name, unsigned int namelen, unsigned int hash)
{
	struct dir_entry key;
	hashmap_entry_init(&key.ent, hash);
	key.namelen = namelen;
	return hashmap_get_entry(&istate->dir_hash, &key, ent, name);
}

static struct dir_entry *find_dir_entry(struct index_state *istate,
		const char *name, unsigned int namelen)
{
	return find_dir_entry__hash(istate, name, namelen, memihash(name, namelen));
}
static struct dir_entry *hash_dir_entry(struct index_state *istate,
		struct cache_entry *ce, int namelen)
{
	/*
	 * Throw each directory component in the hash for quick lookup
	 * during a git status. Directory components are stored without their
	 * closing slash. Despite submodules being a directory, they never
	 * reach this point, because they are stored
	 * in index_state.name_hash (as ordinary cache_entries).
	 */
	struct dir_entry *dir;

	/* get length of parent directory */
	while (namelen > 0 && !is_dir_sep(ce->name[namelen - 1]))
		namelen--;
	if (namelen <= 0)
		return NULL;
	namelen--;

	/* lookup existing entry for that directory */
	dir = find_dir_entry(istate, ce->name, namelen);
	if (!dir) {
		/* not found, create it and add to hash table */
		FLEX_ALLOC_MEM(dir, name, ce->name, namelen);
		hashmap_entry_init(&dir->ent, memihash(ce->name, namelen));
		dir->namelen = namelen;
		hashmap_add(&istate->dir_hash, &dir->ent);

		/* recursively add missing parent directories */
		dir->parent = hash_dir_entry(istate, ce, namelen);
	}
	return dir;
}
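/*
 * For example, hashing the cache entry "a/b/c.txt" creates (or finds)
 * dir entries for "a/b" and "a", chained via ->parent.  The helpers
 * below maintain the reference counts on that chain: add_dir_entry()
 * bumps them and remove_dir_entry() releases them, freeing a dir entry
 * once its count drops to zero.
 */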
static void add_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/* Add reference to the directory entry (and parents if 0). */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(dir->nr++))
		dir = dir->parent;
}

static void remove_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/*
	 * Release reference to the directory entry. If 0, remove and continue
	 * with parent directory.
	 */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(--dir->nr)) {
		struct dir_entry *parent = dir->parent;
		hashmap_remove(&istate->dir_hash, &dir->ent, NULL);
		free(dir);
		dir = parent;
	}
}
static void hash_index_entry(struct index_state *istate, struct cache_entry *ce)
{
	if (ce->ce_flags & CE_HASHED)
		return;
	ce->ce_flags |= CE_HASHED;

	if (!S_ISSPARSEDIR(ce->ce_mode)) {
		hashmap_entry_init(&ce->ent, memihash(ce->name, ce_namelen(ce)));
		hashmap_add(&istate->name_hash, &ce->ent);
	}

	if (ignore_case)
		add_dir_entry(istate, ce);
}
static int cache_entry_cmp(const void *cmp_data UNUSED,
			   const struct hashmap_entry *eptr,
			   const struct hashmap_entry *entry_or_key,
			   const void *remove)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = container_of(eptr, const struct cache_entry, ent);
	ce2 = container_of(entry_or_key, const struct cache_entry, ent);

	/*
	 * For remove_name_hash, find the exact entry (pointer equality); for
	 * index_file_exists, find all entries with matching hash code and
	 * decide whether the entry matches in same_name.
	 */
	return remove ? !(ce1 == ce2) : 0;
}
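/*
 * The two modes above match the two callers: remove_name_hash() passes
 * the cache_entry itself as keydata, so "remove" is non-NULL and only
 * the identical pointer compares equal; index_file_exists() passes NULL
 * and instead filters the hash-equal candidates with same_name().
 */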
static int lazy_try_threaded = 1;
static int lazy_nr_dir_threads;

/*
 * Set a minimum number of cache_entries that we will handle per
 * thread and use that to decide how many threads to run (up to
 * the number on the system).
 *
 * For guidance setting the lower per-thread bound, see:
 *     t/helper/test-lazy-init-name-hash --analyze
 */
#define LAZY_THREAD_COST (2000)

/*
 * We use n mutexes to guard n partitions of the "istate->dir_hash"
 * hashtable.  Since "find" and "insert" operations will hash to a
 * particular bucket and modify/search a single chain, we can say
 * that "all chains mod n" are guarded by the same mutex -- rather
 * than having a single mutex to guard the entire table.  (This does
 * require that we disable "rehashing" on the hashtable.)
 *
 * So, a larger value here decreases the probability of a collision
 * and the time that each thread must wait for the mutex.
 */
#define LAZY_MAX_MUTEX (32)
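/*
 * With LAZY_MAX_MUTEX at 32, two threads contend only when their keys
 * hash to bucket indices that are equal mod 32 (see
 * compute_dir_lock_nr() below); operations on other chains proceed in
 * parallel.
 */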
static pthread_mutex_t *lazy_dir_mutex_array;

/*
 * An array of lazy_entry items is used by the n threads in
 * the directory parse (first) phase to (lock-free) store the
 * intermediate results.  These values are then referenced by
 * the 2 threads in the second phase.
 */
struct lazy_entry {
	struct dir_entry *dir;
	unsigned int hash_dir;
	unsigned int hash_name;
};
/*
 * Decide if we want to use threads (if available) to load
 * the hash tables.  We set "lazy_nr_dir_threads" to zero when
 * it is not worth it.
 */
static int lookup_lazy_params(struct index_state *istate)
{
	int nr_cpus;

	lazy_nr_dir_threads = 0;

	if (!lazy_try_threaded)
		return 0;

	/*
	 * If we are respecting case, just use the original
	 * code to build the "istate->name_hash".  We don't
	 * need the complexity here.
	 */
	if (!ignore_case)
		return 0;

	nr_cpus = online_cpus();
	if (nr_cpus < 2)
		return 0;

	if (istate->cache_nr < 2 * LAZY_THREAD_COST)
		return 0;

	if (istate->cache_nr < nr_cpus * LAZY_THREAD_COST)
		nr_cpus = istate->cache_nr / LAZY_THREAD_COST;
	lazy_nr_dir_threads = nr_cpus;
	return lazy_nr_dir_threads;
}
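/*
 * For example, with LAZY_THREAD_COST at 2000, a 10,000-entry index on
 * an 8-core machine gets 5 dir threads (10000 / 2000), while any index
 * under 4000 entries falls back to the single-threaded path.
 */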
/*
 * Initialize n mutexes for use when searching and inserting
 * into "istate->dir_hash".  All "dir" threads are trying
 * to insert partial pathnames into the hash as they iterate
 * over their portions of the index, so lock contention is
 * high.
 *
 * However, the hashmap is going to put items into bucket
 * chains based on their hash values.  Use that to create n
 * mutexes and lock on mutex[bucket(hash) % n].  This will
 * decrease the collision rate by (hopefully) a factor of n.
 */
static void init_dir_mutex(void)
{
	int j;

	CALLOC_ARRAY(lazy_dir_mutex_array, LAZY_MAX_MUTEX);

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		init_recursive_mutex(&lazy_dir_mutex_array[j]);
}

static void cleanup_dir_mutex(void)
{
	int j;

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		pthread_mutex_destroy(&lazy_dir_mutex_array[j]);

	free(lazy_dir_mutex_array);
}

static void lock_dir_mutex(int j)
{
	pthread_mutex_lock(&lazy_dir_mutex_array[j]);
}

static void unlock_dir_mutex(int j)
{
	pthread_mutex_unlock(&lazy_dir_mutex_array[j]);
}

static inline int compute_dir_lock_nr(
	const struct hashmap *map,
	unsigned int hash)
{
	return hashmap_bucket(map, hash) % LAZY_MAX_MUTEX;
}
static struct dir_entry *hash_dir_entry_with_parent_and_prefix(
	struct index_state *istate,
	struct dir_entry *parent,
	struct strbuf *prefix)
{
	struct dir_entry *dir;
	unsigned int hash;
	int lock_nr;

	/*
	 * Either we have a parent directory and path with slash(es)
	 * or the directory is an immediate child of the root directory.
	 */
	assert((parent != NULL) ^ (strchr(prefix->buf, '/') == NULL));

	if (parent)
		hash = memihash_cont(parent->ent.hash,
			prefix->buf + parent->namelen,
			prefix->len - parent->namelen);
	else
		hash = memihash(prefix->buf, prefix->len);

	lock_nr = compute_dir_lock_nr(&istate->dir_hash, hash);
	lock_dir_mutex(lock_nr);

	dir = find_dir_entry__hash(istate, prefix->buf, prefix->len, hash);
	if (!dir) {
		FLEX_ALLOC_MEM(dir, name, prefix->buf, prefix->len);
		hashmap_entry_init(&dir->ent, hash);
		dir->namelen = prefix->len;
		dir->parent = parent;
		hashmap_add(&istate->dir_hash, &dir->ent);

		if (parent) {
			unlock_dir_mutex(lock_nr);

			/* All I really need here is an InterlockedIncrement(&(parent->nr)) */
			lock_nr = compute_dir_lock_nr(&istate->dir_hash, parent->ent.hash);
			lock_dir_mutex(lock_nr);
			parent->nr++;
		}
	}

	unlock_dir_mutex(lock_nr);

	return dir;
}
/*
 * handle_range_1() and handle_range_dir() are derived from
 * clear_ce_flags_1() and clear_ce_flags_dir() in unpack-trees.c
 * and handle the iteration over the entire array of index entries.
 * They use recursion for adjacent entries in the same parent
 * directory.
 */
static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries);

static int handle_range_dir(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries,
	struct dir_entry **dir_new_out)
{
	int rc, k;
	int input_prefix_len = prefix->len;
	struct dir_entry *dir_new;

	dir_new = hash_dir_entry_with_parent_and_prefix(istate, parent, prefix);

	strbuf_addch(prefix, '/');

	/*
	 * Scan forward in the index array for index entries having the same
	 * path prefix (that are also in this directory).
	 */
	if (k_start + 1 >= k_end)
		k = k_end;
	else if (strncmp(istate->cache[k_start + 1]->name, prefix->buf, prefix->len) > 0)
		k = k_start + 1;
	else if (strncmp(istate->cache[k_end - 1]->name, prefix->buf, prefix->len) == 0)
		k = k_end;
	else {
		int begin = k_start;
		int end = k_end;
		assert(begin >= 0);
		while (begin < end) {
			int mid = begin + ((end - begin) >> 1);
			int cmp = strncmp(istate->cache[mid]->name, prefix->buf, prefix->len);
			if (cmp == 0) /* mid has same prefix; look in second part */
				begin = mid + 1;
			else if (cmp > 0) /* mid is past group; look in first part */
				end = mid;
			else
				die("cache entry out of order");
		}
		k = begin;
	}

	/*
	 * Recurse and process what we can of this subset [k_start, k).
	 */
	rc = handle_range_1(istate, k_start, k, dir_new, prefix, lazy_entries);

	strbuf_setlen(prefix, input_prefix_len);

	*dir_new_out = dir_new;
	return rc;
}
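/*
 * handle_range_1() below consumes the entries directly under "prefix"
 * and, when it hits a subdirectory, hands the contiguous run of entries
 * for that subdirectory to handle_range_dir() above, which recurses
 * back into handle_range_1() with a lengthened prefix.  Both return the
 * number of index entries they consumed.
 */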
static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries)
{
	int input_prefix_len = prefix->len;
	int k = k_start;

	while (k < k_end) {
		struct cache_entry *ce_k = istate->cache[k];
		const char *name, *slash;

		if (prefix->len && strncmp(ce_k->name, prefix->buf, prefix->len))
			break;

		name = ce_k->name + prefix->len;
		slash = strchr(name, '/');

		if (slash) {
			int len = slash - name;
			int processed;
			struct dir_entry *dir_new;

			strbuf_add(prefix, name, len);
			processed = handle_range_dir(istate, k, k_end, parent, prefix, lazy_entries, &dir_new);
			if (processed) {
				k += processed;
				strbuf_setlen(prefix, input_prefix_len);
				continue;
			}

			strbuf_addch(prefix, '/');
			processed = handle_range_1(istate, k, k_end, dir_new, prefix, lazy_entries);
			k += processed;
			strbuf_setlen(prefix, input_prefix_len);
			continue;
		}

		/*
		 * It is too expensive to take a lock to insert "ce_k"
		 * into "istate->name_hash" and increment the ref-count
		 * on the "parent" dir.  So we defer actually updating
		 * permanent data structures until phase 2 (where we
		 * can change the locking requirements) and simply
		 * accumulate our current results into the lazy_entries
		 * data array.
		 *
		 * We do not need to lock the lazy_entries array because
		 * we have exclusive access to the cells in the range
		 * [k_start,k_end) that this thread was given.
		 */
		lazy_entries[k].dir = parent;
		if (parent) {
			lazy_entries[k].hash_name = memihash_cont(
				parent->ent.hash,
				ce_k->name + parent->namelen,
				ce_namelen(ce_k) - parent->namelen);
			lazy_entries[k].hash_dir = parent->ent.hash;
		} else {
			lazy_entries[k].hash_name = memihash(ce_k->name, ce_namelen(ce_k));
		}

		k++;
	}

	return k - k_start;
}
struct lazy_dir_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
	int k_start;
	int k_end;
};

static void *lazy_dir_thread_proc(void *_data)
{
	struct lazy_dir_thread_data *d = _data;
	struct strbuf prefix = STRBUF_INIT;
	handle_range_1(d->istate, d->k_start, d->k_end, NULL, &prefix, d->lazy_entries);
	strbuf_release(&prefix);
	return NULL;
}
struct lazy_name_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
};

static void *lazy_name_thread_proc(void *_data)
{
	struct lazy_name_thread_data *d = _data;
	int k;

	for (k = 0; k < d->istate->cache_nr; k++) {
		struct cache_entry *ce_k = d->istate->cache[k];
		ce_k->ce_flags |= CE_HASHED;
		hashmap_entry_init(&ce_k->ent, d->lazy_entries[k].hash_name);
		hashmap_add(&d->istate->name_hash, &ce_k->ent);
	}

	return NULL;
}

static inline void lazy_update_dir_ref_counts(
	struct index_state *istate,
	struct lazy_entry *lazy_entries)
{
	int k;

	for (k = 0; k < istate->cache_nr; k++) {
		if (lazy_entries[k].dir)
			lazy_entries[k].dir->nr++;
	}
}
static void threaded_lazy_init_name_hash(
	struct index_state *istate)
{
	int err;
	int nr_each;
	int k_start;
	int t;
	struct lazy_entry *lazy_entries;
	struct lazy_dir_thread_data *td_dir;
	struct lazy_name_thread_data *td_name;

	if (!HAVE_THREADS)
		return;

	k_start = 0;
	nr_each = DIV_ROUND_UP(istate->cache_nr, lazy_nr_dir_threads);

	CALLOC_ARRAY(lazy_entries, istate->cache_nr);
	CALLOC_ARRAY(td_dir, lazy_nr_dir_threads);
	CALLOC_ARRAY(td_name, 1);

	init_dir_mutex();

	/*
	 * Phase 1:
	 * Build "istate->dir_hash" using n "dir" threads (and a read-only index).
	 */
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		td_dir_t->istate = istate;
		td_dir_t->lazy_entries = lazy_entries;
		td_dir_t->k_start = k_start;
		k_start += nr_each;
		if (k_start > istate->cache_nr)
			k_start = istate->cache_nr;
		td_dir_t->k_end = k_start;
		err = pthread_create(&td_dir_t->pthread, NULL, lazy_dir_thread_proc, td_dir_t);
		if (err)
			die(_("unable to create lazy_dir thread: %s"), strerror(err));
	}
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		if (pthread_join(td_dir_t->pthread, NULL))
			die("unable to join lazy_dir_thread");
	}

	/*
	 * Phase 2:
	 * Iterate over all index entries and add them to the "istate->name_hash"
	 * using a single "name" background thread.
	 * (Testing showed it wasn't worth running more than 1 thread for this.)
	 *
	 * Meanwhile, finish updating the parent directory ref-counts for each
	 * index entry using the current thread.  (This step is very fast and
	 * doesn't need threading.)
	 */
	td_name->istate = istate;
	td_name->lazy_entries = lazy_entries;
	err = pthread_create(&td_name->pthread, NULL, lazy_name_thread_proc, td_name);
	if (err)
		die(_("unable to create lazy_name thread: %s"), strerror(err));

	lazy_update_dir_ref_counts(istate, lazy_entries);

	err = pthread_join(td_name->pthread, NULL);
	if (err)
		die(_("unable to join lazy_name thread: %s"), strerror(err));

	cleanup_dir_mutex();

	free(td_name);
	free(td_dir);
	free(lazy_entries);
}
static void lazy_init_name_hash(struct index_state *istate)
{
	if (istate->name_hash_initialized)
		return;
	trace_performance_enter();
	trace2_region_enter("index", "name-hash-init", istate->repo);
	hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
	hashmap_init(&istate->dir_hash, dir_entry_cmp, NULL, istate->cache_nr);

	if (lookup_lazy_params(istate)) {
		/*
		 * Disable item counting and automatic rehashing because
		 * we do per-chain (mod n) locking rather than whole hashmap
		 * locking and we need to prevent the table-size from changing
		 * and bucket items from being redistributed.
		 */
		hashmap_disable_item_counting(&istate->dir_hash);
		threaded_lazy_init_name_hash(istate);
		hashmap_enable_item_counting(&istate->dir_hash);
	} else {
		int nr;
		for (nr = 0; nr < istate->cache_nr; nr++)
			hash_index_entry(istate, istate->cache[nr]);
	}

	istate->name_hash_initialized = 1;
	trace2_region_leave("index", "name-hash-init", istate->repo);
	trace_performance_leave("initialize name hash");
}
/*
 * A test routine for t/helper/ sources.
 *
 * Returns the number of threads used or 0 when
 * the non-threaded code path was used.
 *
 * Requesting threading WILL NOT override guards
 * in lookup_lazy_params().
 */
int test_lazy_init_name_hash(struct index_state *istate, int try_threaded)
{
	lazy_nr_dir_threads = 0;
	lazy_try_threaded = try_threaded;

	lazy_init_name_hash(istate);

	return lazy_nr_dir_threads;
}
void add_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (istate->name_hash_initialized)
		hash_index_entry(istate, ce);
}

void remove_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (!istate->name_hash_initialized || !(ce->ce_flags & CE_HASHED))
		return;
	ce->ce_flags &= ~CE_HASHED;
	hashmap_remove(&istate->name_hash, &ce->ent, ce);

	if (ignore_case)
		remove_dir_entry(istate, ce);
}
static int slow_same_name(const char *name1, int len1, const char *name2, int len2)
{
	if (len1 != len2)
		return 0;

	while (len1) {
		unsigned char c1 = *name1++;
		unsigned char c2 = *name2++;
		len1--;
		if (c1 != c2) {
			c1 = toupper(c1);
			c2 = toupper(c2);
			if (c1 != c2)
				return 0;
		}
	}
	return 1;
}

static int same_name(const struct cache_entry *ce, const char *name, int namelen, int icase)
{
	int len = ce_namelen(ce);

	/*
	 * Always do exact compare, even if we want a case-ignoring comparison;
	 * we do the quick exact one first, because it will be the common case.
	 */
	if (len == namelen && !memcmp(name, ce->name, len))
		return 1;

	if (!icase)
		return 0;

	return slow_same_name(name, namelen, ce->name, len);
}
int index_dir_find(struct index_state *istate, const char *name, int namelen,
		   struct strbuf *canonical_path)
{
	struct dir_entry *dir;

	lazy_init_name_hash(istate);
	expand_to_path(istate, name, namelen, 0);
	dir = find_dir_entry(istate, name, namelen);

	if (canonical_path && dir && dir->nr) {
		strbuf_reset(canonical_path);
		strbuf_add(canonical_path, dir->name, dir->namelen);
	}

	return dir && dir->nr;
}
void adjust_dirname_case(struct index_state *istate, char *name)
{
	const char *startPtr = name;
	const char *ptr = startPtr;

	lazy_init_name_hash(istate);
	expand_to_path(istate, name, strlen(name), 0);
	while (*ptr) {
		while (*ptr && *ptr != '/')
			ptr++;

		if (*ptr == '/') {
			struct dir_entry *dir;

			dir = find_dir_entry(istate, name, ptr - name);
			if (dir) {
				memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr);
				startPtr = ptr + 1;
			}
			ptr++;
		}
	}
}
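/*
 * Example for adjust_dirname_case() above: if the index holds
 * "Foo/Bar/baz.c" and the caller passes in "foo/bar/BAZ.c", the buffer
 * is rewritten in place to "Foo/Bar/BAZ.c" -- each directory component
 * is corrected, but the trailing filename is left as the caller spelled
 * it.
 */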
struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int icase)
{
	struct cache_entry *ce;
	unsigned int hash = memihash(name, namelen);

	lazy_init_name_hash(istate);
	expand_to_path(istate, name, namelen, icase);

	ce = hashmap_get_entry_from_hash(&istate->name_hash, hash, NULL,
					 struct cache_entry, ent);
	hashmap_for_each_entry_from(&istate->name_hash, ce, ent) {
		if (same_name(ce, name, namelen, icase))
			return ce;
	}
	return NULL;
}
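/*
 * Hypothetical caller of index_file_exists() above: given an index
 * containing "Makefile", index_file_exists(istate, "makefile", 8, 1)
 * returns its cache_entry, while the same call with icase=0 returns
 * NULL unless the case matches exactly.
 */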
void free_name_hash(struct index_state *istate)
{
	if (!istate->name_hash_initialized)
		return;
	istate->name_hash_initialized = 0;

	hashmap_clear(&istate->name_hash);
	hashmap_clear_and_free(&istate->dir_hash, struct dir_entry, ent);
}