#define USE_THE_REPOSITORY_VARIABLE

#include "git-compat-util.h"
#include "config.h"
#include "entry.h"
#include "gettext.h"
#include "hash.h"
#include "hex.h"
#include "parallel-checkout.h"
#include "pkt-line.h"
#include "progress.h"
#include "read-cache-ll.h"
#include "run-command.h"
#include "sigchain.h"
#include "streaming.h"
#include "symlinks.h"
#include "thread-utils.h"
#include "trace2.h"

struct pc_worker {
	struct child_process cp;
	size_t next_item_to_complete, nr_items_to_complete;
};

struct parallel_checkout {
	enum pc_status status;
	struct parallel_checkout_item *items; /* The parallel checkout queue. */
	size_t nr, alloc;
	struct progress *progress;
	unsigned int *progress_cnt;
};

static struct parallel_checkout parallel_checkout;

enum pc_status parallel_checkout_status(void)
{
	return parallel_checkout.status;
}

static const int DEFAULT_THRESHOLD_FOR_PARALLELISM = 100;
static const int DEFAULT_NUM_WORKERS = 1;

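/*
 * Illustrative configuration (the numbers below are hypothetical, not
 * the defaults): with this in the config, up to 8 workers are used once
 * at least 512 entries are queued:
 *
 *	[checkout]
 *		workers = 8
 *		thresholdForParallelism = 512
 *
 * GIT_TEST_CHECKOUT_WORKERS overrides both settings, forcing the given
 * number of workers and a threshold of zero.
 */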
void get_parallel_checkout_configs(int *num_workers, int *threshold)
{
	char *env_workers = getenv("GIT_TEST_CHECKOUT_WORKERS");

	if (env_workers && *env_workers) {
		if (strtol_i(env_workers, 10, num_workers)) {
			die(_("invalid value for '%s': '%s'"),
			    "GIT_TEST_CHECKOUT_WORKERS", env_workers);
		}
		if (*num_workers < 1)
			*num_workers = online_cpus();

		*threshold = 0;
		return;
	}

	if (git_config_get_int("checkout.workers", num_workers))
		*num_workers = DEFAULT_NUM_WORKERS;
	else if (*num_workers < 1)
		*num_workers = online_cpus();

	if (git_config_get_int("checkout.thresholdForParallelism", threshold))
		*threshold = DEFAULT_THRESHOLD_FOR_PARALLELISM;
}

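/*
 * A sketch of the intended calling sequence, as seen from a caller such
 * as unpack-trees (simplified; error handling omitted):
 *
 *	int pc_workers, pc_threshold;
 *
 *	get_parallel_checkout_configs(&pc_workers, &pc_threshold);
 *	if (pc_workers > 1)
 *		init_parallel_checkout();
 *	... checkout_entry() enqueues eligible entries via enqueue_checkout() ...
 *	if (pc_workers > 1)
 *		run_parallel_checkout(&state, pc_workers, pc_threshold,
 *				      progress, &progress_cnt);
 */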
void init_parallel_checkout(void)
{
	if (parallel_checkout.status != PC_UNINITIALIZED)
		BUG("parallel checkout already initialized");

	parallel_checkout.status = PC_ACCEPTING_ENTRIES;
}

static void finish_parallel_checkout(void)
{
	if (parallel_checkout.status == PC_UNINITIALIZED)
		BUG("cannot finish parallel checkout: not initialized yet");

	free(parallel_checkout.items);
	memset(&parallel_checkout, 0, sizeof(parallel_checkout));
}

static int is_eligible_for_parallel_checkout(const struct cache_entry *ce,
					     const struct conv_attrs *ca)
{
	enum conv_attrs_classification c;
	size_t packed_item_size;

	/*
	 * Symlinks cannot be checked out in parallel as, in case of path
	 * collision, they could racily replace leading directories of other
	 * entries being checked out. Submodules are checked out in child
	 * processes, which have their own parallel checkout queues.
	 */
	if (!S_ISREG(ce->ce_mode))
		return 0;

	packed_item_size = sizeof(struct pc_item_fixed_portion) + ce->ce_namelen +
		(ca->working_tree_encoding ? strlen(ca->working_tree_encoding) : 0);

	/*
	 * The amount of data we send to the workers per checkout item is
	 * typically small (75~300B). So unless we find an insanely huge path
	 * of 64KB, we should never reach the 65KB limit of one pkt-line. If
	 * that does happen, we let the sequential code handle the item.
	 */
	if (packed_item_size > LARGE_PACKET_DATA_MAX)
		return 0;

	c = classify_conv_attrs(ca);
	switch (c) {
	case CA_CLASS_INCORE:
		return 1;

	case CA_CLASS_INCORE_FILTER:
		/*
		 * It would be safe to allow concurrent instances of
		 * single-file smudge filters, like rot13, but we should not
		 * assume that all filters are parallel-process safe. So we
		 * don't allow this.
		 */
		return 0;

	case CA_CLASS_INCORE_PROCESS:
		/*
		 * The parallel queue and the delayed queue are not compatible,
		 * so they must be kept completely separated. And we can't tell
		 * if a long-running process will delay its response without
		 * actually asking it to perform the filtering. Therefore, this
		 * type of filter is not allowed in parallel checkout.
		 *
		 * Furthermore, there should only be one instance of the
		 * long-running process filter as we don't know how it is
		 * managing its own concurrency. So, spreading the entries that
		 * require such a filter among the parallel workers would
		 * require a lot more inter-process communication. We would
		 * probably have to designate a single process to interact with
		 * the filter and send all the necessary data to it, for each
		 * entry.
		 */
		return 0;

	case CA_CLASS_STREAMABLE:
		return 1;

	default:
		BUG("unsupported conv_attrs classification '%d'", c);
	}
}

int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca,
		     int *checkout_counter)
{
	struct parallel_checkout_item *pc_item;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES ||
	    !is_eligible_for_parallel_checkout(ce, ca))
		return -1;

	ALLOC_GROW(parallel_checkout.items, parallel_checkout.nr + 1,
		   parallel_checkout.alloc);

	pc_item = &parallel_checkout.items[parallel_checkout.nr];
	pc_item->ce = ce;
	memcpy(&pc_item->ca, ca, sizeof(pc_item->ca));
	pc_item->status = PC_ITEM_PENDING;
	pc_item->id = parallel_checkout.nr;
	pc_item->checkout_counter = checkout_counter;
	parallel_checkout.nr++;

	return 0;
}

size_t pc_queue_size(void)
{
	return parallel_checkout.nr;
}

static void advance_progress_meter(void)
{
	if (parallel_checkout.progress) {
		(*parallel_checkout.progress_cnt)++;
		display_progress(parallel_checkout.progress,
				 *parallel_checkout.progress_cnt);
	}
}

static int handle_results(struct checkout *state)
{
	int ret = 0;
	size_t i;
	int have_pending = 0;

	/*
	 * We first update the successfully written entries with the collected
	 * stat() data, so that they can be found by mark_colliding_entries(),
	 * in the next loop, when necessary.
	 */
	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		if (pc_item->status == PC_ITEM_WRITTEN)
			update_ce_after_write(state, pc_item->ce, &pc_item->st);
	}

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];

		switch (pc_item->status) {
		case PC_ITEM_WRITTEN:
			if (pc_item->checkout_counter)
				(*pc_item->checkout_counter)++;
			break;
		case PC_ITEM_COLLIDED:
			/*
			 * The entry could not be checked out due to a path
			 * collision with another entry. Since there can only
			 * be one entry of each colliding group on the disk, we
			 * could skip trying to check out this one and move on.
			 * However, this would leave the unwritten entries with
			 * null stat() fields on the index, which could
			 * potentially slow down subsequent operations that
			 * require refreshing it: git would not be able to
			 * trust st_size and would have to go to the filesystem
			 * to see if the contents match (see ie_modified()).
			 *
			 * Instead, let's pay the overhead only once, now, and
			 * call checkout_entry_ca() again for this file, to
			 * have its stat() data stored in the index. This also
			 * has the benefit of adding this entry and its
			 * colliding pair to the collision report message.
			 * Additionally, this overwriting behavior is consistent
			 * with what the sequential checkout does, so it doesn't
			 * add any extra overhead.
			 */
			ret |= checkout_entry_ca(pc_item->ce, &pc_item->ca,
						 state, NULL,
						 pc_item->checkout_counter);
			advance_progress_meter();
			break;
		case PC_ITEM_PENDING:
			have_pending = 1;
			/* fall through */
		case PC_ITEM_FAILED:
			ret = -1;
			break;
		default:
			BUG("unknown checkout item status in parallel checkout");
		}
	}

	if (have_pending)
		error("parallel checkout finished with pending entries");

	return ret;
}

static int reset_fd(int fd, const char *path)
{
	if (lseek(fd, 0, SEEK_SET) != 0)
		return error_errno("failed to rewind descriptor of '%s'", path);
	if (ftruncate(fd, 0))
		return error_errno("failed to truncate file '%s'", path);
	return 0;
}

static int write_pc_item_to_fd(struct parallel_checkout_item *pc_item, int fd,
			       const char *path)
{
	int ret;
	struct stream_filter *filter;
	struct strbuf buf = STRBUF_INIT;
	char *blob;
	size_t size;
	ssize_t wrote;

	/* Sanity check */
	assert(is_eligible_for_parallel_checkout(pc_item->ce, &pc_item->ca));

	filter = get_stream_filter_ca(&pc_item->ca, &pc_item->ce->oid);
	if (filter) {
		if (stream_blob_to_fd(fd, &pc_item->ce->oid, filter, 1)) {
			/* On error, reset fd to try writing without streaming */
			if (reset_fd(fd, path))
				return -1;
		} else {
			return 0;
		}
	}

	blob = read_blob_entry(pc_item->ce, &size);
	if (!blob)
		return error("cannot read object %s '%s'",
			     oid_to_hex(&pc_item->ce->oid), pc_item->ce->name);

	/*
	 * checkout metadata is used to give context for external process
	 * filters. Files requiring such filters are not eligible for parallel
	 * checkout, so pass NULL. Note: if that changes, the metadata must also
	 * be passed from the main process to the workers.
	 */
	ret = convert_to_working_tree_ca(&pc_item->ca, pc_item->ce->name,
					 blob, size, &buf, NULL);

	if (ret) {
		size_t newsize;
		free(blob);
		blob = strbuf_detach(&buf, &newsize);
		size = newsize;
	}

	wrote = write_in_full(fd, blob, size);
	free(blob);
	if (wrote < 0)
		return error("unable to write file '%s'", path);

	return 0;
}

static int close_and_clear(int *fd)
{
	int ret = 0;

	if (*fd >= 0) {
		ret = close(*fd);
		*fd = -1;
	}

	return ret;
}

void write_pc_item(struct parallel_checkout_item *pc_item,
		   struct checkout *state)
{
	unsigned int mode = (pc_item->ce->ce_mode & 0100) ? 0777 : 0666;
	int fd = -1, fstat_done = 0;
	struct strbuf path = STRBUF_INIT;
	const char *dir_sep;

	strbuf_add(&path, state->base_dir, state->base_dir_len);
	strbuf_add(&path, pc_item->ce->name, pc_item->ce->ce_namelen);

	dir_sep = find_last_dir_sep(path.buf);

	/*
	 * The leading dirs should have already been created by now. But, in
	 * case of path collisions, one of the dirs could have been replaced by
	 * a symlink (checked out after we enqueued this entry for parallel
	 * checkout). Thus, we must check the leading dirs again.
	 */
	if (dir_sep && !has_dirs_only_path(path.buf, dir_sep - path.buf,
					   state->base_dir_len)) {
		pc_item->status = PC_ITEM_COLLIDED;
		trace2_data_string("pcheckout", NULL, "collision/dirname", path.buf);
		goto out;
	}

	fd = open(path.buf, O_WRONLY | O_CREAT | O_EXCL, mode);

	if (fd < 0) {
		if (errno == EEXIST || errno == EISDIR) {
			/*
			 * Errors which probably represent a path collision.
			 * Suppress the error message and mark the item to be
			 * retried later, sequentially. ENOTDIR and ENOENT are
			 * also interesting, but the above has_dirs_only_path()
			 * call should have already caught these cases.
			 */
			pc_item->status = PC_ITEM_COLLIDED;
			trace2_data_string("pcheckout", NULL,
					   "collision/basename", path.buf);
		} else {
			error_errno("failed to open file '%s'", path.buf);
			pc_item->status = PC_ITEM_FAILED;
		}
		goto out;
	}

	if (write_pc_item_to_fd(pc_item, fd, path.buf)) {
		/* Error was already reported. */
		pc_item->status = PC_ITEM_FAILED;
		close_and_clear(&fd);
		unlink(path.buf);
		goto out;
	}

	fstat_done = fstat_checkout_output(fd, state, &pc_item->st);

	if (close_and_clear(&fd)) {
		error_errno("unable to close file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	if (state->refresh_cache && !fstat_done && lstat(path.buf, &pc_item->st) < 0) {
		error_errno("unable to stat just-written file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	pc_item->status = PC_ITEM_WRITTEN;

out:
	strbuf_release(&path);
}

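/*
 * Wire format of one work item, sent to a worker as a single pkt-line
 * payload. The diagram below mirrors the code that follows; the worker
 * parses the same three parts on the other end:
 *
 *	+------------------------------+-------------------+----------+
 *	| struct pc_item_fixed_portion | working tree      | ce->name |
 *	| (id, oid, ce_mode, crlf and  | encoding, if any  | (no NUL) |
 *	| ident flags, both lengths)   | (no NUL)          |          |
 *	+------------------------------+-------------------+----------+
 */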
static void send_one_item(int fd, struct parallel_checkout_item *pc_item)
{
	size_t len_data;
	char *data, *variant;
	struct pc_item_fixed_portion *fixed_portion;
	const char *working_tree_encoding = pc_item->ca.working_tree_encoding;
	size_t name_len = pc_item->ce->ce_namelen;
	size_t working_tree_encoding_len = working_tree_encoding ?
		strlen(working_tree_encoding) : 0;

	/*
	 * Any changes in the calculation of the message size must also be made
	 * in is_eligible_for_parallel_checkout().
	 */
	len_data = sizeof(struct pc_item_fixed_portion) + name_len +
		working_tree_encoding_len;

	data = xmalloc(len_data);

	fixed_portion = (struct pc_item_fixed_portion *)data;
	fixed_portion->id = pc_item->id;
	fixed_portion->ce_mode = pc_item->ce->ce_mode;
	fixed_portion->crlf_action = pc_item->ca.crlf_action;
	fixed_portion->ident = pc_item->ca.ident;
	fixed_portion->name_len = name_len;
	fixed_portion->working_tree_encoding_len = working_tree_encoding_len;
	oidcpy(&fixed_portion->oid, &pc_item->ce->oid);

	variant = data + sizeof(*fixed_portion);
	if (working_tree_encoding_len) {
		memcpy(variant, working_tree_encoding, working_tree_encoding_len);
		variant += working_tree_encoding_len;
	}
	memcpy(variant, pc_item->ce->name, name_len);

	packet_write(fd, data, len_data);

	free(data);
}

static void send_batch(int fd, size_t start, size_t nr)
{
	size_t i;
	sigchain_push(SIGPIPE, SIG_IGN);
	for (i = 0; i < nr; i++)
		send_one_item(fd, &parallel_checkout.items[start + i]);
	packet_flush(fd);
	sigchain_pop(SIGPIPE);
}

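/*
 * Spawn the worker subprocesses and hand each one a contiguous slice of
 * the queue. Each worker is a `git checkout--worker` process that
 * receives its batch as pkt-lines on stdin and streams one result per
 * item back on stdout.
 */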
static struct pc_worker *setup_workers(struct checkout *state, int num_workers)
{
	struct pc_worker *workers;
	int i, workers_with_one_extra_item;
	size_t base_batch_size, batch_beginning = 0;

	ALLOC_ARRAY(workers, num_workers);

	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;

		child_process_init(cp);
		cp->git_cmd = 1;
		cp->in = -1;
		cp->out = -1;
		cp->clean_on_exit = 1;
		strvec_push(&cp->args, "checkout--worker");
		if (state->base_dir_len)
			strvec_pushf(&cp->args, "--prefix=%s", state->base_dir);
		if (start_command(cp))
			die("failed to spawn checkout worker");
	}

	base_batch_size = parallel_checkout.nr / num_workers;
	workers_with_one_extra_item = parallel_checkout.nr % num_workers;

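	/*
	 * Example of the distribution arithmetic (numbers are hypothetical):
	 * with parallel_checkout.nr == 10 and num_workers == 3, we get
	 * base_batch_size == 3 and workers_with_one_extra_item == 1, so the
	 * workers receive items 0-3, 4-6 and 7-9, respectively.
	 */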
	for (i = 0; i < num_workers; i++) {
		struct pc_worker *worker = &workers[i];
		size_t batch_size = base_batch_size;

		/* distribute the extra work evenly */
		if (i < workers_with_one_extra_item)
			batch_size++;

		send_batch(worker->cp.in, batch_beginning, batch_size);
		worker->next_item_to_complete = batch_beginning;
		worker->nr_items_to_complete = batch_size;

		batch_beginning += batch_size;
	}

	return workers;
}

static void finish_workers(struct pc_worker *workers, int num_workers)
{
	int i;

	/*
	 * Close pipes before calling finish_command() to let the workers
	 * exit asynchronously and avoid spending extra time on wait().
	 */
	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;
		if (cp->in >= 0)
			close(cp->in);
		if (cp->out >= 0)
			close(cp->out);
	}

	for (i = 0; i < num_workers; i++) {
		int rc = finish_command(&workers[i].cp);
		if (rc > 128) {
			/*
			 * For a normal non-zero exit, the worker should have
			 * already printed something useful to stderr. But a
			 * death by signal should be mentioned to the user.
			 */
			error("checkout worker %d died of signal %d", i, rc - 128);
		}
	}

	free(workers);
}

static inline void assert_pc_item_result_size(int got, int exp)
{
	if (got != exp)
		BUG("wrong result size from checkout worker (got %dB, exp %dB)",
		    got, exp);
}

static void parse_and_save_result(const char *buffer, int len,
				  struct pc_worker *worker)
{
	struct pc_item_result *res;
	struct parallel_checkout_item *pc_item;
	struct stat *st = NULL;

	if (len < PC_ITEM_RESULT_BASE_SIZE)
		BUG("too short result from checkout worker (got %dB, exp >=%dB)",
		    len, (int)PC_ITEM_RESULT_BASE_SIZE);

	res = (struct pc_item_result *)buffer;

	/*
	 * Worker should send either the full result struct on success, or
	 * just the base (i.e. no stat data), otherwise.
	 */
	if (res->status == PC_ITEM_WRITTEN) {
		assert_pc_item_result_size(len, (int)sizeof(struct pc_item_result));
		st = &res->st;
	} else {
		assert_pc_item_result_size(len, (int)PC_ITEM_RESULT_BASE_SIZE);
	}

	if (!worker->nr_items_to_complete)
		BUG("received result from supposedly finished checkout worker");
	if (res->id != worker->next_item_to_complete)
		BUG("unexpected item id from checkout worker (got %"PRIuMAX", exp %"PRIuMAX")",
		    (uintmax_t)res->id, (uintmax_t)worker->next_item_to_complete);

	worker->next_item_to_complete++;
	worker->nr_items_to_complete--;

	pc_item = &parallel_checkout.items[res->id];
	pc_item->status = res->status;
	if (st)
		pc_item->st = *st;

	if (res->status != PC_ITEM_COLLIDED)
		advance_progress_meter();
}

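/*
 * Main result-gathering loop: poll() the workers' output pipes and read
 * one pkt-line per finished item. A zero-length read is the flush packet
 * a worker sends after its last result, marking that worker as done.
 */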
static void gather_results_from_workers(struct pc_worker *workers,
					int num_workers)
{
	int i, active_workers = num_workers;
	struct pollfd *pfds;

	CALLOC_ARRAY(pfds, num_workers);
	for (i = 0; i < num_workers; i++) {
		pfds[i].fd = workers[i].cp.out;
		pfds[i].events = POLLIN;
	}

	while (active_workers) {
		int nr = poll(pfds, num_workers, -1);

		if (nr < 0) {
			if (errno == EINTR)
				continue;
			die_errno("failed to poll checkout workers");
		}

		for (i = 0; i < num_workers && nr > 0; i++) {
			struct pc_worker *worker = &workers[i];
			struct pollfd *pfd = &pfds[i];

			if (!pfd->revents)
				continue;

			if (pfd->revents & POLLIN) {
				int len = packet_read(pfd->fd, packet_buffer,
						      sizeof(packet_buffer), 0);

				if (len < 0) {
					BUG("packet_read() returned negative value");
				} else if (!len) {
					pfd->fd = -1;
					active_workers--;
				} else {
					parse_and_save_result(packet_buffer,
							      len, worker);
				}
			} else if (pfd->revents & POLLHUP) {
				pfd->fd = -1;
				active_workers--;
			} else if (pfd->revents & (POLLNVAL | POLLERR)) {
				die("error polling from checkout worker");
			}

			nr--;
		}
	}

	free(pfds);
}

static void write_items_sequentially(struct checkout *state)
{
	size_t i;

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		write_pc_item(pc_item, state);
		if (pc_item->status != PC_ITEM_COLLIDED)
			advance_progress_meter();
	}
}

int run_parallel_checkout(struct checkout *state, int num_workers, int threshold,
			  struct progress *progress, unsigned int *progress_cnt)
{
	int ret;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES)
		BUG("cannot run parallel checkout: uninitialized or already running");

	parallel_checkout.status = PC_RUNNING;
	parallel_checkout.progress = progress;
	parallel_checkout.progress_cnt = progress_cnt;

	if (parallel_checkout.nr < num_workers)
		num_workers = parallel_checkout.nr;

	if (num_workers <= 1 || parallel_checkout.nr < threshold) {
		write_items_sequentially(state);
	} else {
		struct pc_worker *workers = setup_workers(state, num_workers);
		gather_results_from_workers(workers, num_workers);
		finish_workers(workers, num_workers);
	}

	ret = handle_results(state);

	finish_parallel_checkout();
	return ret;
}