#include "cache.h"
#include "config.h"
#include "entry.h"
#include "parallel-checkout.h"
#include "pkt-line.h"
#include "progress.h"
#include "run-command.h"
#include "sigchain.h"
#include "streaming.h"
#include "thread-utils.h"
#include "trace2.h"

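/*
 * Bookkeeping for a single checkout--worker child process: the process
 * handle itself plus the window of queue items whose results we still
 * expect to receive from it.
 */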
struct pc_worker {
	struct child_process cp;
	size_t next_item_to_complete, nr_items_to_complete;
};

struct parallel_checkout {
	enum pc_status status;
	struct parallel_checkout_item *items; /* The parallel checkout queue. */
	size_t nr, alloc;
	struct progress *progress;
	unsigned int *progress_cnt;
};

static struct parallel_checkout parallel_checkout;

enum pc_status parallel_checkout_status(void)
{
	return parallel_checkout.status;
}

static const int DEFAULT_THRESHOLD_FOR_PARALLELISM = 100;
static const int DEFAULT_NUM_WORKERS = 1;

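/*
 * The GIT_TEST_CHECKOUT_WORKERS environment variable takes precedence
 * over the checkout.workers setting. In both cases, a value below 1
 * means "use one worker per available core", and the test variable
 * additionally zeroes the threshold so that parallelism is always
 * exercised regardless of how few entries are queued.
 */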
void get_parallel_checkout_configs(int *num_workers, int *threshold)
{
	char *env_workers = getenv("GIT_TEST_CHECKOUT_WORKERS");

	if (env_workers && *env_workers) {
		if (strtol_i(env_workers, 10, num_workers)) {
			die("invalid value for GIT_TEST_CHECKOUT_WORKERS: '%s'",
			    env_workers);
		}
		if (*num_workers < 1)
			*num_workers = online_cpus();

		*threshold = 0;
		return;
	}

	if (git_config_get_int("checkout.workers", num_workers))
		*num_workers = DEFAULT_NUM_WORKERS;
	else if (*num_workers < 1)
		*num_workers = online_cpus();

	if (git_config_get_int("checkout.thresholdForParallelism", threshold))
		*threshold = DEFAULT_THRESHOLD_FOR_PARALLELISM;
}

void init_parallel_checkout(void)
{
	if (parallel_checkout.status != PC_UNINITIALIZED)
		BUG("parallel checkout already initialized");

	parallel_checkout.status = PC_ACCEPTING_ENTRIES;
}

static void finish_parallel_checkout(void)
{
	if (parallel_checkout.status == PC_UNINITIALIZED)
		BUG("cannot finish parallel checkout: not initialized yet");

	free(parallel_checkout.items);
	memset(&parallel_checkout, 0, sizeof(parallel_checkout));
}

static int is_eligible_for_parallel_checkout(const struct cache_entry *ce,
					     const struct conv_attrs *ca)
{
	enum conv_attrs_classification c;
	size_t packed_item_size;

	/*
	 * Symlinks cannot be checked out in parallel as, in case of path
	 * collision, they could racily replace leading directories of other
	 * entries being checked out. Submodules are checked out in child
	 * processes, which have their own parallel checkout queues.
	 */
	if (!S_ISREG(ce->ce_mode))
		return 0;

	packed_item_size = sizeof(struct pc_item_fixed_portion) + ce->ce_namelen +
		(ca->working_tree_encoding ? strlen(ca->working_tree_encoding) : 0);

	/*
	 * The amount of data we send to the workers per checkout item is
	 * typically small (75~300B). So unless we find an insanely huge path
	 * of 64KB, we should never reach the 65KB limit of one pkt-line. If
	 * that does happen, we let the sequential code handle the item.
	 */
	if (packed_item_size > LARGE_PACKET_DATA_MAX)
		return 0;

	c = classify_conv_attrs(ca);
	switch (c) {
	case CA_CLASS_INCORE:
		return 1;

	case CA_CLASS_INCORE_FILTER:
		/*
		 * It would be safe to allow concurrent instances of
		 * single-file smudge filters, like rot13, but we should not
		 * assume that all filters are parallel-process safe. So we
		 * don't allow this.
		 */
		return 0;

	case CA_CLASS_INCORE_PROCESS:
		/*
		 * The parallel queue and the delayed queue are not compatible,
		 * so they must be kept completely separated. And we can't tell
		 * if a long-running process will delay its response without
		 * actually asking it to perform the filtering. Therefore, this
		 * type of filter is not allowed in parallel checkout.
		 *
		 * Furthermore, there should only be one instance of the
		 * long-running process filter as we don't know how it is
		 * managing its own concurrency. So, spreading the entries that
		 * require such a filter among the parallel workers would
		 * require a lot more inter-process communication. We would
		 * probably have to designate a single process to interact with
		 * the filter and send all the necessary data to it, for each
		 * entry.
		 */
		return 0;

	case CA_CLASS_STREAMABLE:
		return 1;

	default:
		BUG("unsupported conv_attrs classification '%d'", c);
	}
}

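/*
 * Try to add `ce` to the parallel checkout queue. Returns 0 on success
 * or -1 if the entry is not eligible (or the queue is not accepting
 * entries), in which case the caller is responsible for writing it
 * sequentially. Note that `*ca` is copied into the queue item, but the
 * copy is shallow, so any data it points to must outlive the checkout.
 */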
int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca)
{
	struct parallel_checkout_item *pc_item;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES ||
	    !is_eligible_for_parallel_checkout(ce, ca))
		return -1;

	ALLOC_GROW(parallel_checkout.items, parallel_checkout.nr + 1,
		   parallel_checkout.alloc);

	pc_item = &parallel_checkout.items[parallel_checkout.nr];
	pc_item->ce = ce;
	memcpy(&pc_item->ca, ca, sizeof(pc_item->ca));
	pc_item->status = PC_ITEM_PENDING;
	pc_item->id = parallel_checkout.nr;
	parallel_checkout.nr++;

	return 0;
}

size_t pc_queue_size(void)
{
	return parallel_checkout.nr;
}

static void advance_progress_meter(void)
{
	if (parallel_checkout.progress) {
		(*parallel_checkout.progress_cnt)++;
		display_progress(parallel_checkout.progress,
				 *parallel_checkout.progress_cnt);
	}
}

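/*
 * Inspect the final status of each queued item once the workers (or the
 * sequential fallback) are done. Collided entries are retried
 * sequentially through checkout_entry_ca(); failed or still-pending
 * entries make the whole checkout report an error.
 */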
static int handle_results(struct checkout *state)
{
	int ret = 0;
	size_t i;
	int have_pending = 0;

	/*
	 * We first update the successfully written entries with the collected
	 * stat() data, so that they can be found by mark_colliding_entries(),
	 * in the next loop, when necessary.
	 */
	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		if (pc_item->status == PC_ITEM_WRITTEN)
			update_ce_after_write(state, pc_item->ce, &pc_item->st);
	}

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];

		switch (pc_item->status) {
		case PC_ITEM_WRITTEN:
			/* Already handled */
			break;
		case PC_ITEM_COLLIDED:
			/*
			 * The entry could not be checked out due to a path
			 * collision with another entry. Since there can only
			 * be one entry of each colliding group on the disk, we
			 * could skip trying to check out this one and move on.
			 * However, this would leave the unwritten entries with
			 * null stat() fields on the index, which could
			 * potentially slow down subsequent operations that
			 * require refreshing it: git would not be able to
			 * trust st_size and would have to go to the filesystem
			 * to see if the contents match (see ie_modified()).
			 *
			 * Instead, let's pay the overhead only once, now, and
			 * call checkout_entry_ca() again for this file, to
			 * have its stat() data stored in the index. This also
			 * has the benefit of adding this entry and its
			 * colliding pair to the collision report message.
			 * Additionally, this overwriting behavior is consistent
			 * with what the sequential checkout does, so it doesn't
			 * add any extra overhead.
			 */
			ret |= checkout_entry_ca(pc_item->ce, &pc_item->ca,
						 state, NULL, NULL);
			advance_progress_meter();
			break;
		case PC_ITEM_PENDING:
			have_pending = 1;
			/* fall through */
		case PC_ITEM_FAILED:
			ret = -1;
			break;
		default:
			BUG("unknown checkout item status in parallel checkout");
		}
	}

	if (have_pending)
		error("parallel checkout finished with pending entries");

	return ret;
}

static int reset_fd(int fd, const char *path)
{
	if (lseek(fd, 0, SEEK_SET) != 0)
		return error_errno("failed to rewind descriptor of '%s'", path);
	if (ftruncate(fd, 0))
		return error_errno("failed to truncate file '%s'", path);
	return 0;
}

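/*
 * Write the blob contents of `pc_item` to the already-opened `fd`. The
 * streaming interface is tried first, when available; if streaming
 * fails midway, the file is rewound and truncated, and we fall back to
 * loading the whole blob in-core and converting it before writing.
 * Returns 0 on success or a negative value on error (after printing a
 * message).
 */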
static int write_pc_item_to_fd(struct parallel_checkout_item *pc_item, int fd,
			       const char *path)
{
	int ret;
	struct stream_filter *filter;
	struct strbuf buf = STRBUF_INIT;
	char *blob;
	unsigned long size;
	ssize_t wrote;

	/* Sanity check */
	assert(is_eligible_for_parallel_checkout(pc_item->ce, &pc_item->ca));

	filter = get_stream_filter_ca(&pc_item->ca, &pc_item->ce->oid);
	if (filter) {
		if (stream_blob_to_fd(fd, &pc_item->ce->oid, filter, 1)) {
			/* On error, reset fd to try writing without streaming */
			if (reset_fd(fd, path))
				return -1;
		} else {
			return 0;
		}
	}

	blob = read_blob_entry(pc_item->ce, &size);
	if (!blob)
		return error("cannot read object %s '%s'",
			     oid_to_hex(&pc_item->ce->oid), pc_item->ce->name);

	/*
	 * checkout metadata is used to give context for external process
	 * filters. Files requiring such filters are not eligible for parallel
	 * checkout, so pass NULL. Note: if that changes, the metadata must also
	 * be passed from the main process to the workers.
	 */
	ret = convert_to_working_tree_ca(&pc_item->ca, pc_item->ce->name,
					 blob, size, &buf, NULL);

	if (ret) {
		size_t newsize;
		free(blob);
		blob = strbuf_detach(&buf, &newsize);
		size = newsize;
	}

	wrote = write_in_full(fd, blob, size);
	free(blob);
	if (wrote < 0)
		return error("unable to write file '%s'", path);

	return 0;
}

static int close_and_clear(int *fd)
{
	int ret = 0;

	if (*fd >= 0) {
		ret = close(*fd);
		*fd = -1;
	}

	return ret;
}

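/*
 * Check out a single queue item, recording the outcome in
 * pc_item->status and, on success, the collected stat() data in
 * pc_item->st. This is used both by the checkout workers and by the
 * sequential fallback in this file.
 */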
void write_pc_item(struct parallel_checkout_item *pc_item,
		   struct checkout *state)
{
	unsigned int mode = (pc_item->ce->ce_mode & 0100) ? 0777 : 0666;
	int fd = -1, fstat_done = 0;
	struct strbuf path = STRBUF_INIT;
	const char *dir_sep;

	strbuf_add(&path, state->base_dir, state->base_dir_len);
	strbuf_add(&path, pc_item->ce->name, pc_item->ce->ce_namelen);

	dir_sep = find_last_dir_sep(path.buf);

	/*
	 * The leading dirs should have been already created by now. But, in
	 * case of path collisions, one of the dirs could have been replaced by
	 * a symlink (checked out after we enqueued this entry for parallel
	 * checkout). Thus, we must check the leading dirs again.
	 */
	if (dir_sep && !has_dirs_only_path(path.buf, dir_sep - path.buf,
					   state->base_dir_len)) {
		pc_item->status = PC_ITEM_COLLIDED;
		trace2_data_string("pcheckout", NULL, "collision/dirname", path.buf);
		goto out;
	}

	fd = open(path.buf, O_WRONLY | O_CREAT | O_EXCL, mode);

	if (fd < 0) {
		if (errno == EEXIST || errno == EISDIR) {
			/*
			 * Errors which probably represent a path collision.
			 * Suppress the error message and mark the item to be
			 * retried later, sequentially. ENOTDIR and ENOENT are
			 * also interesting, but the above has_dirs_only_path()
			 * call should have already caught these cases.
			 */
			pc_item->status = PC_ITEM_COLLIDED;
			trace2_data_string("pcheckout", NULL,
					   "collision/basename", path.buf);
		} else {
			error_errno("failed to open file '%s'", path.buf);
			pc_item->status = PC_ITEM_FAILED;
		}
		goto out;
	}

	if (write_pc_item_to_fd(pc_item, fd, path.buf)) {
		/* Error was already reported. */
		pc_item->status = PC_ITEM_FAILED;
		close_and_clear(&fd);
		unlink(path.buf);
		goto out;
	}

	fstat_done = fstat_checkout_output(fd, state, &pc_item->st);

	if (close_and_clear(&fd)) {
		error_errno("unable to close file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	if (state->refresh_cache && !fstat_done && lstat(path.buf, &pc_item->st) < 0) {
		error_errno("unable to stat just-written file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	pc_item->status = PC_ITEM_WRITTEN;

out:
	strbuf_release(&path);
}

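/*
 * Serialize one queue item into a single pkt-line: a fixed-size binary
 * header (struct pc_item_fixed_portion) followed by the variable-length
 * fields, i.e. the working tree encoding (if any) and then the entry's
 * name. Neither variable field is NUL-terminated; their lengths are
 * carried in the fixed portion.
 */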
static void send_one_item(int fd, struct parallel_checkout_item *pc_item)
{
	size_t len_data;
	char *data, *variant;
	struct pc_item_fixed_portion *fixed_portion;
	const char *working_tree_encoding = pc_item->ca.working_tree_encoding;
	size_t name_len = pc_item->ce->ce_namelen;
	size_t working_tree_encoding_len = working_tree_encoding ?
					   strlen(working_tree_encoding) : 0;

	/*
	 * Any changes in the calculation of the message size must also be made
	 * in is_eligible_for_parallel_checkout().
	 */
	len_data = sizeof(struct pc_item_fixed_portion) + name_len +
		   working_tree_encoding_len;

	data = xcalloc(1, len_data);

	fixed_portion = (struct pc_item_fixed_portion *)data;
	fixed_portion->id = pc_item->id;
	fixed_portion->ce_mode = pc_item->ce->ce_mode;
	fixed_portion->crlf_action = pc_item->ca.crlf_action;
	fixed_portion->ident = pc_item->ca.ident;
	fixed_portion->name_len = name_len;
	fixed_portion->working_tree_encoding_len = working_tree_encoding_len;
	/*
	 * We use hashcpy() instead of oidcpy() because the hash[] positions
	 * after `the_hash_algo->rawsz` might not be initialized. And Valgrind
	 * would complain about passing uninitialized bytes to a syscall
	 * (write(2)). There is no real harm in this case, but the warning could
	 * hinder the detection of actual errors.
	 */
	hashcpy(fixed_portion->oid.hash, pc_item->ce->oid.hash);

	variant = data + sizeof(*fixed_portion);
	if (working_tree_encoding_len) {
		memcpy(variant, working_tree_encoding, working_tree_encoding_len);
		variant += working_tree_encoding_len;
	}
	memcpy(variant, pc_item->ce->name, name_len);

	packet_write(fd, data, len_data);

	free(data);
}

static void send_batch(int fd, size_t start, size_t nr)
{
	size_t i;
	sigchain_push(SIGPIPE, SIG_IGN);
	for (i = 0; i < nr; i++)
		send_one_item(fd, &parallel_checkout.items[start + i]);
	packet_flush(fd);
	sigchain_pop(SIGPIPE);
}

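/*
 * Spawn `num_workers` checkout--worker processes and distribute the
 * queued items among them in contiguous batches of roughly equal size.
 * Each worker receives its whole batch up front, before we start
 * polling for results.
 */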
static struct pc_worker *setup_workers(struct checkout *state, int num_workers)
{
	struct pc_worker *workers;
	int i, workers_with_one_extra_item;
	size_t base_batch_size, batch_beginning = 0;

	ALLOC_ARRAY(workers, num_workers);

	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;

		child_process_init(cp);
		cp->git_cmd = 1;
		cp->in = -1;
		cp->out = -1;
		cp->clean_on_exit = 1;
		strvec_push(&cp->args, "checkout--worker");
		if (state->base_dir_len)
			strvec_pushf(&cp->args, "--prefix=%s", state->base_dir);
		if (start_command(cp))
			die("failed to spawn checkout worker");
	}

	base_batch_size = parallel_checkout.nr / num_workers;
	workers_with_one_extra_item = parallel_checkout.nr % num_workers;

	for (i = 0; i < num_workers; i++) {
		struct pc_worker *worker = &workers[i];
		size_t batch_size = base_batch_size;

		/* distribute the extra work evenly */
		if (i < workers_with_one_extra_item)
			batch_size++;

		send_batch(worker->cp.in, batch_beginning, batch_size);
		worker->next_item_to_complete = batch_beginning;
		worker->nr_items_to_complete = batch_size;

		batch_beginning += batch_size;
	}

	return workers;
}

static void finish_workers(struct pc_worker *workers, int num_workers)
{
	int i;

	/*
	 * Close pipes before calling finish_command() to let the workers
	 * exit asynchronously and avoid spending extra time on wait().
	 */
	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;
		if (cp->in >= 0)
			close(cp->in);
		if (cp->out >= 0)
			close(cp->out);
	}

	for (i = 0; i < num_workers; i++) {
		int rc = finish_command(&workers[i].cp);
		if (rc > 128) {
			/*
			 * For a normal non-zero exit, the worker should have
			 * already printed something useful to stderr. But a
			 * death by signal should be mentioned to the user.
			 */
			error("checkout worker %d died of signal %d", i, rc - 128);
		}
	}

	free(workers);
}

static inline void assert_pc_item_result_size(int got, int exp)
{
	if (got != exp)
		BUG("wrong result size from checkout worker (got %dB, exp %dB)",
		    got, exp);
}

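/*
 * Parse one result pkt-line from a worker and store it in the matching
 * queue item. Each worker reports its items in the same order they were
 * sent to it, which is what allows the simple cursor-style
 * next_item_to_complete / nr_items_to_complete sanity checks below.
 */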
static void parse_and_save_result(const char *buffer, int len,
				  struct pc_worker *worker)
{
	struct pc_item_result *res;
	struct parallel_checkout_item *pc_item;
	struct stat *st = NULL;

	if (len < PC_ITEM_RESULT_BASE_SIZE)
		BUG("too short result from checkout worker (got %dB, exp >=%dB)",
		    len, (int)PC_ITEM_RESULT_BASE_SIZE);

	res = (struct pc_item_result *)buffer;

	/*
	 * Worker should send either the full result struct on success, or
	 * just the base (i.e. no stat data), otherwise.
	 */
	if (res->status == PC_ITEM_WRITTEN) {
		assert_pc_item_result_size(len, (int)sizeof(struct pc_item_result));
		st = &res->st;
	} else {
		assert_pc_item_result_size(len, (int)PC_ITEM_RESULT_BASE_SIZE);
	}

	if (!worker->nr_items_to_complete)
		BUG("received result from supposedly finished checkout worker");
	if (res->id != worker->next_item_to_complete)
		BUG("unexpected item id from checkout worker (got %"PRIuMAX", exp %"PRIuMAX")",
		    (uintmax_t)res->id, (uintmax_t)worker->next_item_to_complete);

	worker->next_item_to_complete++;
	worker->nr_items_to_complete--;

	pc_item = &parallel_checkout.items[res->id];
	pc_item->status = res->status;
	if (st)
		pc_item->st = *st;

	if (res->status != PC_ITEM_COLLIDED)
		advance_progress_meter();
}

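/*
 * poll() the workers' output pipes until every worker has reported a
 * result for each item it was given. A worker signals the end of its
 * stream with a flush packet, which packet_read() returns as a zero
 * length.
 */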
static void gather_results_from_workers(struct pc_worker *workers,
					int num_workers)
{
	int i, active_workers = num_workers;
	struct pollfd *pfds;

	CALLOC_ARRAY(pfds, num_workers);
	for (i = 0; i < num_workers; i++) {
		pfds[i].fd = workers[i].cp.out;
		pfds[i].events = POLLIN;
	}

	while (active_workers) {
		int nr = poll(pfds, num_workers, -1);

		if (nr < 0) {
			if (errno == EINTR)
				continue;
			die_errno("failed to poll checkout workers");
		}

		for (i = 0; i < num_workers && nr > 0; i++) {
			struct pc_worker *worker = &workers[i];
			struct pollfd *pfd = &pfds[i];

			if (!pfd->revents)
				continue;

			if (pfd->revents & POLLIN) {
				int len = packet_read(pfd->fd, NULL, NULL,
						      packet_buffer,
						      sizeof(packet_buffer), 0);

				if (len < 0) {
					BUG("packet_read() returned negative value");
				} else if (!len) {
					pfd->fd = -1;
					active_workers--;
				} else {
					parse_and_save_result(packet_buffer,
							      len, worker);
				}
			} else if (pfd->revents & POLLHUP) {
				pfd->fd = -1;
				active_workers--;
			} else if (pfd->revents & (POLLNVAL | POLLERR)) {
				die("error polling from checkout worker");
			}

			nr--;
		}
	}

	free(pfds);
}

static void write_items_sequentially(struct checkout *state)
{
	size_t i;

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		write_pc_item(pc_item, state);
		if (pc_item->status != PC_ITEM_COLLIDED)
			advance_progress_meter();
	}
}

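/*
 * Write all queued entries, returning 0 on success or -1 on error. When
 * the queue is smaller than the configured threshold (or fewer than two
 * workers would end up being used), fall back to the sequential code
 * path instead of paying the cost of spawning workers.
 */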
int run_parallel_checkout(struct checkout *state, int num_workers, int threshold,
			  struct progress *progress, unsigned int *progress_cnt)
{
	int ret;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES)
		BUG("cannot run parallel checkout: uninitialized or already running");

	parallel_checkout.status = PC_RUNNING;
	parallel_checkout.progress = progress;
	parallel_checkout.progress_cnt = progress_cnt;

	if (parallel_checkout.nr < num_workers)
		num_workers = parallel_checkout.nr;

	if (num_workers <= 1 || parallel_checkout.nr < threshold) {
		write_items_sequentially(state);
	} else {
		struct pc_worker *workers = setup_workers(state, num_workers);
		gather_results_from_workers(workers, num_workers);
		finish_workers(workers, num_workers);
	}

	ret = handle_results(state);

	finish_parallel_checkout();
	return ret;
}