]> git.ipfire.org Git - thirdparty/git.git/blame - builtin/fsmonitor--daemon.c
Merge branch 'jk/clone-allow-bare-and-o-together'
[thirdparty/git.git] / builtin / fsmonitor--daemon.c
CommitLineData
16d9d617
JH
1#include "builtin.h"
2#include "config.h"
3#include "parse-options.h"
4#include "fsmonitor.h"
5#include "fsmonitor-ipc.h"
d0605550 6#include "compat/fsmonitor/fsm-health.h"
9dcba0ba
JH
7#include "compat/fsmonitor/fsm-listen.h"
8#include "fsmonitor--daemon.h"
16d9d617
JH
9#include "simple-ipc.h"
10#include "khash.h"
518a522f 11#include "pkt-line.h"
16d9d617
JH
12
13static const char * const builtin_fsmonitor__daemon_usage[] = {
c284e27b 14 N_("git fsmonitor--daemon start [<options>]"),
9dcba0ba 15 N_("git fsmonitor--daemon run [<options>]"),
02cb8b9e
AH
16 "git fsmonitor--daemon stop",
17 "git fsmonitor--daemon status",
16d9d617
JH
18 NULL
19};
20
21#ifdef HAVE_FSMONITOR_DAEMON_BACKEND
9dcba0ba
JH
22/*
23 * Global state loaded from config.
24 */
25#define FSMONITOR__IPC_THREADS "fsmonitor.ipcthreads"
26static int fsmonitor__ipc_threads = 8;
27
c284e27b
JH
28#define FSMONITOR__START_TIMEOUT "fsmonitor.starttimeout"
29static int fsmonitor__start_timeout_sec = 60;
30
9dcba0ba
JH
31#define FSMONITOR__ANNOUNCE_STARTUP "fsmonitor.announcestartup"
32static int fsmonitor__announce_startup = 0;
33
34static int fsmonitor_config(const char *var, const char *value, void *cb)
35{
36 if (!strcmp(var, FSMONITOR__IPC_THREADS)) {
37 int i = git_config_int(var, value);
38 if (i < 1)
39 return error(_("value of '%s' out of range: %d"),
40 FSMONITOR__IPC_THREADS, i);
41 fsmonitor__ipc_threads = i;
42 return 0;
43 }
44
c284e27b
JH
45 if (!strcmp(var, FSMONITOR__START_TIMEOUT)) {
46 int i = git_config_int(var, value);
47 if (i < 0)
48 return error(_("value of '%s' out of range: %d"),
49 FSMONITOR__START_TIMEOUT, i);
50 fsmonitor__start_timeout_sec = i;
51 return 0;
52 }
53
9dcba0ba
JH
54 if (!strcmp(var, FSMONITOR__ANNOUNCE_STARTUP)) {
55 int is_bool;
56 int i = git_config_bool_or_int(var, value, &is_bool);
57 if (i < 0)
58 return error(_("value of '%s' not bool or int: %d"),
59 var, i);
60 fsmonitor__announce_startup = i;
61 return 0;
62 }
63
64 return git_default_config(var, value, cb);
65}
66
abc9dbc0
JH
67/*
68 * Acting as a CLIENT.
69 *
70 * Send a "quit" command to the `git-fsmonitor--daemon` (if running)
71 * and wait for it to shutdown.
72 */
73static int do_as_client__send_stop(void)
74{
75 struct strbuf answer = STRBUF_INIT;
76 int ret;
77
78 ret = fsmonitor_ipc__send_command("quit", &answer);
79
80 /* The quit command does not return any response data. */
81 strbuf_release(&answer);
82
83 if (ret)
84 return ret;
85
86 trace2_region_enter("fsm_client", "polling-for-daemon-exit", NULL);
87 while (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
88 sleep_millisec(50);
89 trace2_region_leave("fsm_client", "polling-for-daemon-exit", NULL);
90
91 return 0;
92}
93
94static int do_as_client__status(void)
95{
96 enum ipc_active_state state = fsmonitor_ipc__get_state();
97
98 switch (state) {
99 case IPC_STATE__LISTENING:
100 printf(_("fsmonitor-daemon is watching '%s'\n"),
101 the_repository->worktree);
102 return 0;
103
104 default:
105 printf(_("fsmonitor-daemon is not watching '%s'\n"),
106 the_repository->worktree);
107 return 1;
108 }
109}
16d9d617 110
b05880d3
JH
/*
 * State of a client thread's wait on a cookie-file event.
 */
enum fsmonitor_cookie_item_result {
	FCIR_ERROR = -1, /* could not create cookie file ? */
	FCIR_INIT,       /* created; still waiting for the listener to see it */
	FCIR_SEEN,       /* listener thread observed the cookie event */
	FCIR_ABORT,      /* daemon lost sync; waiter should give up */
};

/*
 * One pending cookie file, stored in `state->cookies` and keyed by
 * the cookie filename.
 */
struct fsmonitor_cookie_item {
	struct hashmap_entry entry;
	char *name; /* owned; "<pid>-<seq>" cookie filename */
	enum fsmonitor_cookie_item_result result;
};
123
124static int cookies_cmp(const void *data, const struct hashmap_entry *he1,
125 const struct hashmap_entry *he2, const void *keydata)
126{
127 const struct fsmonitor_cookie_item *a =
128 container_of(he1, const struct fsmonitor_cookie_item, entry);
129 const struct fsmonitor_cookie_item *b =
130 container_of(he2, const struct fsmonitor_cookie_item, entry);
131
132 return strcmp(a->name, keydata ? keydata : b->name);
133}
134
/*
 * Create a unique cookie file inside the watched tree and block this
 * client thread until the listener thread reports having seen the
 * corresponding filesystem event (or aborts us).  This is how a
 * client request guarantees that the daemon has drained the kernel
 * event queue before answering.
 *
 * Returns the final cookie state (FCIR_SEEN on success).
 */
static enum fsmonitor_cookie_item_result with_lock__wait_for_cookie(
	struct fsmonitor_daemon_state *state)
{
	/* assert current thread holding state->main_lock */

	int fd;
	struct fsmonitor_cookie_item *cookie;
	struct strbuf cookie_pathname = STRBUF_INIT;
	struct strbuf cookie_filename = STRBUF_INIT;
	enum fsmonitor_cookie_item_result result;
	int my_cookie_seq;

	CALLOC_ARRAY(cookie, 1);

	/* "<pid>-<seq>" keeps names unique across threads and restarts */
	my_cookie_seq = state->cookie_seq++;

	strbuf_addf(&cookie_filename, "%i-%i", getpid(), my_cookie_seq);

	strbuf_addbuf(&cookie_pathname, &state->path_cookie_prefix);
	strbuf_addbuf(&cookie_pathname, &cookie_filename);

	cookie->name = strbuf_detach(&cookie_filename, NULL);
	cookie->result = FCIR_INIT;
	hashmap_entry_init(&cookie->entry, strhash(cookie->name));

	/* register before creating the file so the listener can find it */
	hashmap_add(&state->cookies, &cookie->entry);

	trace_printf_key(&trace_fsmonitor, "cookie-wait: '%s' '%s'",
			 cookie->name, cookie_pathname.buf);

	/*
	 * Create the cookie file on disk and then wait for a notification
	 * that the listener thread has seen it.
	 */
	fd = open(cookie_pathname.buf, O_WRONLY | O_CREAT | O_EXCL, 0600);
	if (fd < 0) {
		error_errno(_("could not create fsmonitor cookie '%s'"),
			    cookie->name);

		cookie->result = FCIR_ERROR;
		goto done;
	}

	/*
	 * Technically, close() and unlink() can fail, but we don't
	 * care here.  We only created the file to trigger a watch
	 * event from the FS to know that when we're up to date.
	 */
	close(fd);
	unlink(cookie_pathname.buf);

	/*
	 * Technically, this is an infinite wait (well, unless another
	 * thread sends us an abort).  I'd like to change this to
	 * use `pthread_cond_timedwait()` and return an error/timeout
	 * and let the caller do the trivial response thing, but we
	 * don't have that routine in our thread-utils.
	 *
	 * After extensive beta testing I'm not really worried about
	 * this.  Also note that the above open() and unlink() calls
	 * will cause at least two FS events on that path, so the odds
	 * of getting stuck are pretty slim.
	 */
	while (cookie->result == FCIR_INIT)
		pthread_cond_wait(&state->cookies_cond,
				  &state->main_lock);

done:
	hashmap_remove(&state->cookies, &cookie->entry, NULL);

	result = cookie->result;

	free(cookie->name);
	free(cookie);
	strbuf_release(&cookie_pathname);

	return result;
}
213
214/*
215 * Mark these cookies as _SEEN and wake up the corresponding client threads.
216 */
217static void with_lock__mark_cookies_seen(struct fsmonitor_daemon_state *state,
218 const struct string_list *cookie_names)
219{
220 /* assert current thread holding state->main_lock */
221
222 int k;
223 int nr_seen = 0;
224
225 for (k = 0; k < cookie_names->nr; k++) {
226 struct fsmonitor_cookie_item key;
227 struct fsmonitor_cookie_item *cookie;
228
229 key.name = cookie_names->items[k].string;
230 hashmap_entry_init(&key.entry, strhash(key.name));
231
232 cookie = hashmap_get_entry(&state->cookies, &key, entry, NULL);
233 if (cookie) {
234 trace_printf_key(&trace_fsmonitor, "cookie-seen: '%s'",
235 cookie->name);
236 cookie->result = FCIR_SEEN;
237 nr_seen++;
238 }
239 }
240
241 if (nr_seen)
242 pthread_cond_broadcast(&state->cookies_cond);
243}
244
245/*
246 * Set _ABORT on all pending cookies and wake up all client threads.
247 */
248static void with_lock__abort_all_cookies(struct fsmonitor_daemon_state *state)
249{
250 /* assert current thread holding state->main_lock */
251
252 struct hashmap_iter iter;
253 struct fsmonitor_cookie_item *cookie;
254 int nr_aborted = 0;
255
256 hashmap_for_each_entry(&state->cookies, &iter, cookie, entry) {
257 trace_printf_key(&trace_fsmonitor, "cookie-abort: '%s'",
258 cookie->name);
259 cookie->result = FCIR_ABORT;
260 nr_aborted++;
261 }
262
263 if (nr_aborted)
264 pthread_cond_broadcast(&state->cookies_cond);
265}
266
aeef767a
JH
267/*
268 * Requests to and from a FSMonitor Protocol V2 provider use an opaque
269 * "token" as a virtual timestamp. Clients can request a summary of all
270 * created/deleted/modified files relative to a token. In the response,
271 * clients receive a new token for the next (relative) request.
272 *
273 *
274 * Token Format
275 * ============
276 *
277 * The contents of the token are private and provider-specific.
278 *
279 * For the built-in fsmonitor--daemon, we define a token as follows:
280 *
281 * "builtin" ":" <token_id> ":" <sequence_nr>
282 *
283 * The "builtin" prefix is used as a namespace to avoid conflicts
284 * with other providers (such as Watchman).
285 *
286 * The <token_id> is an arbitrary OPAQUE string, such as a GUID,
287 * UUID, or {timestamp,pid}. It is used to group all filesystem
288 * events that happened while the daemon was monitoring (and in-sync
289 * with the filesystem).
290 *
291 * Unlike FSMonitor Protocol V1, it is not defined as a timestamp
292 * and does not define less-than/greater-than relationships.
293 * (There are too many race conditions to rely on file system
294 * event timestamps.)
295 *
296 * The <sequence_nr> is a simple integer incremented whenever the
297 * daemon needs to make its state public. For example, if 1000 file
298 * system events come in, but no clients have requested the data,
299 * the daemon can continue to accumulate file changes in the same
300 * bin and does not need to advance the sequence number. However,
301 * as soon as a client does arrive, the daemon needs to start a new
302 * bin and increment the sequence number.
303 *
304 * The sequence number serves as the boundary between 2 sets
305 * of bins -- the older ones that the client has already seen
306 * and the newer ones that it hasn't.
307 *
308 * When a new <token_id> is created, the <sequence_nr> is reset to
309 * zero.
310 *
311 *
312 * About Token Ids
313 * ===============
314 *
315 * A new token_id is created:
316 *
317 * [1] each time the daemon is started.
318 *
319 * [2] any time that the daemon must re-sync with the filesystem
320 * (such as when the kernel drops or we miss events on a very
321 * active volume).
322 *
323 * [3] in response to a client "flush" command (for dropped event
324 * testing).
325 *
326 * When a new token_id is created, the daemon is free to discard all
327 * cached filesystem events associated with any previous token_ids.
328 * Events associated with a non-current token_id will never be sent
329 * to a client. A token_id change implicitly means that the daemon
330 * has gap in its event history.
331 *
332 * Therefore, clients that present a token with a stale (non-current)
333 * token_id will always be given a trivial response.
334 */
struct fsmonitor_token_data {
	struct strbuf token_id;              /* opaque <token_id> string */
	struct fsmonitor_batch *batch_head;  /* newest batch */
	struct fsmonitor_batch *batch_tail;  /* oldest batch still retained */
	/* number of reader threads holding this token outside the lock */
	uint64_t client_ref_count;
};
341
bec486b9
JH
/*
 * One "bin" of accumulated filesystem events; batches form a
 * singly-linked list from newest (head) to oldest.
 */
struct fsmonitor_batch {
	struct fsmonitor_batch *next;
	uint64_t batch_seq_nr;
	/* interned strings; we own only the array, not the strings */
	const char **interned_paths;
	size_t nr, alloc;
	time_t pinned_time; /* nonzero once published to a client (read-only) */
};
349
aeef767a
JH
/*
 * Create a fresh token with a brand new unique <token_id> and a
 * single empty batch.  Normally the id is built from a flush counter,
 * the pid, and the current UTC time; when GIT_TEST_FSMONITOR_TOKEN is
 * set, a predictable "test_%08x" id is used instead.
 */
static struct fsmonitor_token_data *fsmonitor_new_token_data(void)
{
	static int test_env_value = -1;
	static uint64_t flush_count = 0;
	struct fsmonitor_token_data *token;
	struct fsmonitor_batch *batch;

	CALLOC_ARRAY(token, 1);
	batch = fsmonitor_batch__new();

	strbuf_init(&token->token_id, 0);
	token->batch_head = batch;
	token->batch_tail = batch;
	token->client_ref_count = 0;

	if (test_env_value < 0)
		test_env_value = git_env_bool("GIT_TEST_FSMONITOR_TOKEN", 0);

	if (!test_env_value) {
		struct timeval tv;
		struct tm tm;
		time_t secs;

		gettimeofday(&tv, NULL);
		secs = tv.tv_sec;
		gmtime_r(&secs, &tm);

		strbuf_addf(&token->token_id,
			    "%"PRIu64".%d.%4d%02d%02dT%02d%02d%02d.%06ldZ",
			    flush_count++,
			    getpid(),
			    tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
			    tm.tm_hour, tm.tm_min, tm.tm_sec,
			    (long)tv.tv_usec);
	} else {
		strbuf_addf(&token->token_id, "test_%08x", test_env_value++);
	}

	/*
	 * We created a new <token_id> and are starting a new series
	 * of tokens with a zero <seq_nr>.
	 *
	 * Since clients cannot guess our new (non test) <token_id>
	 * they will always receive a trivial response (because of the
	 * mismatch on the <token_id>).  The trivial response will
	 * tell them our new <token_id> so that subsequent requests
	 * will be relative to our new series.  (And when sending that
	 * response, we pin the current head of the batch list.)
	 *
	 * Even if the client correctly guesses the <token_id>, their
	 * request of "builtin:<token_id>:0" asks for all changes MORE
	 * RECENT than batch/bin 0.
	 *
	 * This implies that it is a waste to accumulate paths in the
	 * initial batch/bin (because they will never be transmitted).
	 *
	 * So the daemon could be running for days and watching the
	 * file system, but doesn't need to actually accumulate any
	 * paths UNTIL we need to set a reference point for a later
	 * relative request.
	 *
	 * However, it is very useful for testing to always have a
	 * reference point set.  Pin batch 0 to force early file system
	 * events to accumulate.
	 */
	if (test_env_value)
		batch->pinned_time = time(NULL);

	return token;
}
420
bec486b9
JH
/*
 * Allocate a new, empty, zero-initialized batch.
 */
struct fsmonitor_batch *fsmonitor_batch__new(void)
{
	struct fsmonitor_batch *b;

	CALLOC_ARRAY(b, 1);
	return b;
}
429
430void fsmonitor_batch__free_list(struct fsmonitor_batch *batch)
431{
432 while (batch) {
433 struct fsmonitor_batch *next = batch->next;
434
435 /*
436 * The actual strings within the array of this batch
437 * are interned, so we don't own them. We only own
438 * the array.
439 */
440 free(batch->interned_paths);
441 free(batch);
442
443 batch = next;
444 }
445}
446
447void fsmonitor_batch__add_path(struct fsmonitor_batch *batch,
448 const char *path)
449{
450 const char *interned_path = strintern(path);
451
452 trace_printf_key(&trace_fsmonitor, "event: %s", interned_path);
453
454 ALLOC_GROW(batch->interned_paths, batch->nr + 1, batch->alloc);
455 batch->interned_paths[batch->nr++] = interned_path;
456}
457
458static void fsmonitor_batch__combine(struct fsmonitor_batch *batch_dest,
459 const struct fsmonitor_batch *batch_src)
460{
461 size_t k;
462
463 ALLOC_GROW(batch_dest->interned_paths,
464 batch_dest->nr + batch_src->nr + 1,
465 batch_dest->alloc);
466
467 for (k = 0; k < batch_src->nr; k++)
468 batch_dest->interned_paths[batch_dest->nr++] =
469 batch_src->interned_paths[k];
470}
471
50c725d6
JH
/*
 * To keep the batch list from growing unbounded in response to filesystem
 * activity, we try to truncate old batches from the end of the list as
 * they become irrelevant.
 *
 * We assume that the .git/index will be updated with the most recent token
 * any time the index is updated.  And future commands will only ask for
 * recent changes *since* that new token.  So as tokens advance into the
 * future, older batch items will never be requested/needed.  So we can
 * truncate them without loss of functionality.
 *
 * However, multiple commands may be talking to the daemon concurrently
 * or perform a slow command, so a little "token skew" is possible.
 * Therefore, we want this to be a little bit lazy and have a generous
 * delay.
 *
 * The current reader thread walked backwards in time from `token->batch_head`
 * back to `batch_marker` somewhere in the middle of the batch list.
 *
 * Let's walk backwards in time from that marker an arbitrary delay
 * and truncate the list there.  Note that these timestamps are completely
 * artificial (based on when we pinned the batch item) and not on any
 * filesystem activity.
 *
 * Return the obsolete portion of the list after we have removed it from
 * the official list so that the caller can free it after leaving the lock.
 */
#define MY_TIME_DELAY_SECONDS (5 * 60) /* seconds */

static struct fsmonitor_batch *with_lock__truncate_old_batches(
	struct fsmonitor_daemon_state *state,
	const struct fsmonitor_batch *batch_marker)
{
	/* assert current thread holding state->main_lock */

	const struct fsmonitor_batch *batch;
	struct fsmonitor_batch *remainder;

	if (!batch_marker)
		return NULL;

	trace_printf_key(&trace_fsmonitor, "Truncate: mark (%"PRIu64",%"PRIu64")",
			 batch_marker->batch_seq_nr,
			 (uint64_t)batch_marker->pinned_time);

	/* walk older batches looking for the first one safely past the delay */
	for (batch = batch_marker; batch; batch = batch->next) {
		time_t t;

		if (!batch->pinned_time) /* an overflow batch */
			continue;

		t = batch->pinned_time + MY_TIME_DELAY_SECONDS;
		if (t > batch_marker->pinned_time) /* too close to marker */
			continue;

		goto truncate_past_here;
	}

	return NULL;

truncate_past_here:
	/*
	 * `batch` came from walking a const list, but we hold the lock
	 * and own the list, so casting away const here to cut the
	 * chain is deliberate.
	 */
	state->current_token_data->batch_tail = (struct fsmonitor_batch *)batch;

	remainder = ((struct fsmonitor_batch *)batch)->next;
	((struct fsmonitor_batch *)batch)->next = NULL;

	return remainder;
}
540
bec486b9
JH
541static void fsmonitor_free_token_data(struct fsmonitor_token_data *token)
542{
543 if (!token)
544 return;
545
546 assert(token->client_ref_count == 0);
547
548 strbuf_release(&token->token_id);
549
550 fsmonitor_batch__free_list(token->batch_head);
551
552 free(token);
553}
554
555/*
556 * Flush all of our cached data about the filesystem. Call this if we
557 * lose sync with the filesystem and miss some notification events.
558 *
559 * [1] If we are missing events, then we no longer have a complete
560 * history of the directory (relative to our current start token).
561 * We should create a new token and start fresh (as if we just
562 * booted up).
563 *
b05880d3
JH
564 * [2] Some of those lost events may have been for cookie files. We
565 * should assume the worst and abort them rather letting them starve.
566 *
bec486b9
JH
567 * If there are no concurrent threads reading the current token data
568 * series, we can free it now. Otherwise, let the last reader free
569 * it.
570 *
571 * Either way, the old token data series is no longer associated with
572 * our state data.
573 */
574static void with_lock__do_force_resync(struct fsmonitor_daemon_state *state)
575{
576 /* assert current thread holding state->main_lock */
577
578 struct fsmonitor_token_data *free_me = NULL;
579 struct fsmonitor_token_data *new_one = NULL;
580
581 new_one = fsmonitor_new_token_data();
582
583 if (state->current_token_data->client_ref_count == 0)
584 free_me = state->current_token_data;
585 state->current_token_data = new_one;
586
587 fsmonitor_free_token_data(free_me);
b05880d3
JH
588
589 with_lock__abort_all_cookies(state);
bec486b9
JH
590}
591
/*
 * Thread-safe wrapper: take the main lock and force a resync of the
 * token/batch state (see with_lock__do_force_resync()).
 */
void fsmonitor_force_resync(struct fsmonitor_daemon_state *state)
{
	pthread_mutex_lock(&state->main_lock);
	with_lock__do_force_resync(state);
	pthread_mutex_unlock(&state->main_lock);
}
598
518a522f
JH
/*
 * Format an opaque token string to send to the client.
 *
 * Tokens have the form "builtin:<token_id>:<seq_nr>" (see the Token
 * Format discussion above); <seq_nr> is taken from the given batch.
 */
static void with_lock__format_response_token(
	struct strbuf *response_token,
	const struct strbuf *response_token_id,
	const struct fsmonitor_batch *batch)
{
	/* assert current thread holding state->main_lock */

	strbuf_reset(response_token);
	strbuf_addf(response_token, "builtin:%s:%"PRIu64,
		    response_token_id->buf, batch->batch_seq_nr);
}
613
/*
 * Parse an opaque token from the client: "builtin:<token_id>:<seq_nr>".
 *
 * Fills `requested_token_id` with the <token_id> portion and `seq_nr`
 * with the decimal <seq_nr>.
 * Returns -1 on error: wrong prefix, missing ':' separator, or a
 * <seq_nr> that is not a plain non-negative decimal integer.
 */
static int fsmonitor_parse_client_token(const char *buf_token,
					struct strbuf *requested_token_id,
					uint64_t *seq_nr)
{
	const char *p;
	char *p_end;

	strbuf_reset(requested_token_id);
	*seq_nr = 0;

	if (!skip_prefix(buf_token, "builtin:", &p))
		return -1;

	while (*p && *p != ':')
		strbuf_addch(requested_token_id, *p++);
	if (!*p++)
		return -1;

	/*
	 * Require an actual digit here: strtoumax() silently accepts
	 * leading whitespace and a sign, so e.g. ":-1" would wrap to
	 * a huge uint64_t instead of being rejected as garbage.
	 */
	if (!isdigit(*p))
		return -1;

	errno = 0;
	*seq_nr = (uint64_t)strtoumax(p, &p_end, 10);
	if (errno == ERANGE || *p_end)
		return -1;

	return 0;
}
642
643KHASH_INIT(str, const char *, int, 0, kh_str_hash_func, kh_str_hash_equal)
644
KHASH_INIT(str, const char *, int, 0, kh_str_hash_func, kh_str_hash_equal)

/*
 * Handle one client request (already dispatched by handle_client()).
 *
 * Understands "quit", "flush", V2 "builtin:<token_id>:<seq_nr>" tokens,
 * and treats anything else as an unsupported V1 timestamp, answering
 * with a trivial response.  For a valid V2 token it streams back every
 * de-duplicated path changed since the requested sequence number.
 *
 * Returns 0, or SIMPLE_IPC_QUIT to shut down the IPC thread pool.
 */
static int do_handle_client(struct fsmonitor_daemon_state *state,
			    const char *command,
			    ipc_server_reply_cb *reply,
			    struct ipc_server_reply_data *reply_data)
{
	struct fsmonitor_token_data *token_data = NULL;
	struct strbuf response_token = STRBUF_INIT;
	struct strbuf requested_token_id = STRBUF_INIT;
	struct strbuf payload = STRBUF_INIT;
	uint64_t requested_oldest_seq_nr = 0;
	uint64_t total_response_len = 0;
	const char *p;
	const struct fsmonitor_batch *batch_head;
	const struct fsmonitor_batch *batch;
	struct fsmonitor_batch *remainder = NULL;
	intmax_t count = 0, duplicates = 0;
	kh_str_t *shown;
	int hash_ret;
	int do_trivial = 0;
	int do_flush = 0;
	int do_cookie = 0;
	enum fsmonitor_cookie_item_result cookie_result;

	/*
	 * We expect `command` to be of the form:
	 *
	 * <command> := quit NUL
	 *            | flush NUL
	 *            | <V1-time-since-epoch-ns> NUL
	 *            | <V2-opaque-fsmonitor-token> NUL
	 */

	if (!strcmp(command, "quit")) {
		/*
		 * A client has requested over the socket/pipe that the
		 * daemon shutdown.
		 *
		 * Tell the IPC thread pool to shutdown (which completes
		 * the await in the main thread (which can stop the
		 * fsmonitor listener thread)).
		 *
		 * There is no reply to the client.
		 */
		return SIMPLE_IPC_QUIT;

	} else if (!strcmp(command, "flush")) {
		/*
		 * Flush all of our cached data and generate a new token
		 * just like if we lost sync with the filesystem.
		 *
		 * Then send a trivial response using the new token.
		 */
		do_flush = 1;
		do_trivial = 1;

	} else if (!skip_prefix(command, "builtin:", &p)) {
		/* assume V1 timestamp or garbage */

		char *p_end;

		strtoumax(command, &p_end, 10);
		trace_printf_key(&trace_fsmonitor,
				 ((*p_end) ?
				  "fsmonitor: invalid command line '%s'" :
				  "fsmonitor: unsupported V1 protocol '%s'"),
				 command);
		do_trivial = 1;

	} else {
		/* We have "builtin:*" */
		if (fsmonitor_parse_client_token(command, &requested_token_id,
						 &requested_oldest_seq_nr)) {
			trace_printf_key(&trace_fsmonitor,
					 "fsmonitor: invalid V2 protocol token '%s'",
					 command);
			do_trivial = 1;

		} else {
			/*
			 * We have a V2 valid token:
			 *     "builtin:<token_id>:<seq_nr>"
			 */
			do_cookie = 1;
		}
	}

	pthread_mutex_lock(&state->main_lock);

	if (!state->current_token_data)
		BUG("fsmonitor state does not have a current token");

	/*
	 * Write a cookie file inside the directory being watched in
	 * an effort to flush out existing filesystem events that we
	 * actually care about.  Suspend this client thread until we
	 * see the filesystem events for this cookie file.
	 *
	 * Creating the cookie lets us guarantee that our FS listener
	 * thread has drained the kernel queue and we are caught up
	 * with the kernel.
	 *
	 * If we cannot create the cookie (or otherwise guarantee that
	 * we are caught up), we send a trivial response.  We have to
	 * assume that there might be some very, very recent activity
	 * on the FS still in flight.
	 */
	if (do_cookie) {
		cookie_result = with_lock__wait_for_cookie(state);
		if (cookie_result != FCIR_SEEN) {
			error(_("fsmonitor: cookie_result '%d' != SEEN"),
			      cookie_result);
			do_trivial = 1;
		}
	}

	if (do_flush)
		with_lock__do_force_resync(state);

	/*
	 * We mark the current head of the batch list as "pinned" so
	 * that the listener thread will treat this item as read-only
	 * (and prevent any more paths from being added to it) from
	 * now on.
	 */
	token_data = state->current_token_data;
	batch_head = token_data->batch_head;
	((struct fsmonitor_batch *)batch_head)->pinned_time = time(NULL);

	/*
	 * FSMonitor Protocol V2 requires that we send a response header
	 * with a "new current token" and then all of the paths that changed
	 * since the "requested token".  We send the seq_nr of the just-pinned
	 * head batch so that future requests from a client will be relative
	 * to it.
	 */
	with_lock__format_response_token(&response_token,
					 &token_data->token_id, batch_head);

	reply(reply_data, response_token.buf, response_token.len + 1);
	total_response_len += response_token.len + 1;

	trace2_data_string("fsmonitor", the_repository, "response/token",
			   response_token.buf);
	trace_printf_key(&trace_fsmonitor, "response token: %s",
			 response_token.buf);

	if (!do_trivial) {
		if (strcmp(requested_token_id.buf, token_data->token_id.buf)) {
			/*
			 * The client last spoke to a different daemon
			 * instance -OR- the daemon had to resync with
			 * the filesystem (and lost events), so reject.
			 */
			trace2_data_string("fsmonitor", the_repository,
					   "response/token", "different");
			do_trivial = 1;

		} else if (requested_oldest_seq_nr <
			   token_data->batch_tail->batch_seq_nr) {
			/*
			 * The client wants older events than we have for
			 * this token_id.  This means that the end of our
			 * batch list was truncated and we cannot give the
			 * client a complete snapshot relative to their
			 * request.
			 */
			trace_printf_key(&trace_fsmonitor,
					 "client requested truncated data");
			do_trivial = 1;
		}
	}

	if (do_trivial) {
		pthread_mutex_unlock(&state->main_lock);

		/*
		 * NOTE(review): "/" (with trailing NUL) appears to be
		 * the protocol's trivial "rescan everything" payload —
		 * confirm against the client-side parser.
		 */
		reply(reply_data, "/", 2);

		trace2_data_intmax("fsmonitor", the_repository,
				   "response/trivial", 1);

		goto cleanup;
	}

	/*
	 * We're going to hold onto a pointer to the current
	 * token-data while we walk the list of batches of files.
	 * During this time, we will NOT be under the lock.
	 * So we ref-count it.
	 *
	 * This allows the listener thread to continue prepending
	 * new batches of items to the token-data (which we'll ignore).
	 *
	 * AND it allows the listener thread to do a token-reset
	 * (and install a new `current_token_data`).
	 */
	token_data->client_ref_count++;

	pthread_mutex_unlock(&state->main_lock);

	/*
	 * The client request is relative to the token that they sent,
	 * so walk the batch list backwards from the current head back
	 * to the batch (sequence number) they named.
	 *
	 * We use khash to de-dup the list of pathnames.
	 *
	 * NEEDSWORK: each batch contains a list of interned strings,
	 * so we only need to do pointer comparisons here to build the
	 * hash table.  Currently, we're still comparing the string
	 * values.
	 */
	shown = kh_init_str();
	for (batch = batch_head;
	     batch && batch->batch_seq_nr > requested_oldest_seq_nr;
	     batch = batch->next) {
		size_t k;

		for (k = 0; k < batch->nr; k++) {
			const char *s = batch->interned_paths[k];
			size_t s_len;

			if (kh_get_str(shown, s) != kh_end(shown))
				duplicates++;
			else {
				kh_put_str(shown, s, &hash_ret);

				trace_printf_key(&trace_fsmonitor,
						 "send[%"PRIuMAX"]: %s",
						 count, s);

				/* Each path gets written with a trailing NUL */
				s_len = strlen(s) + 1;

				/* flush the payload before it exceeds one packet */
				if (payload.len + s_len >=
				    LARGE_PACKET_DATA_MAX) {
					reply(reply_data, payload.buf,
					      payload.len);
					total_response_len += payload.len;
					strbuf_reset(&payload);
				}

				strbuf_add(&payload, s, s_len);
				count++;
			}
		}
	}

	if (payload.len) {
		reply(reply_data, payload.buf, payload.len);
		total_response_len += payload.len;
	}

	kh_release_str(shown);

	pthread_mutex_lock(&state->main_lock);

	if (token_data->client_ref_count > 0)
		token_data->client_ref_count--;

	if (token_data->client_ref_count == 0) {
		if (token_data != state->current_token_data) {
			/*
			 * The listener thread did a token-reset while we were
			 * walking the batch list.  Therefore, this token is
			 * stale and can be discarded completely.  If we are
			 * the last reader thread using this token, we own
			 * that work.
			 */
			fsmonitor_free_token_data(token_data);
		} else if (batch) {
			/*
			 * We are holding the lock and are the only
			 * reader of the ref-counted portion of the
			 * list, so we get the honor of seeing if the
			 * list can be truncated to save memory.
			 *
			 * The main loop did not walk to the end of the
			 * list, so this batch is the first item in the
			 * batch-list that is older than the requested
			 * end-point sequence number.  See if the tail
			 * end of the list is obsolete.
			 */
			remainder = with_lock__truncate_old_batches(state,
								    batch);
		}
	}

	pthread_mutex_unlock(&state->main_lock);

	/* free the truncated tail outside the lock */
	if (remainder)
		fsmonitor_batch__free_list(remainder);

	trace2_data_intmax("fsmonitor", the_repository, "response/length", total_response_len);
	trace2_data_intmax("fsmonitor", the_repository, "response/count/files", count);
	trace2_data_intmax("fsmonitor", the_repository, "response/count/duplicates", duplicates);

cleanup:
	strbuf_release(&response_token);
	strbuf_release(&requested_token_id);
	strbuf_release(&payload);

	return 0;
}
948
9dcba0ba
JH
949static ipc_server_application_cb handle_client;
950
951static int handle_client(void *data,
952 const char *command, size_t command_len,
953 ipc_server_reply_cb *reply,
954 struct ipc_server_reply_data *reply_data)
955{
518a522f 956 struct fsmonitor_daemon_state *state = data;
9dcba0ba
JH
957 int result;
958
959 /*
960 * The Simple IPC API now supports {char*, len} arguments, but
961 * FSMonitor always uses proper null-terminated strings, so
962 * we can ignore the command_len argument. (Trust, but verify.)
963 */
964 if (command_len != strlen(command))
965 BUG("FSMonitor assumes text messages");
966
518a522f
JH
967 trace_printf_key(&trace_fsmonitor, "requested token: %s", command);
968
9dcba0ba
JH
969 trace2_region_enter("fsmonitor", "handle_client", the_repository);
970 trace2_data_string("fsmonitor", the_repository, "request", command);
971
518a522f 972 result = do_handle_client(state, command, reply, reply_data);
9dcba0ba
JH
973
974 trace2_region_leave("fsmonitor", "handle_client", the_repository);
975
976 return result;
977}
978
b05880d3
JH
#define FSMONITOR_DIR "fsmonitor--daemon"
#define FSMONITOR_COOKIE_DIR "cookies"
#define FSMONITOR_COOKIE_PREFIX (FSMONITOR_DIR "/" FSMONITOR_COOKIE_DIR "/")

/*
 * Classify a pathname relative to the root of the working directory:
 * a normal worktree path, ".git" itself, something inside ".git/",
 * or one of our own cookie files.
 */
enum fsmonitor_path_type fsmonitor_classify_path_workdir_relative(
	const char *rel)
{
	if (fspathncmp(rel, ".git", 4))
		return IS_WORKDIR_PATH;
	rel += 4;

	if (!*rel)
		return IS_DOT_GIT;
	if (*rel != '/')
		return IS_WORKDIR_PATH; /* e.g. .gitignore */
	rel++;

	if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
			strlen(FSMONITOR_COOKIE_PREFIX)))
		return IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX;

	return IS_INSIDE_DOT_GIT;
}
1002
1003enum fsmonitor_path_type fsmonitor_classify_path_gitdir_relative(
1004 const char *rel)
1005{
1006 if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
1007 strlen(FSMONITOR_COOKIE_PREFIX)))
1008 return IS_INSIDE_GITDIR_WITH_COOKIE_PREFIX;
1009
1010 return IS_INSIDE_GITDIR;
1011}
1012
1013static enum fsmonitor_path_type try_classify_workdir_abs_path(
1014 struct fsmonitor_daemon_state *state,
1015 const char *path)
1016{
1017 const char *rel;
1018
1019 if (fspathncmp(path, state->path_worktree_watch.buf,
1020 state->path_worktree_watch.len))
1021 return IS_OUTSIDE_CONE;
1022
1023 rel = path + state->path_worktree_watch.len;
1024
1025 if (!*rel)
1026 return IS_WORKDIR_PATH; /* it is the root dir exactly */
1027 if (*rel != '/')
1028 return IS_OUTSIDE_CONE;
1029 rel++;
1030
1031 return fsmonitor_classify_path_workdir_relative(rel);
1032}
1033
1034enum fsmonitor_path_type fsmonitor_classify_path_absolute(
1035 struct fsmonitor_daemon_state *state,
1036 const char *path)
1037{
1038 const char *rel;
1039 enum fsmonitor_path_type t;
1040
1041 t = try_classify_workdir_abs_path(state, path);
1042 if (state->nr_paths_watching == 1)
1043 return t;
1044 if (t != IS_OUTSIDE_CONE)
1045 return t;
1046
1047 if (fspathncmp(path, state->path_gitdir_watch.buf,
1048 state->path_gitdir_watch.len))
1049 return IS_OUTSIDE_CONE;
1050
1051 rel = path + state->path_gitdir_watch.len;
1052
1053 if (!*rel)
1054 return IS_GITDIR; /* it is the <gitdir> exactly */
1055 if (*rel != '/')
1056 return IS_OUTSIDE_CONE;
1057 rel++;
1058
1059 return fsmonitor_classify_path_gitdir_relative(rel);
1060}
1061
/*
 * We try to combine small batches at the front of the batch-list to avoid
 * having a long list. This hopefully makes it a little easier when we want
 * to truncate and maintain the list. However, we don't want the paths array
 * to just keep growing and growing with realloc, so we insert an arbitrary
 * limit.
 */
#define MY_COMBINE_LIMIT (1024)

/*
 * Publish a new batch of observed paths and/or mark the given cookie
 * files as seen.  Takes (and releases) state->main_lock; on return the
 * batch has either been prepended to the current token's batch list,
 * folded into the head batch, or freed.
 */
void fsmonitor_publish(struct fsmonitor_daemon_state *state,
		       struct fsmonitor_batch *batch,
		       const struct string_list *cookie_names)
{
	/* Nothing to publish and no cookies to mark: avoid taking the lock. */
	if (!batch && !cookie_names->nr)
		return;

	pthread_mutex_lock(&state->main_lock);

	if (batch) {
		struct fsmonitor_batch *head;

		head = state->current_token_data->batch_head;
		if (!head) {
			BUG("token does not have batch");
		} else if (head->pinned_time) {
			/*
			 * We cannot alter the current batch list
			 * because:
			 *
			 * [a] it is being transmitted to at least one
			 * client and the handle_client() thread has a
			 * ref-count, but not a lock on the batch list
			 * starting with this item.
			 *
			 * [b] it has been transmitted in the past to
			 * at least one client such that future
			 * requests are relative to this head batch.
			 *
			 * So, we can only prepend a new batch onto
			 * the front of the list.
			 */
			batch->batch_seq_nr = head->batch_seq_nr + 1;
			batch->next = head;
			state->current_token_data->batch_head = batch;
		} else if (!head->batch_seq_nr) {
			/*
			 * Batch 0 is unpinned. See the note in
			 * `fsmonitor_new_token_data()` about why we
			 * don't need to accumulate these paths.
			 */
			fsmonitor_batch__free_list(batch);
		} else if (head->nr + batch->nr > MY_COMBINE_LIMIT) {
			/*
			 * The head batch in the list has never been
			 * transmitted to a client, but folding the
			 * contents of the new batch onto it would
			 * exceed our arbitrary limit, so just prepend
			 * the new batch onto the list.
			 */
			batch->batch_seq_nr = head->batch_seq_nr + 1;
			batch->next = head;
			state->current_token_data->batch_head = batch;
		} else {
			/*
			 * We are free to add the paths in the given
			 * batch onto the end of the current head batch.
			 */
			fsmonitor_batch__combine(head, batch);
			fsmonitor_batch__free_list(batch);
		}
	}

	if (cookie_names->nr)
		with_lock__mark_cookies_seen(state, cookie_names);

	pthread_mutex_unlock(&state->main_lock);
}
1139
d0605550
JH
1140static void *fsm_health__thread_proc(void *_state)
1141{
1142 struct fsmonitor_daemon_state *state = _state;
1143
1144 trace2_thread_start("fsm-health");
1145
1146 fsm_health__loop(state);
1147
1148 trace2_thread_exit();
1149 return NULL;
1150}
1151
/*
 * Thread-proc for the filesystem listener thread: runs the platform
 * event loop and, when it exits, discards the current token so that
 * clients cannot be answered from stale data.
 */
static void *fsm_listen__thread_proc(void *_state)
{
	struct fsmonitor_daemon_state *state = _state;

	trace2_thread_start("fsm-listen");

	trace_printf_key(&trace_fsmonitor, "Watching: worktree '%s'",
			 state->path_worktree_watch.buf);
	if (state->nr_paths_watching > 1)
		trace_printf_key(&trace_fsmonitor, "Watching: gitdir '%s'",
				 state->path_gitdir_watch.buf);

	fsm_listen__loop(state);

	/*
	 * Drop the current token.  Only free it immediately when no
	 * client thread still holds a ref-count on it; an in-flight
	 * reader is responsible for the final free.  Either way the
	 * daemon no longer advertises this token.
	 */
	pthread_mutex_lock(&state->main_lock);
	if (state->current_token_data &&
	    state->current_token_data->client_ref_count == 0)
		fsmonitor_free_token_data(state->current_token_data);
	state->current_token_data = NULL;
	pthread_mutex_unlock(&state->main_lock);

	trace2_thread_exit();
	return NULL;
}
1176
/*
 * Start the IPC thread pool, the listener thread, and the health
 * thread, then wait for shutdown.  Returns 0 on clean shutdown,
 * otherwise the first error seen (startup error, listener error
 * code, or health error code, in that order).
 */
static int fsmonitor_run_daemon_1(struct fsmonitor_daemon_state *state)
{
	struct ipc_server_opts ipc_opts = {
		.nr_threads = fsmonitor__ipc_threads,

		/*
		 * We know that there are no other active threads yet,
		 * so we can let the IPC layer temporarily chdir() if
		 * it needs to when creating the server side of the
		 * Unix domain socket.
		 */
		.uds_disallow_chdir = 0
	};
	int health_started = 0;
	int listener_started = 0;
	int err = 0;

	/*
	 * Start the IPC thread pool before we've started the file
	 * system event listener thread so that we have the IPC handle
	 * before we need it.
	 */
	if (ipc_server_run_async(&state->ipc_server_data,
				 state->path_ipc.buf, &ipc_opts,
				 handle_client, state))
		return error_errno(
			_("could not start IPC thread pool on '%s'"),
			state->path_ipc.buf);

	/*
	 * Start the fsmonitor listener thread to collect filesystem
	 * events.
	 */
	if (pthread_create(&state->listener_thread, NULL,
			   fsm_listen__thread_proc, state) < 0) {
		ipc_server_stop_async(state->ipc_server_data);
		err = error(_("could not start fsmonitor listener thread"));
		goto cleanup;
	}
	listener_started = 1;

	/*
	 * Start the health thread to watch over our process.
	 */
	if (pthread_create(&state->health_thread, NULL,
			   fsm_health__thread_proc, state) < 0) {
		ipc_server_stop_async(state->ipc_server_data);
		err = error(_("could not start fsmonitor health thread"));
		goto cleanup;
	}
	health_started = 1;

	/*
	 * The daemon is now fully functional in background threads.
	 * Our primary thread should now just wait while the threads
	 * do all the work.
	 */
cleanup:
	/*
	 * Wait for the IPC thread pool to shutdown (whether by client
	 * request, from filesystem activity, or an error).
	 */
	ipc_server_await(state->ipc_server_data);

	/*
	 * The fsmonitor listener thread may have received a shutdown
	 * event from the IPC thread pool, but it doesn't hurt to tell
	 * it again. And wait for it to shutdown.
	 */
	if (listener_started) {
		fsm_listen__stop_async(state);
		pthread_join(state->listener_thread, NULL);
	}

	if (health_started) {
		fsm_health__stop_async(state);
		pthread_join(state->health_thread, NULL);
	}

	/* Report errors in priority order: startup, listener, health. */
	if (err)
		return err;
	if (state->listen_error_code)
		return state->listen_error_code;
	if (state->health_error_code)
		return state->health_error_code;
	return 0;
}
1264
/*
 * Build the daemon state (watch paths, cookie-file prefix, IPC path),
 * construct the platform listener/health backends, chdir out of the
 * worktree, and run the daemon until shutdown.  All state is released
 * before returning.
 */
static int fsmonitor_run_daemon(void)
{
	struct fsmonitor_daemon_state state;
	const char *home;
	int err;

	memset(&state, 0, sizeof(state));

	hashmap_init(&state.cookies, cookies_cmp, NULL, 0);
	pthread_mutex_init(&state.main_lock, NULL);
	pthread_cond_init(&state.cookies_cond, NULL);
	state.listen_error_code = 0;
	state.health_error_code = 0;
	state.current_token_data = fsmonitor_new_token_data();

	/* Prepare to (recursively) watch the <worktree-root> directory. */
	strbuf_init(&state.path_worktree_watch, 0);
	strbuf_addstr(&state.path_worktree_watch, absolute_path(get_git_work_tree()));
	state.nr_paths_watching = 1;

	/*
	 * We create and delete cookie files somewhere inside the .git
	 * directory to help us keep sync with the file system. If
	 * ".git" is not a directory, then <gitdir> is not inside the
	 * cone of <worktree-root>, so set up a second watch to watch
	 * the <gitdir> so that we get events for the cookie files.
	 */
	strbuf_init(&state.path_gitdir_watch, 0);
	strbuf_addbuf(&state.path_gitdir_watch, &state.path_worktree_watch);
	strbuf_addstr(&state.path_gitdir_watch, "/.git");
	if (!is_directory(state.path_gitdir_watch.buf)) {
		strbuf_reset(&state.path_gitdir_watch);
		strbuf_addstr(&state.path_gitdir_watch, absolute_path(get_git_dir()));
		state.nr_paths_watching = 2;
	}

	/*
	 * We will write filesystem syncing cookie files into
	 * <gitdir>/<fsmonitor-dir>/<cookie-dir>/<pid>-<seq>.
	 *
	 * The extra layers of subdirectories here keep us from
	 * changing the mtime on ".git/" or ".git/foo/" when we create
	 * or delete cookie files.
	 *
	 * There have been problems with some IDEs that do a
	 * non-recursive watch of the ".git/" directory and run a
	 * series of commands any time something happens.
	 *
	 * For example, if we place our cookie files directly in
	 * ".git/" or ".git/foo/" then a `git status` (or similar
	 * command) from the IDE will cause a cookie file to be
	 * created in one of those dirs. This causes the mtime of
	 * those dirs to change. This triggers the IDE's watch
	 * notification. This triggers the IDE to run those commands
	 * again. And the process repeats and the machine never goes
	 * idle.
	 *
	 * Adding the extra layers of subdirectories prevents the
	 * mtime of ".git/" and ".git/foo" from changing when a
	 * cookie file is created.
	 */
	strbuf_init(&state.path_cookie_prefix, 0);
	strbuf_addbuf(&state.path_cookie_prefix, &state.path_gitdir_watch);

	/* mkdir() failures are ignored here; the dirs may already exist. */
	strbuf_addch(&state.path_cookie_prefix, '/');
	strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_DIR);
	mkdir(state.path_cookie_prefix.buf, 0777);

	strbuf_addch(&state.path_cookie_prefix, '/');
	strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_COOKIE_DIR);
	mkdir(state.path_cookie_prefix.buf, 0777);

	strbuf_addch(&state.path_cookie_prefix, '/');

	/*
	 * We create a named-pipe or unix domain socket inside of the
	 * ".git" directory. (Well, on Windows, we base our named
	 * pipe in the NPFS on the absolute path of the git
	 * directory.)
	 */
	strbuf_init(&state.path_ipc, 0);
	strbuf_addstr(&state.path_ipc, absolute_path(fsmonitor_ipc__get_path()));

	/*
	 * Confirm that we can create platform-specific resources for the
	 * filesystem listener before we bother starting all the threads.
	 */
	if (fsm_listen__ctor(&state)) {
		err = error(_("could not initialize listener thread"));
		goto done;
	}

	if (fsm_health__ctor(&state)) {
		err = error(_("could not initialize health thread"));
		goto done;
	}

	/*
	 * CD out of the worktree root directory.
	 *
	 * The common Git startup mechanism causes our CWD to be the
	 * root of the worktree. On Windows, this causes our process
	 * to hold a locked handle on the CWD. This prevents the
	 * worktree from being moved or deleted while the daemon is
	 * running.
	 *
	 * We assume that our FS and IPC listener threads have either
	 * opened all of the handles that they need or will do
	 * everything using absolute paths.
	 */
	home = getenv("HOME");
	if (home && *home && chdir(home))
		die_errno(_("could not cd home '%s'"), home);

	err = fsmonitor_run_daemon_1(&state);

done:
	/* Tear everything down in roughly the reverse order of setup. */
	pthread_cond_destroy(&state.cookies_cond);
	pthread_mutex_destroy(&state.main_lock);
	fsm_listen__dtor(&state);
	fsm_health__dtor(&state);

	ipc_server_free(state.ipc_server_data);

	strbuf_release(&state.path_worktree_watch);
	strbuf_release(&state.path_gitdir_watch);
	strbuf_release(&state.path_cookie_prefix);
	strbuf_release(&state.path_ipc);

	return err;
}
1396
/*
 * Run the daemon in the current (foreground) process.  On Windows,
 * optionally detach from the console first.  Returns 0 on success,
 * 1 on failure (for use as a command exit code).
 */
static int try_to_run_foreground_daemon(int detach_console)
{
	/*
	 * Technically, we don't need to probe for an existing daemon
	 * process, since we could just call `fsmonitor_run_daemon()`
	 * and let it fail if the pipe/socket is busy.
	 *
	 * However, this method gives us a nicer error message for a
	 * common error case.
	 */
	if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
		die(_("fsmonitor--daemon is already running '%s'"),
		    the_repository->worktree);

	if (fsmonitor__announce_startup) {
		fprintf(stderr, _("running fsmonitor-daemon in '%s'\n"),
			the_repository->worktree);
		fflush(stderr);
	}

#ifdef GIT_WINDOWS_NATIVE
	if (detach_console)
		FreeConsole();
#endif

	return !!fsmonitor_run_daemon();
}
1424
c284e27b
JH
1425static start_bg_wait_cb bg_wait_cb;
1426
1427static int bg_wait_cb(const struct child_process *cp, void *cb_data)
1428{
1429 enum ipc_active_state s = fsmonitor_ipc__get_state();
1430
1431 switch (s) {
1432 case IPC_STATE__LISTENING:
1433 /* child is "ready" */
1434 return 0;
1435
1436 case IPC_STATE__NOT_LISTENING:
1437 case IPC_STATE__PATH_NOT_FOUND:
1438 /* give child more time */
1439 return 1;
1440
1441 default:
1442 case IPC_STATE__INVALID_PATH:
1443 case IPC_STATE__OTHER_ERROR:
1444 /* all the time in world won't help */
1445 return -1;
1446 }
1447}
1448
1449static int try_to_start_background_daemon(void)
1450{
1451 struct child_process cp = CHILD_PROCESS_INIT;
1452 enum start_bg_result sbgr;
1453
1454 /*
1455 * Before we try to create a background daemon process, see
1456 * if a daemon process is already listening. This makes it
1457 * easier for us to report an already-listening error to the
1458 * console, since our spawn/daemon can only report the success
1459 * of creating the background process (and not whether it
1460 * immediately exited).
1461 */
1462 if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
1463 die(_("fsmonitor--daemon is already running '%s'"),
1464 the_repository->worktree);
1465
1466 if (fsmonitor__announce_startup) {
1467 fprintf(stderr, _("starting fsmonitor-daemon in '%s'\n"),
1468 the_repository->worktree);
1469 fflush(stderr);
1470 }
1471
1472 cp.git_cmd = 1;
1473
1474 strvec_push(&cp.args, "fsmonitor--daemon");
1475 strvec_push(&cp.args, "run");
1476 strvec_push(&cp.args, "--detach");
1477 strvec_pushf(&cp.args, "--ipc-threads=%d", fsmonitor__ipc_threads);
1478
1479 cp.no_stdin = 1;
1480 cp.no_stdout = 1;
1481 cp.no_stderr = 1;
1482
1483 sbgr = start_bg_command(&cp, bg_wait_cb, NULL,
1484 fsmonitor__start_timeout_sec);
1485
1486 switch (sbgr) {
1487 case SBGR_READY:
1488 return 0;
1489
1490 default:
1491 case SBGR_ERROR:
1492 case SBGR_CB_ERROR:
1493 return error(_("daemon failed to start"));
1494
1495 case SBGR_TIMEOUT:
1496 return error(_("daemon not online yet"));
1497
1498 case SBGR_DIED:
1499 return error(_("daemon terminated"));
1500 }
1501}
1502
/*
 * Entry point for `git fsmonitor--daemon <subcommand>` on platforms
 * with a compiled-in backend.  Parses options, verifies the repo is
 * fsmonitor-compatible, and dispatches to start/run/stop/status.
 */
int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
{
	const char *subcmd;
	enum fsmonitor_reason reason;
	int detach_console = 0;

	struct option options[] = {
		OPT_BOOL(0, "detach", &detach_console, N_("detach from console")),
		OPT_INTEGER(0, "ipc-threads",
			    &fsmonitor__ipc_threads,
			    N_("use <n> ipc worker threads")),
		OPT_INTEGER(0, "start-timeout",
			    &fsmonitor__start_timeout_sec,
			    N_("max seconds to wait for background daemon startup")),

		OPT_END()
	};

	git_config(fsmonitor_config, NULL);

	argc = parse_options(argc, argv, prefix, options,
			     builtin_fsmonitor__daemon_usage, 0);
	if (argc != 1)
		usage_with_options(builtin_fsmonitor__daemon_usage, options);
	subcmd = argv[0];

	/* Re-validate: --ipc-threads may override the config value. */
	if (fsmonitor__ipc_threads < 1)
		die(_("invalid 'ipc-threads' value (%d)"),
		    fsmonitor__ipc_threads);

	prepare_repo_settings(the_repository);
	/*
	 * If the repo is fsmonitor-compatible, explicitly set IPC-mode
	 * (without bothering to load the `core.fsmonitor` config settings).
	 *
	 * If the repo is not compatible, the repo-settings will be set to
	 * incompatible rather than IPC, so we can use one of the __get
	 * routines to detect the discrepancy.
	 */
	fsm_settings__set_ipc(the_repository);

	reason = fsm_settings__get_reason(the_repository);
	if (reason > FSMONITOR_REASON_OK)
		die("%s",
		    fsm_settings__get_incompatible_msg(the_repository,
						       reason));

	if (!strcmp(subcmd, "start"))
		return !!try_to_start_background_daemon();

	if (!strcmp(subcmd, "run"))
		return !!try_to_run_foreground_daemon(detach_console);

	if (!strcmp(subcmd, "stop"))
		return !!do_as_client__send_stop();

	if (!strcmp(subcmd, "status"))
		return !!do_as_client__status();

	die(_("Unhandled subcommand '%s'"), subcmd);
}
1564
#else
/*
 * Stub entry point used when no fsmonitor backend is compiled in for
 * this platform: honor "-h", otherwise die with an explanation.
 */
int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
{
	struct option options[] = {
		OPT_END()
	};

	if (argc == 2 && !strcmp(argv[1], "-h"))
		usage_with_options(builtin_fsmonitor__daemon_usage, options);

	die(_("fsmonitor--daemon not supported on this platform"));
}
#endif